// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
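
/*
 * Example of the resulting order: records (root 5, ino 300), (root 5,
 * ino 257) and (root 7, ino 256) sort as (5, 257), (5, 300), (7, 256) in
 * fs_info->defrag_inodes, i.e. by root objectid first and inode number
 * second.
 */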

/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and then evict the inode from
		 * memory, a later re-read creates a new in-memory inode
		 * without the IN_DEFRAG flag. In that case we may find an
		 * existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
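/*
 * Example: if the tree holds records for (root 5, ino 257) and
 * (root 5, ino 300), a lookup for (root 5, ino 260) finds no exact match and
 * falls through to rb_next(), returning (and erasing) the (5, 300) record.
 */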
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
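
/*
 * Example: an inode with more than BTRFS_DEFRAG_BATCH (1024) pages of defrag
 * work is handled in multiple passes; after each full batch the record is
 * requeued with last_offset advanced, so other queued inodes get a turn
 * between passes.
 */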

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
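
/*
 * Example (assuming 4KiB pages): pos = 5000 and write_bytes = 6000 start at
 * offset 904 within prepared_pages[0]; the first iteration copies 3192 bytes
 * to finish that page, the second copies the remaining 2808 bytes into
 * prepared_pages[1].
 */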

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here. There should be no need to mark the pages
		 * accessed, as prepare_pages should have marked them accessed
		 * via find_or_create_page().
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start,
				      search_len, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
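
/*
 * Example: for a write over [0, 128K) where only [64K, 96K) is currently a
 * hole, the loop above sets EXTENT_DELALLOC_NEW just on [64K, 96K); ranges
 * backed by existing extents are skipped via the EXTENT_MAP_HOLE check.
 */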

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
		if (start_pos >= isize &&
		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
			/*
			 * There can't be any extents following eof in this case
			 * so just set the delalloc new bit for the range
			 * directly.
			 */
			extra_bits |= EXTENT_DELALLOC_NEW;
		} else {
			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
							    start_pos,
							    num_bytes, cached);
			if (err)
				return err;
		}
	}

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
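
/*
 * Example (assuming a 4KiB sectorsize): a 3000 byte write at pos 5000 gives
 * start_pos = 4096, num_bytes = round_up(3000 + 5000 - 4096, 4096) = 4096
 * and end_of_last_block = 8191, i.e. the delalloc range covers exactly the
 * one sector the write touches.
 */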

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
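
/*
 * Example: dropping [8K, 12K) from a cached extent map covering [4K, 16K)
 * leaves two maps behind: the first split covers [4K, 8K) and the second
 * covers [12K, 16K), while the original map is removed from the tree and
 * freed.
 */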

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(BTRFS_I(inode));
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (due to a bug) when we hit an -ENOSPC
		 * condition while punching holes. So if we find one here,
		 * just ensure we delete it, otherwise we would insert a new
		 * file extent item with the same key (offset) as that 0 bytes
		 * length file extent item in the call to
		 * setup_items_for_insert() later in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items were moved off from our leaf to its
		 * immediate left or right neighbor leafs, we end up with a
		 * correct and adjusted path->slots[0] for our insertion (if
		 * replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}
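
/*
 * Example: dropping [0, 64K) from a file whose last extent item ends at 48K
 * sets *drop_end to min(end, last_end) = 48K, telling the caller (e.g. hole
 * punching) that nothing past 48K was actually dropped.
 */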

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
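
/*
 * Example: a neighboring file extent item qualifies as mergeable when it is
 * a plain (uncompressed, unencrypted) regular extent pointing at the same
 * disk_bytenr with a data offset consistent with orig_offset; *start/*end
 * are then widened to cover the neighbor's range.
 */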

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
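
/*
 * Example: marking [16K, 32K) written inside a 64K preallocated extent at
 * file offset 0 typically leaves three items: preallocated [0, 16K) and
 * [32K, 64K) pieces plus a regular [16K, 32K) piece, all referencing the
 * same on-disk extent via their extent offsets.
 */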

/*
 * On error we return an unlocked page and the error value;
 * on success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;

}
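
/*
 * Example: if a page is truncated away while we read it,
 * prepare_uptodate_page() sees page->mapping != i_mapping and returns
 * -EAGAIN; the loop above then drops that page and retries the same index.
 */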

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other negative value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos,
			   fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(&inode->vfs_inode,
					ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	ret = btrfs_start_write_no_snapshotting(root);
	if (!ret)
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;

	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
					   lockend, NULL);

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
		btrfs_end_write_no_snapshotting(root);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}

	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
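
/*
 * Example: for a 16K write where can_nocow_extent() reports only 8K of
 * NOCOW-able space from lockstart, *write_bytes is clamped to 8K; the
 * buffered write loop then handles the remainder on a later iteration,
 * falling back to COW with data reservation if needed.
 */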

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					       struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
				fs_info->sectorsize);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
						      BTRFS_INODE_PREALLOC)) &&
			    check_can_nocow(BTRFS_I(inode), pos,
					&write_bytes) > 0) {
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
				reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
						data_reserved, pos,
						write_bytes);
			else
				btrfs_end_write_no_snapshotting(root);
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(inode,
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
						pos, copied, &cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_end_write_no_snapshotting(root);

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
			btrfs_btree_balance_dirty(fs_info);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_end_write_no_snapshotting(root);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(inode, data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}
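
/*
 * Example: if two sectors (8K) were reserved but a short copy dirtied only
 * one, the loop above returns the unused 4K right away
 * (release_bytes -= dirty_sectors << blocksize_bits) instead of holding the
 * reservation until the write finishes.
 */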

static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}
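
/*
 * Example: if a 1M O_DIRECT write completes only 256K directly, the
 * remaining 768K goes through the page cache, is flushed with
 * btrfs_fdatawrite_range() and waited on, and the affected page cache range
 * is invalidated so a later direct read sees the new data.
 */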
1866
1867static void update_time_for_write(struct inode *inode)
1868{
1869	struct timespec64 now;
1870
1871	if (IS_NOCMTIME(inode))
1872		return;
1873
1874	now = current_time(inode);
1875	if (!timespec64_equal(&inode->i_mtime, &now))
1876		inode->i_mtime = now;
1877
1878	if (!timespec64_equal(&inode->i_ctime, &now))
1879		inode->i_ctime = now;
1880
1881	if (IS_I_VERSION(inode))
1882		inode_inc_iversion(inode);
1883}
1884
1885static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1886				    struct iov_iter *from)
1887{
1888	struct file *file = iocb->ki_filp;
1889	struct inode *inode = file_inode(file);
1890	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1891	struct btrfs_root *root = BTRFS_I(inode)->root;
1892	u64 start_pos;
1893	u64 end_pos;
1894	ssize_t num_written = 0;
1895	const bool sync = iocb->ki_flags & IOCB_DSYNC;
1896	ssize_t err;
1897	loff_t pos;
1898	size_t count;
1899	loff_t oldsize;
1900	int clean_page = 0;
1901
1902	if (!(iocb->ki_flags & IOCB_DIRECT) &&
1903	    (iocb->ki_flags & IOCB_NOWAIT))
1904		return -EOPNOTSUPP;
1905
1906	if (!inode_trylock(inode)) {
1907		if (iocb->ki_flags & IOCB_NOWAIT)
1908			return -EAGAIN;
1909		inode_lock(inode);
1910	}
1911
1912	err = generic_write_checks(iocb, from);
1913	if (err <= 0) {
1914		inode_unlock(inode);
1915		return err;
1916	}
1917
1918	pos = iocb->ki_pos;
1919	count = iov_iter_count(from);
1920	if (iocb->ki_flags & IOCB_NOWAIT) {
1921		/*
1922		 * We will have to allocate space if nodatacow is not set,
1923		 * so bail out
1924		 */
1925		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1926					      BTRFS_INODE_PREALLOC)) ||
1927		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1928			inode_unlock(inode);
1929			return -EAGAIN;
1930		}
1931	}
1932
1933	current->backing_dev_info = inode_to_bdi(inode);
1934	err = file_remove_privs(file);
1935	if (err) {
1936		inode_unlock(inode);
1937		goto out;
1938	}
1939
1940	/*
1941	 * If BTRFS flips readonly due to some impossible error
1942	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1943	 * although we have opened a file as writable, we have
1944	 * to stop this write operation to ensure FS consistency.
1945	 */
1946	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1947		inode_unlock(inode);
1948		err = -EROFS;
1949		goto out;
1950	}
1951
1952	/*
1953	 * We reserve space for updating the inode when we reserve space for the
1954	 * extent we are going to write, so we will get ENOSPC there.  We don't
1955	 * need to start yet another transaction to update the inode as we will
1956	 * update the inode when we finish writing whatever data we write.
1957	 */
1958	update_time_for_write(inode);
1959
1960	start_pos = round_down(pos, fs_info->sectorsize);
1961	oldsize = i_size_read(inode);
1962	if (start_pos > oldsize) {
1963		/* Expand hole size to cover write data, preventing empty gap */
1964		end_pos = round_up(pos + count,
1965				   fs_info->sectorsize);
1966		err = btrfs_cont_expand(inode, oldsize, end_pos);
1967		if (err) {
1968			inode_unlock(inode);
1969			goto out;
1970		}
1971		if (start_pos > round_up(oldsize, fs_info->sectorsize))
1972			clean_page = 1;
1973	}
1974
1975	if (sync)
1976		atomic_inc(&BTRFS_I(inode)->sync_writers);
1977
1978	if (iocb->ki_flags & IOCB_DIRECT) {
1979		num_written = __btrfs_direct_write(iocb, from);
1980	} else {
1981		num_written = btrfs_buffered_write(iocb, from);
1982		if (num_written > 0)
1983			iocb->ki_pos = pos + num_written;
1984		if (clean_page)
1985			pagecache_isize_extended(inode, oldsize,
1986						i_size_read(inode));
1987	}
1988
1989	inode_unlock(inode);
1990
1991	/*
1992	 * We also have to set last_sub_trans to the current log transid,
1993	 * otherwise subsequent syncs to a file that's been synced in this
1994	 * transaction will appear to have already occurred.
1995	 */
1996	spin_lock(&BTRFS_I(inode)->lock);
1997	BTRFS_I(inode)->last_sub_trans = root->log_transid;
1998	spin_unlock(&BTRFS_I(inode)->lock);
1999	if (num_written > 0)
2000		num_written = generic_write_sync(iocb, num_written);
2001
2002	if (sync)
2003		atomic_dec(&BTRFS_I(inode)->sync_writers);
2004out:
2005	current->backing_dev_info = NULL;
2006	return num_written ? num_written : err;
2007}
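
/*
 * Illustrative userspace sketch (editor's addition): issuing a non-blocking
 * write with pwritev2(RWF_NOWAIT), which reaches btrfs_file_write_iter()
 * with IOCB_NOWAIT set.  As the checks above show, btrfs only supports
 * NOWAIT together with O_DIRECT, and returns -EAGAIN when the write would
 * need to block or allocate space (no nodatacow/prealloc).  The file path
 * is an assumption and the file must already exist.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t n;
	int fd = open("/mnt/btrfs/nowait-test", O_WRONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'y', 4096);
	iov.iov_base = buf;
	iov.iov_len = 4096;

	n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		fprintf(stderr, "would block, retry from a worker thread\n");
	else if (n < 0 && errno == EOPNOTSUPP)
		fprintf(stderr, "NOWAIT not supported for this write\n");

	free(buf);
	close(fd);
	return 0;
}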
2008
2009int btrfs_release_file(struct inode *inode, struct file *filp)
2010{
2011	struct btrfs_file_private *private = filp->private_data;
2012
2013	if (private)
2014		kfree(private->filldir_buf);
2015	kfree(private);
2016	filp->private_data = NULL;
2017
2018	/*
2019	 * ordered_data_close is set by setattr when we are about to truncate
2020	 * a file from a non-zero size to a zero size.  This tries to
2021	 * flush down new bytes that may have been written if the
2022	 * application were using truncate to replace a file in place.
2023	 */
2024	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2025			       &BTRFS_I(inode)->runtime_flags))
2026		filemap_flush(inode->i_mapping);
2027	return 0;
2028}
2029
2030static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2031{
2032	int ret;
2033	struct blk_plug plug;
2034
2035	/*
2036	 * This is only called in fsync, which would do synchronous writes, so
2037	 * a plug can merge adjacent IOs as much as possible.  Especially in
2038	 * the case of multiple disks using a raid profile, a large IO can be
2039	 * split into several segments of stripe length (currently 64K).
2040	 */
2041	blk_start_plug(&plug);
2042	atomic_inc(&BTRFS_I(inode)->sync_writers);
2043	ret = btrfs_fdatawrite_range(inode, start, end);
2044	atomic_dec(&BTRFS_I(inode)->sync_writers);
2045	blk_finish_plug(&plug);
2046
2047	return ret;
2048}
2049
2050/*
2051 * fsync call for both files and directories.  This logs the inode into
2052 * the tree log instead of forcing full commits whenever possible.
2053 *
2054 * It needs to call filemap_fdatawait so that all ordered extent updates
2055 * in the metadata btree are up to date for copying to the log.
2056 *
2057 * It drops the inode mutex before doing the tree log commit.  This is an
2058 * important optimization for directories because holding the mutex prevents
2059 * new operations on the dir while we write to disk.
2060 */
2061int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2062{
2063	struct dentry *dentry = file_dentry(file);
2064	struct inode *inode = d_inode(dentry);
2065	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2066	struct btrfs_root *root = BTRFS_I(inode)->root;
2067	struct btrfs_trans_handle *trans;
2068	struct btrfs_log_ctx ctx;
2069	int ret = 0, err;
2070
2071	trace_btrfs_sync_file(file, datasync);
2072
2073	btrfs_init_log_ctx(&ctx, inode);
2074
2075	/*
2076	 * We write the dirty pages in the range and wait until they complete
2077	 * out of the ->i_mutex, so that the dirty pages can be flushed by
2078	 * multiple tasks, which improves performance.  See
2079	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2080	 */
2081	ret = start_ordered_ops(inode, start, end);
2082	if (ret)
2083		goto out;
2084
2085	inode_lock(inode);
2086
2087	/*
2088	 * We take the dio_sem here because the tree log stuff can race with
2089	 * lockless dio writes and get an extent map logged for an extent we
2090	 * never waited on.  We need it this high up for lockdep reasons.
2091	 */
2092	down_write(&BTRFS_I(inode)->dio_sem);
2093
2094	atomic_inc(&root->log_batch);
2095
2096	/*
2097	 * If the inode needs a full sync, make sure we use a full range to
2098	 * avoid log tree corruption, due to hole detection racing with ordered
2099	 * extent completion for adjacent ranges, and assertion failures during
2100	 * hole detection. Do this while holding the inode lock, to avoid races
2101	 * with other tasks.
2102	 */
2103	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2104		     &BTRFS_I(inode)->runtime_flags)) {
2105		start = 0;
2106		end = LLONG_MAX;
2107	}
2108
2109	/*
2110	 * Before we acquired the inode's lock, someone may have dirtied more
2111	 * pages in the target range. We need to make sure that writeback for
2112	 * any such pages does not start while we are logging the inode, because
2113	 * if it does, any of the following might happen when we are not doing a
2114	 * full inode sync:
2115	 *
2116	 * 1) We log an extent after its writeback finishes but before its
2117	 *    checksums are added to the csum tree, leading to -EIO errors
2118	 *    when attempting to read the extent after a log replay.
2119	 *
2120	 * 2) We can end up logging an extent before its writeback finishes.
2121	 *    Therefore after the log replay we will have a file extent item
2122	 *    pointing to an unwritten extent (and no data checksums either).
2123	 *
2124	 * So trigger writeback for any possible new dirty pages and then we
2125	 * wait for all ordered extents to complete below.
2126	 */
2127	ret = start_ordered_ops(inode, start, end);
2128	if (ret) {
2129		inode_unlock(inode);
2130		goto out;
2131	}
2132
2133	/*
2134	 * We have to do this here to avoid the priority inversion of waiting on
2135	 * IO of a lower priority task while holding a transaction open.
2136	 *
2137	 * Also, the range length can be represented by u64, we have to do the
2138	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
2139	 */
2140	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
2141	if (ret) {
2142		up_write(&BTRFS_I(inode)->dio_sem);
2143		inode_unlock(inode);
2144		goto out;
2145	}
2146	atomic_inc(&root->log_batch);
2147
2148	smp_mb();
2149	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2150	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2151		/*
2152		 * We've had everything committed since the last time we were
2153		 * modified so clear this flag in case it was set for whatever
2154		 * reason, it's no longer relevant.
2155		 */
2156		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2157			  &BTRFS_I(inode)->runtime_flags);
2158		/*
2159		 * An ordered extent might have started before and completed
2160		 * already with io errors, in which case the inode was not
2161		 * updated and we end up here. So check the inode's mapping
2162		 * for any errors that might have happened since we last
2163		 * called fsync.
2164		 */
2165		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2166		up_write(&BTRFS_I(inode)->dio_sem);
2167		inode_unlock(inode);
2168		goto out;
2169	}
2170
2171	/*
2172	 * We use start here because we will need to wait on the IO to complete
2173	 * in btrfs_sync_log, which could require joining a transaction (for
2174	 * example checking cross references in the nocow path).  If we use join
2175	 * here we could get into a situation where we're waiting on IO to
2176	 * happen that is blocked on a transaction trying to commit.  With start
2177	 * we inc the extwriter counter, so we wait for all extwriters to exit
2178	 * before we start blocking joiners.  This comment is to keep somebody
2179	 * from thinking they are super smart and changing this to
2180	 * btrfs_join_transaction *cough*Josef*cough*.
2181	 */
2182	trans = btrfs_start_transaction(root, 0);
2183	if (IS_ERR(trans)) {
2184		ret = PTR_ERR(trans);
2185		up_write(&BTRFS_I(inode)->dio_sem);
2186		inode_unlock(inode);
2187		goto out;
2188	}
2189
2190	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2191	if (ret < 0) {
2192		/* Fallthrough and commit/free transaction. */
2193		ret = 1;
2194	}
2195
2196	/* we've logged all the items and now have a consistent
2197	 * version of the file in the log.  It is possible that
2198	 * someone will come in and modify the file, but that's
2199	 * fine because the log is consistent on disk, and we
2200	 * have references to all of the file's extents
2201	 *
2202	 * It is possible that someone will come in and log the
2203	 * file again, but that will end up using the synchronization
2204	 * inside btrfs_sync_log to keep things safe.
2205	 */
2206	up_write(&BTRFS_I(inode)->dio_sem);
2207	inode_unlock(inode);
2208
2209	if (ret != BTRFS_NO_LOG_SYNC) {
2210		if (!ret) {
2211			ret = btrfs_sync_log(trans, root, &ctx);
2212			if (!ret) {
2213				ret = btrfs_end_transaction(trans);
2214				goto out;
2215			}
2216		}
2217		ret = btrfs_commit_transaction(trans);
2218	} else {
2219		ret = btrfs_end_transaction(trans);
2220	}
2221out:
2222	ASSERT(list_empty(&ctx.list));
2223	err = file_check_and_advance_wb_err(file);
2224	if (!ret)
2225		ret = err;
2226	return ret > 0 ? -EIO : ret;
2227}
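
/*
 * Illustrative userspace sketch (editor's addition): the fdatasync() path
 * that ends up in btrfs_sync_file() above with datasync == 1.  On btrfs
 * this normally results in a tree-log sync rather than a full transaction
 * commit.  The file path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "must survive a crash\n";
	int fd = open("/mnt/btrfs/fsync-test", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0 || write(fd, msg, strlen(msg)) != (ssize_t)strlen(msg))
		return 1;

	/* Durability point: data and the logged inode are on disk after this. */
	if (fdatasync(fd) < 0) {
		perror("fdatasync");
		return 1;
	}
	return close(fd);
}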
2228
2229static const struct vm_operations_struct btrfs_file_vm_ops = {
2230	.fault		= filemap_fault,
2231	.map_pages	= filemap_map_pages,
2232	.page_mkwrite	= btrfs_page_mkwrite,
2233};
2234
2235static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2236{
2237	struct address_space *mapping = filp->f_mapping;
2238
2239	if (!mapping->a_ops->readpage)
2240		return -ENOEXEC;
2241
2242	file_accessed(filp);
2243	vma->vm_ops = &btrfs_file_vm_ops;
2244
2245	return 0;
2246}
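
/*
 * Illustrative userspace sketch (editor's addition): a shared writable
 * mapping whose first store to a clean page traps into btrfs_page_mkwrite()
 * via the vm_ops installed above.  Assumes the file exists and is at least
 * one page long; the path is an assumption.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *p;
	int fd = open("/mnt/btrfs/mmap-test", O_RDWR);

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memcpy(p, "hello", 5);		/* triggers ->page_mkwrite on a clean page */
	msync(p, 4096, MS_SYNC);	/* write the dirtied page back */

	munmap(p, 4096);
	return close(fd);
}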
2247
2248static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2249			  int slot, u64 start, u64 end)
2250{
2251	struct btrfs_file_extent_item *fi;
2252	struct btrfs_key key;
2253
2254	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2255		return 0;
2256
2257	btrfs_item_key_to_cpu(leaf, &key, slot);
2258	if (key.objectid != btrfs_ino(inode) ||
2259	    key.type != BTRFS_EXTENT_DATA_KEY)
2260		return 0;
2261
2262	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2263
2264	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2265		return 0;
2266
2267	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2268		return 0;
2269
2270	if (key.offset == end)
2271		return 1;
2272	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2273		return 1;
2274	return 0;
2275}
2276
2277static int fill_holes(struct btrfs_trans_handle *trans,
2278		struct btrfs_inode *inode,
2279		struct btrfs_path *path, u64 offset, u64 end)
2280{
2281	struct btrfs_fs_info *fs_info = trans->fs_info;
2282	struct btrfs_root *root = inode->root;
2283	struct extent_buffer *leaf;
2284	struct btrfs_file_extent_item *fi;
2285	struct extent_map *hole_em;
2286	struct extent_map_tree *em_tree = &inode->extent_tree;
2287	struct btrfs_key key;
2288	int ret;
2289
2290	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2291		goto out;
2292
2293	key.objectid = btrfs_ino(inode);
2294	key.type = BTRFS_EXTENT_DATA_KEY;
2295	key.offset = offset;
2296
2297	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2298	if (ret <= 0) {
2299		/*
2300		 * We should have dropped this offset, so if we find it then
2301		 * something has gone horribly wrong.
2302		 */
2303		if (ret == 0)
2304			ret = -EINVAL;
2305		return ret;
2306	}
2307
2308	leaf = path->nodes[0];
2309	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2310		u64 num_bytes;
2311
2312		path->slots[0]--;
2313		fi = btrfs_item_ptr(leaf, path->slots[0],
2314				    struct btrfs_file_extent_item);
2315		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2316			end - offset;
2317		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2318		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2319		btrfs_set_file_extent_offset(leaf, fi, 0);
2320		btrfs_mark_buffer_dirty(leaf);
2321		goto out;
2322	}
2323
2324	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2325		u64 num_bytes;
2326
2327		key.offset = offset;
2328		btrfs_set_item_key_safe(fs_info, path, &key);
2329		fi = btrfs_item_ptr(leaf, path->slots[0],
2330				    struct btrfs_file_extent_item);
2331		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2332			offset;
2333		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2334		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2335		btrfs_set_file_extent_offset(leaf, fi, 0);
2336		btrfs_mark_buffer_dirty(leaf);
2337		goto out;
2338	}
2339	btrfs_release_path(path);
2340
2341	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2342			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2343	if (ret)
2344		return ret;
2345
2346out:
2347	btrfs_release_path(path);
2348
2349	hole_em = alloc_extent_map();
2350	if (!hole_em) {
2351		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2352		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2353	} else {
2354		hole_em->start = offset;
2355		hole_em->len = end - offset;
2356		hole_em->ram_bytes = hole_em->len;
2357		hole_em->orig_start = offset;
2358
2359		hole_em->block_start = EXTENT_MAP_HOLE;
2360		hole_em->block_len = 0;
2361		hole_em->orig_block_len = 0;
2362		hole_em->bdev = fs_info->fs_devices->latest_bdev;
2363		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2364		hole_em->generation = trans->transid;
2365
2366		do {
2367			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2368			write_lock(&em_tree->lock);
2369			ret = add_extent_mapping(em_tree, hole_em, 1);
2370			write_unlock(&em_tree->lock);
2371		} while (ret == -EEXIST);
2372		free_extent_map(hole_em);
2373		if (ret)
2374			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2375					&inode->runtime_flags);
2376	}
2377
2378	return 0;
2379}
2380
2381/*
2382 * Find a hole extent on the given inode and change start/len to the end of
2383 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2384 *	   em->start + em->len > start).
2385 * When a hole extent is found, return 1 and modify start/len.
2386 */
2387static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2388{
2389	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2390	struct extent_map *em;
2391	int ret = 0;
2392
2393	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2394			      round_down(*start, fs_info->sectorsize),
2395			      round_up(*len, fs_info->sectorsize), 0);
2396	if (IS_ERR(em))
2397		return PTR_ERR(em);
2398
2399	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
2400	if (em->block_start == EXTENT_MAP_HOLE) {
2401		ret = 1;
2402		*len = em->start + em->len > *start + *len ?
2403		       0 : *start + *len - em->start - em->len;
2404		*start = em->start + em->len;
2405	}
2406	free_extent_map(em);
2407	return ret;
2408}
2409
2410static int btrfs_punch_hole_lock_range(struct inode *inode,
2411				       const u64 lockstart,
2412				       const u64 lockend,
2413				       struct extent_state **cached_state)
2414{
2415	while (1) {
2416		struct btrfs_ordered_extent *ordered;
2417		int ret;
2418
2419		truncate_pagecache_range(inode, lockstart, lockend);
2420
2421		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2422				 cached_state);
2423		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2424
2425		/*
2426		 * We need to make sure we have no ordered extents in this range
2427		 * and that nobody raced in and read a page in this range; if
2428		 * either happened, we need to try again.
2429		 */
2430		if ((!ordered ||
2431		    (ordered->file_offset + ordered->len <= lockstart ||
2432		     ordered->file_offset > lockend)) &&
2433		     !filemap_range_has_page(inode->i_mapping,
2434					     lockstart, lockend)) {
2435			if (ordered)
2436				btrfs_put_ordered_extent(ordered);
2437			break;
2438		}
2439		if (ordered)
2440			btrfs_put_ordered_extent(ordered);
2441		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2442				     lockend, cached_state);
2443		ret = btrfs_wait_ordered_range(inode, lockstart,
2444					       lockend - lockstart + 1);
2445		if (ret)
2446			return ret;
2447	}
2448	return 0;
2449}
2450
2451static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
2452				     struct inode *inode,
2453				     struct btrfs_path *path,
2454				     struct btrfs_clone_extent_info *clone_info,
2455				     const u64 clone_len)
2456{
2457	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2458	struct btrfs_root *root = BTRFS_I(inode)->root;
2459	struct btrfs_file_extent_item *extent;
2460	struct extent_buffer *leaf;
2461	struct btrfs_key key;
2462	int slot;
2463	struct btrfs_ref ref = { 0 };
2464	u64 ref_offset;
2465	int ret;
2466
2467	if (clone_len == 0)
2468		return 0;
2469
2470	if (clone_info->disk_offset == 0 &&
2471	    btrfs_fs_incompat(fs_info, NO_HOLES))
2472		return 0;
2473
2474	key.objectid = btrfs_ino(BTRFS_I(inode));
2475	key.type = BTRFS_EXTENT_DATA_KEY;
2476	key.offset = clone_info->file_offset;
2477	ret = btrfs_insert_empty_item(trans, root, path, &key,
2478				      clone_info->item_size);
2479	if (ret)
2480		return ret;
2481	leaf = path->nodes[0];
2482	slot = path->slots[0];
2483	write_extent_buffer(leaf, clone_info->extent_buf,
2484			    btrfs_item_ptr_offset(leaf, slot),
2485			    clone_info->item_size);
2486	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2487	btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
2488	btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
2489	btrfs_mark_buffer_dirty(leaf);
2490	btrfs_release_path(path);
2491
2492	/* If it's a hole, nothing more needs to be done. */
2493	if (clone_info->disk_offset == 0)
2494		return 0;
2495
2496	inode_add_bytes(inode, clone_len);
2497	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2498			       clone_info->disk_offset,
2499			       clone_info->disk_len, 0);
2500	ref_offset = clone_info->file_offset - clone_info->data_offset;
2501	btrfs_init_data_ref(&ref, root->root_key.objectid,
2502			    btrfs_ino(BTRFS_I(inode)), ref_offset);
2503	ret = btrfs_inc_extent_ref(trans, &ref);
2504
2505	return ret;
2506}
2507
2508/*
2509 * The respective range must have been previously locked, as well as the inode.
2510 * The end offset is inclusive (last byte of the range).
2511 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
2512 * cloning.
2513 * When cloning, we don't want to end up in a state where we dropped extents
2514 * without inserting a new one, so we must abort the transaction to avoid
2515 * corruption.
2516 */
2517int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
2518			   const u64 start, const u64 end,
2519			   struct btrfs_clone_extent_info *clone_info,
2520			   struct btrfs_trans_handle **trans_out)
2521{
2522	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2523	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2524	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2525	struct btrfs_root *root = BTRFS_I(inode)->root;
2526	struct btrfs_trans_handle *trans = NULL;
2527	struct btrfs_block_rsv *rsv;
2528	unsigned int rsv_count;
2529	u64 cur_offset;
2530	u64 drop_end;
2531	u64 len = end - start;
2532	int ret = 0;
2533
2534	if (end <= start)
2535		return -EINVAL;
2536
2537	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2538	if (!rsv) {
2539		ret = -ENOMEM;
2540		goto out;
2541	}
2542	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2543	rsv->failfast = 1;
2544
2545	/*
2546	 * 1 - update the inode
2547	 * 1 - removing the extents in the range
2548	 * 1 - adding the hole extent if no_holes isn't set or if we are cloning
2549	 *     an extent
2550	 */
2551	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
2552		rsv_count = 3;
2553	else
2554		rsv_count = 2;
2555
2556	trans = btrfs_start_transaction(root, rsv_count);
2557	if (IS_ERR(trans)) {
2558		ret = PTR_ERR(trans);
2559		trans = NULL;
2560		goto out_free;
2561	}
2562
2563	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2564				      min_size, false);
2565	BUG_ON(ret);
2566	trans->block_rsv = rsv;
2567
2568	cur_offset = start;
2569	while (cur_offset < end) {
2570		ret = __btrfs_drop_extents(trans, root, inode, path,
2571					   cur_offset, end + 1, &drop_end,
2572					   1, 0, 0, NULL);
2573		if (ret != -ENOSPC) {
2574			/*
2575			 * When cloning we want to avoid transaction aborts when
2576			 * nothing was done and we are attempting to clone parts
2577			 * of inline extents, in such cases -EOPNOTSUPP is
2578			 * returned by __btrfs_drop_extents() without having
2579			 * changed anything in the file.
2580			 */
2581			if (clone_info && ret && ret != -EOPNOTSUPP)
2582				btrfs_abort_transaction(trans, ret);
2583			break;
2584		}
2585
2586		trans->block_rsv = &fs_info->trans_block_rsv;
2587
2588		if (!clone_info && cur_offset < drop_end &&
2589		    cur_offset < ino_size) {
2590			ret = fill_holes(trans, BTRFS_I(inode), path,
2591					cur_offset, drop_end);
2592			if (ret) {
2593				/*
2594				 * If we failed then we didn't insert our hole
2595				 * entries for the area we dropped, so now the
2596				 * fs is corrupted, so we must abort the
2597				 * transaction.
2598				 */
2599				btrfs_abort_transaction(trans, ret);
2600				break;
2601			}
2602		}
2603
2604		if (clone_info) {
2605			u64 clone_len = drop_end - cur_offset;
2606
2607			ret = btrfs_insert_clone_extent(trans, inode, path,
2608							clone_info, clone_len);
2609			if (ret) {
2610				btrfs_abort_transaction(trans, ret);
2611				break;
2612			}
2613			clone_info->data_len -= clone_len;
2614			clone_info->data_offset += clone_len;
2615			clone_info->file_offset += clone_len;
2616		}
2617
2618		cur_offset = drop_end;
2619
2620		ret = btrfs_update_inode(trans, root, inode);
2621		if (ret)
2622			break;
2623
2624		btrfs_end_transaction(trans);
2625		btrfs_btree_balance_dirty(fs_info);
2626
2627		trans = btrfs_start_transaction(root, rsv_count);
2628		if (IS_ERR(trans)) {
2629			ret = PTR_ERR(trans);
2630			trans = NULL;
2631			break;
2632		}
2633
2634		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2635					      rsv, min_size, false);
2636		BUG_ON(ret);	/* shouldn't happen */
2637		trans->block_rsv = rsv;
2638
2639		if (!clone_info) {
2640			ret = find_first_non_hole(inode, &cur_offset, &len);
2641			if (unlikely(ret < 0))
2642				break;
2643			if (ret && !len) {
2644				ret = 0;
2645				break;
2646			}
2647		}
2648	}
2649
2650	/*
2651	 * If we were cloning, force the next fsync to be a full one since we
2652	 * replaced (or just dropped in the case of cloning holes when
2653	 * NO_HOLES is enabled) extents and extent maps.
2654	 * This is for the sake of simplicity, and cloning into files larger
2655	 * than 16MiB would force the full fsync anyway (when
2656	 * try_release_extent_mapping() is invoked during page cache truncation).
2657	 */
2658	if (clone_info)
2659		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2660			&BTRFS_I(inode)->runtime_flags);
2661
2662	if (ret)
2663		goto out_trans;
2664
2665	trans->block_rsv = &fs_info->trans_block_rsv;
2666	/*
2667	 * If we are using the NO_HOLES feature we might already have had a
2668	 * hole that overlaps a part of the region [lockstart, lockend] and
2669	 * ends at (or beyond) lockend. Since we have no file extent items to
2670	 * represent holes, drop_end can be less than lockend and so we must
2671	 * make sure we have an extent map representing the existing hole (the
2672	 * call to __btrfs_drop_extents() might have dropped the existing extent
2673	 * map representing the existing hole), otherwise the fast fsync path
2674	 * will not record the existence of the hole region
2675	 * [existing_hole_start, lockend].
2676	 */
2677	if (drop_end <= end)
2678		drop_end = end + 1;
2679	/*
2680	 * Don't insert file hole extent item if it's for a range beyond eof
2681	 * (because it's useless) or if it represents a zero-length range (when
2682	 * cur_offset == drop_end).
2683	 */
2684	if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
2685		ret = fill_holes(trans, BTRFS_I(inode), path,
2686				cur_offset, drop_end);
2687		if (ret) {
2688			/* Same comment as above. */
2689			btrfs_abort_transaction(trans, ret);
2690			goto out_trans;
2691		}
2692	}
2693	if (clone_info) {
2694		ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
2695						clone_info->data_len);
2696		if (ret) {
2697			btrfs_abort_transaction(trans, ret);
2698			goto out_trans;
2699		}
2700	}
2701
2702out_trans:
2703	if (!trans)
2704		goto out_free;
2705
2706	trans->block_rsv = &fs_info->trans_block_rsv;
2707	if (ret)
2708		btrfs_end_transaction(trans);
2709	else
2710		*trans_out = trans;
2711out_free:
2712	btrfs_free_block_rsv(fs_info, rsv);
2713out:
2714	return ret;
2715}
2716
2717static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2718{
2719	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2720	struct btrfs_root *root = BTRFS_I(inode)->root;
2721	struct extent_state *cached_state = NULL;
2722	struct btrfs_path *path;
2723	struct btrfs_trans_handle *trans = NULL;
2724	u64 lockstart;
2725	u64 lockend;
2726	u64 tail_start;
2727	u64 tail_len;
2728	u64 orig_start = offset;
2729	int ret = 0;
2730	bool same_block;
2731	u64 ino_size;
2732	bool truncated_block = false;
2733	bool updated_inode = false;
2734
2735	ret = btrfs_wait_ordered_range(inode, offset, len);
2736	if (ret)
2737		return ret;
2738
2739	inode_lock(inode);
2740	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2741	ret = find_first_non_hole(inode, &offset, &len);
2742	if (ret < 0)
2743		goto out_only_mutex;
2744	if (ret && !len) {
2745		/* Already in a large hole */
2746		ret = 0;
2747		goto out_only_mutex;
2748	}
2749
2750	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2751	lockend = round_down(offset + len,
2752			     btrfs_inode_sectorsize(inode)) - 1;
2753	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2754		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2755	/*
2756	 * We needn't truncate any block which is beyond the end of the file
2757	 * because we are sure there is no data there.
2758	 */
2759	/*
2760	 * Only do this if we are in the same block and we aren't doing the
2761	 * entire block.
2762	 */
2763	if (same_block && len < fs_info->sectorsize) {
2764		if (offset < ino_size) {
2765			truncated_block = true;
2766			ret = btrfs_truncate_block(inode, offset, len, 0);
2767		} else {
2768			ret = 0;
2769		}
2770		goto out_only_mutex;
2771	}
2772
2773	/* zero back part of the first block */
2774	if (offset < ino_size) {
2775		truncated_block = true;
2776		ret = btrfs_truncate_block(inode, offset, 0, 0);
2777		if (ret) {
2778			inode_unlock(inode);
2779			return ret;
2780		}
2781	}
2782
2783	/* Check the aligned pages after the first unaligned page; if
2784	 * offset != orig_start, the first unaligned page and several
2785	 * following pages are already holes, so the extra check can be
2786	 * skipped */
2787	if (offset == orig_start) {
2788		/* after truncate page, check hole again */
2789		len = offset + len - lockstart;
2790		offset = lockstart;
2791		ret = find_first_non_hole(inode, &offset, &len);
2792		if (ret < 0)
2793			goto out_only_mutex;
2794		if (ret && !len) {
2795			ret = 0;
2796			goto out_only_mutex;
2797		}
2798		lockstart = offset;
2799	}
2800
2801	/* Check the tail unaligned part is in a hole */
2802	tail_start = lockend + 1;
2803	tail_len = offset + len - tail_start;
2804	if (tail_len) {
2805		ret = find_first_non_hole(inode, &tail_start, &tail_len);
2806		if (unlikely(ret < 0))
2807			goto out_only_mutex;
2808		if (!ret) {
2809			/* zero the front end of the last page */
2810			if (tail_start + tail_len < ino_size) {
2811				truncated_block = true;
2812				ret = btrfs_truncate_block(inode,
2813							tail_start + tail_len,
2814							0, 1);
2815				if (ret)
2816					goto out_only_mutex;
2817			}
2818		}
2819	}
2820
2821	if (lockend < lockstart) {
2822		ret = 0;
2823		goto out_only_mutex;
2824	}
2825
2826	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2827					  &cached_state);
2828	if (ret)
2829		goto out_only_mutex;
2830
2831	path = btrfs_alloc_path();
2832	if (!path) {
2833		ret = -ENOMEM;
2834		goto out;
2835	}
2836
2837	ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
2838				     &trans);
2839	btrfs_free_path(path);
2840	if (ret)
2841		goto out;
2842
2843	ASSERT(trans != NULL);
2844	inode_inc_iversion(inode);
2845	inode->i_mtime = inode->i_ctime = current_time(inode);
2846	ret = btrfs_update_inode(trans, root, inode);
2847	updated_inode = true;
2848	btrfs_end_transaction(trans);
2849	btrfs_btree_balance_dirty(fs_info);
2850out:
2851	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2852			     &cached_state);
2853out_only_mutex:
2854	if (!updated_inode && truncated_block && !ret) {
2855		/*
2856		 * If we only end up zeroing part of a page, we still need to
2857		 * update the inode item, so that all the time fields are
2858		 * updated as well as the necessary btrfs inode in memory fields
2859		 * for detecting, at fsync time, if the inode isn't yet in the
2860		 * log tree or it's there but not up to date.
2861		 */
2862		struct timespec64 now = current_time(inode);
2863
2864		inode_inc_iversion(inode);
2865		inode->i_mtime = now;
2866		inode->i_ctime = now;
2867		trans = btrfs_start_transaction(root, 1);
2868		if (IS_ERR(trans)) {
2869			ret = PTR_ERR(trans);
2870		} else {
2871			int ret2;
2872
2873			ret = btrfs_update_inode(trans, root, inode);
2874			ret2 = btrfs_end_transaction(trans);
2875			if (!ret)
2876				ret = ret2;
2877		}
2878	}
2879	inode_unlock(inode);
2880	return ret;
2881}
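
/*
 * Illustrative userspace sketch (editor's addition): punching a hole, the
 * fallocate() mode handled by btrfs_punch_hole() above.
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE.  The
 * file path and the offsets are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/punch-test", O_RDWR);

	if (fd < 0)
		return 1;
	/* Drop the extents backing bytes [64K, 192K), leaving a hole. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      65536, 131072) < 0) {
		perror("fallocate");
		return 1;
	}
	return close(fd);
}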
2882
2883/* Helper structure to record which range is already reserved */
2884struct falloc_range {
2885	struct list_head list;
2886	u64 start;
2887	u64 len;
2888};
2889
2890/*
2891 * Helper function to add falloc range
2892 *
2893 * The caller should have locked the larger extent range containing
2894 * [start, start + len)
2895 */
2896static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2897{
2898	struct falloc_range *prev = NULL;
2899	struct falloc_range *range = NULL;
2900
2901	if (list_empty(head))
2902		goto insert;
2903
2904	/*
2905	 * As fallocate iterates in bytenr order, we only need to check
2906	 * the last range.
2907	 */
2908	prev = list_entry(head->prev, struct falloc_range, list);
2909	if (prev->start + prev->len == start) {
2910		prev->len += len;
2911		return 0;
2912	}
2913insert:
2914	range = kmalloc(sizeof(*range), GFP_KERNEL);
2915	if (!range)
2916		return -ENOMEM;
2917	range->start = start;
2918	range->len = len;
2919	list_add_tail(&range->list, head);
2920	return 0;
2921}
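
/*
 * Illustrative sketch (editor's addition): a self-contained userspace
 * analog of add_falloc_range() above, showing how adjacent ranges coalesce.
 * Because fallocate walks the file in increasing offset order, only the
 * tail entry can ever merge.  The fixed-size array stands in for the
 * kernel's list_head purely for the sake of the example.
 */
#include <stdio.h>

struct range { unsigned long long start, len; };

static size_t add_range(struct range *list, size_t n,
			unsigned long long start, unsigned long long len)
{
	if (n && list[n - 1].start + list[n - 1].len == start) {
		list[n - 1].len += len;	/* contiguous: extend the tail */
		return n;
	}
	list[n].start = start;		/* gap: start a new entry */
	list[n].len = len;
	return n + 1;
}

int main(void)
{
	struct range list[8];
	size_t n = 0;

	n = add_range(list, n, 0, 4096);
	n = add_range(list, n, 4096, 4096);	/* merges into [0, 8192) */
	n = add_range(list, n, 16384, 4096);	/* gap: new entry */

	for (size_t i = 0; i < n; i++)
		printf("[%llu, %llu)\n", list[i].start,
		       list[i].start + list[i].len);
	return 0;
}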
2922
2923static int btrfs_fallocate_update_isize(struct inode *inode,
2924					const u64 end,
2925					const int mode)
2926{
2927	struct btrfs_trans_handle *trans;
2928	struct btrfs_root *root = BTRFS_I(inode)->root;
2929	int ret;
2930	int ret2;
2931
2932	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2933		return 0;
2934
2935	trans = btrfs_start_transaction(root, 1);
2936	if (IS_ERR(trans))
2937		return PTR_ERR(trans);
2938
2939	inode->i_ctime = current_time(inode);
2940	i_size_write(inode, end);
2941	btrfs_ordered_update_i_size(inode, end, NULL);
2942	ret = btrfs_update_inode(trans, root, inode);
2943	ret2 = btrfs_end_transaction(trans);
2944
2945	return ret ? ret : ret2;
2946}
2947
2948enum {
2949	RANGE_BOUNDARY_WRITTEN_EXTENT,
2950	RANGE_BOUNDARY_PREALLOC_EXTENT,
2951	RANGE_BOUNDARY_HOLE,
2952};
2953
2954static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2955						 u64 offset)
2956{
2957	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2958	struct extent_map *em;
2959	int ret;
2960
2961	offset = round_down(offset, sectorsize);
2962	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2963	if (IS_ERR(em))
2964		return PTR_ERR(em);
2965
2966	if (em->block_start == EXTENT_MAP_HOLE)
2967		ret = RANGE_BOUNDARY_HOLE;
2968	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2969		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2970	else
2971		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2972
2973	free_extent_map(em);
2974	return ret;
2975}
2976
2977static int btrfs_zero_range(struct inode *inode,
2978			    loff_t offset,
2979			    loff_t len,
2980			    const int mode)
2981{
2982	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2983	struct extent_map *em;
2984	struct extent_changeset *data_reserved = NULL;
2985	int ret;
2986	u64 alloc_hint = 0;
2987	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2988	u64 alloc_start = round_down(offset, sectorsize);
2989	u64 alloc_end = round_up(offset + len, sectorsize);
2990	u64 bytes_to_reserve = 0;
2991	bool space_reserved = false;
2992
2993	inode_dio_wait(inode);
2994
2995	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2996			      alloc_start, alloc_end - alloc_start, 0);
2997	if (IS_ERR(em)) {
2998		ret = PTR_ERR(em);
2999		goto out;
3000	}
3001
3002	/*
3003	 * Avoid hole punching and extent allocation for some cases. More cases
3004	 * could be considered, but those are unlikely to be common and we keep things
3005	 * as simple as possible for now. Also, intentionally, if the target
3006	 * range contains one or more prealloc extents together with regular
3007	 * extents and holes, we drop all the existing extents and allocate a
3008	 * new prealloc extent, so that we get a larger contiguous disk extent.
3009	 */
3010	if (em->start <= alloc_start &&
3011	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3012		const u64 em_end = em->start + em->len;
3013
3014		if (em_end >= offset + len) {
3015			/*
3016			 * The whole range is already a prealloc extent,
3017			 * do nothing except updating the inode's i_size if
3018			 * needed.
3019			 */
3020			free_extent_map(em);
3021			ret = btrfs_fallocate_update_isize(inode, offset + len,
3022							   mode);
3023			goto out;
3024		}
3025		/*
3026		 * Part of the range is already a prealloc extent, so operate
3027		 * only on the remaining part of the range.
3028		 */
3029		alloc_start = em_end;
3030		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3031		len = offset + len - alloc_start;
3032		offset = alloc_start;
3033		alloc_hint = em->block_start + em->len;
3034	}
3035	free_extent_map(em);
3036
3037	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3038	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3039		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
3040				      alloc_start, sectorsize, 0);
3041		if (IS_ERR(em)) {
3042			ret = PTR_ERR(em);
3043			goto out;
3044		}
3045
3046		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3047			free_extent_map(em);
3048			ret = btrfs_fallocate_update_isize(inode, offset + len,
3049							   mode);
3050			goto out;
3051		}
3052		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3053			free_extent_map(em);
3054			ret = btrfs_truncate_block(inode, offset, len, 0);
3055			if (!ret)
3056				ret = btrfs_fallocate_update_isize(inode,
3057								   offset + len,
3058								   mode);
3059			return ret;
3060		}
3061		free_extent_map(em);
3062		alloc_start = round_down(offset, sectorsize);
3063		alloc_end = alloc_start + sectorsize;
3064		goto reserve_space;
3065	}
3066
3067	alloc_start = round_up(offset, sectorsize);
3068	alloc_end = round_down(offset + len, sectorsize);
3069
3070	/*
3071	 * For unaligned ranges, check the pages at the boundaries, they might
3072	 * map to an extent, in which case we need to partially zero them, or
3073	 * they might map to a hole, in which case we need our allocation range
3074	 * to cover them.
3075	 */
3076	if (!IS_ALIGNED(offset, sectorsize)) {
3077		ret = btrfs_zero_range_check_range_boundary(inode, offset);
3078		if (ret < 0)
3079			goto out;
3080		if (ret == RANGE_BOUNDARY_HOLE) {
3081			alloc_start = round_down(offset, sectorsize);
3082			ret = 0;
3083		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3084			ret = btrfs_truncate_block(inode, offset, 0, 0);
3085			if (ret)
3086				goto out;
3087		} else {
3088			ret = 0;
3089		}
3090	}
3091
3092	if (!IS_ALIGNED(offset + len, sectorsize)) {
3093		ret = btrfs_zero_range_check_range_boundary(inode,
3094							    offset + len);
3095		if (ret < 0)
3096			goto out;
3097		if (ret == RANGE_BOUNDARY_HOLE) {
3098			alloc_end = round_up(offset + len, sectorsize);
3099			ret = 0;
3100		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3101			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
3102			if (ret)
3103				goto out;
3104		} else {
3105			ret = 0;
3106		}
3107	}
3108
3109reserve_space:
3110	if (alloc_start < alloc_end) {
3111		struct extent_state *cached_state = NULL;
3112		const u64 lockstart = alloc_start;
3113		const u64 lockend = alloc_end - 1;
3114
3115		bytes_to_reserve = alloc_end - alloc_start;
3116		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3117						      bytes_to_reserve);
3118		if (ret < 0)
3119			goto out;
3120		space_reserved = true;
3121		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3122						alloc_start, bytes_to_reserve);
3123		if (ret)
3124			goto out;
3125		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3126						  &cached_state);
3127		if (ret)
3128			goto out;
3129		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3130						alloc_end - alloc_start,
3131						i_blocksize(inode),
3132						offset + len, &alloc_hint);
3133		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3134				     lockend, &cached_state);
3135		/* btrfs_prealloc_file_range releases reserved space on error */
3136		if (ret) {
3137			space_reserved = false;
3138			goto out;
3139		}
3140	}
3141	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3142 out:
3143	if (ret && space_reserved)
3144		btrfs_free_reserved_data_space(inode, data_reserved,
3145					       alloc_start, bytes_to_reserve);
3146	extent_changeset_free(data_reserved);
3147
3148	return ret;
3149}
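
/*
 * Illustrative userspace sketch (editor's addition): the
 * FALLOC_FL_ZERO_RANGE mode served by btrfs_zero_range() above.  Instead
 * of writing zeros, btrfs converts the range to holes or preallocated
 * extents.  Path and offsets are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/zero-test", O_RDWR);

	if (fd < 0)
		return 1;
	/* Zero bytes [4K, 68K) without changing i_size. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      4096, 65536) < 0) {
		perror("fallocate");
		return 1;
	}
	return close(fd);
}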
3150
3151static long btrfs_fallocate(struct file *file, int mode,
3152			    loff_t offset, loff_t len)
3153{
3154	struct inode *inode = file_inode(file);
3155	struct extent_state *cached_state = NULL;
3156	struct extent_changeset *data_reserved = NULL;
3157	struct falloc_range *range;
3158	struct falloc_range *tmp;
3159	struct list_head reserve_list;
3160	u64 cur_offset;
3161	u64 last_byte;
3162	u64 alloc_start;
3163	u64 alloc_end;
3164	u64 alloc_hint = 0;
3165	u64 locked_end;
3166	u64 actual_end = 0;
3167	struct extent_map *em;
3168	int blocksize = btrfs_inode_sectorsize(inode);
3169	int ret;
3170
3171	alloc_start = round_down(offset, blocksize);
3172	alloc_end = round_up(offset + len, blocksize);
3173	cur_offset = alloc_start;
3174
3175	/* Make sure we aren't being given some invalid mode */
3176	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3177		     FALLOC_FL_ZERO_RANGE))
3178		return -EOPNOTSUPP;
3179
3180	if (mode & FALLOC_FL_PUNCH_HOLE)
3181		return btrfs_punch_hole(inode, offset, len);
3182
3183	/*
3184	 * Only trigger disk allocation here, don't trigger qgroup reserve
3185	 *
3186	 * Qgroup space will be checked later.
3187	 */
3188	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3189		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3190						      alloc_end - alloc_start);
3191		if (ret < 0)
3192			return ret;
3193	}
3194
3195	inode_lock(inode);
3196
3197	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3198		ret = inode_newsize_ok(inode, offset + len);
3199		if (ret)
3200			goto out;
3201	}
3202
3203	/*
3204	 * TODO: Move these two operations after we have checked
3205	 * accurate reserved space, or fallocate can still fail but
3206	 * with page truncated or size expanded.
3207	 *
3208	 * But that's a minor problem and won't do much harm BTW.
3209	 */
3210	if (alloc_start > inode->i_size) {
3211		ret = btrfs_cont_expand(inode, i_size_read(inode),
3212					alloc_start);
3213		if (ret)
3214			goto out;
3215	} else if (offset + len > inode->i_size) {
3216		/*
3217		 * If we are fallocating from the end of the file onward we
3218		 * need to zero out the end of the block if i_size lands in the
3219		 * middle of a block.
3220		 */
3221		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3222		if (ret)
3223			goto out;
3224	}
3225
3226	/*
3227	 * wait for ordered IO before we have any locks.  We'll loop again
3228	 * below with the locks held.
3229	 */
3230	ret = btrfs_wait_ordered_range(inode, alloc_start,
3231				       alloc_end - alloc_start);
3232	if (ret)
3233		goto out;
3234
3235	if (mode & FALLOC_FL_ZERO_RANGE) {
3236		ret = btrfs_zero_range(inode, offset, len, mode);
3237		inode_unlock(inode);
3238		return ret;
3239	}
3240
3241	locked_end = alloc_end - 1;
3242	while (1) {
3243		struct btrfs_ordered_extent *ordered;
3244
3245		/* the extent lock is ordered inside the running
3246		 * transaction
3247		 */
3248		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3249				 locked_end, &cached_state);
3250		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3251
3252		if (ordered &&
3253		    ordered->file_offset + ordered->len > alloc_start &&
3254		    ordered->file_offset < alloc_end) {
3255			btrfs_put_ordered_extent(ordered);
3256			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3257					     alloc_start, locked_end,
3258					     &cached_state);
3259			/*
3260			 * we can't wait on the range with the transaction
3261			 * running or with the extent lock held
3262			 */
3263			ret = btrfs_wait_ordered_range(inode, alloc_start,
3264						       alloc_end - alloc_start);
3265			if (ret)
3266				goto out;
3267		} else {
3268			if (ordered)
3269				btrfs_put_ordered_extent(ordered);
3270			break;
3271		}
3272	}
3273
3274	/* First, check if we exceed the qgroup limit */
3275	INIT_LIST_HEAD(&reserve_list);
3276	while (cur_offset < alloc_end) {
3277		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3278				      alloc_end - cur_offset, 0);
3279		if (IS_ERR(em)) {
3280			ret = PTR_ERR(em);
3281			break;
3282		}
3283		last_byte = min(extent_map_end(em), alloc_end);
3284		actual_end = min_t(u64, extent_map_end(em), offset + len);
3285		last_byte = ALIGN(last_byte, blocksize);
3286		if (em->block_start == EXTENT_MAP_HOLE ||
3287		    (cur_offset >= inode->i_size &&
3288		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3289			ret = add_falloc_range(&reserve_list, cur_offset,
3290					       last_byte - cur_offset);
3291			if (ret < 0) {
3292				free_extent_map(em);
3293				break;
3294			}
3295			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3296					cur_offset, last_byte - cur_offset);
3297			if (ret < 0) {
3298				cur_offset = last_byte;
3299				free_extent_map(em);
3300				break;
3301			}
3302		} else {
3303			/*
3304			 * No need to reserve an unwritten extent for this
3305			 * range; free the reserved data space first, otherwise
3306			 * it'll result in a false ENOSPC error.
3307			 */
3308			btrfs_free_reserved_data_space(inode, data_reserved,
3309					cur_offset, last_byte - cur_offset);
3310		}
3311		free_extent_map(em);
3312		cur_offset = last_byte;
3313	}
3314
3315	/*
3316	 * If ret is still 0, we're OK to fallocate.
3317	 * Otherwise just clean up the list and exit.
3318	 */
3319	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3320		if (!ret)
3321			ret = btrfs_prealloc_file_range(inode, mode,
3322					range->start,
3323					range->len, i_blocksize(inode),
3324					offset + len, &alloc_hint);
3325		else
3326			btrfs_free_reserved_data_space(inode,
3327					data_reserved, range->start,
3328					range->len);
3329		list_del(&range->list);
3330		kfree(range);
3331	}
3332	if (ret < 0)
3333		goto out_unlock;
3334
3335	/*
3336	 * We didn't need to allocate any more space, but we still extended the
3337	 * size of the file so we need to update i_size and the inode item.
3338	 */
3339	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3340out_unlock:
3341	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3342			     &cached_state);
3343out:
3344	inode_unlock(inode);
3345	/* Let go of our reservation. */
3346	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3347		btrfs_free_reserved_data_space(inode, data_reserved,
3348				cur_offset, alloc_end - cur_offset);
3349	extent_changeset_free(data_reserved);
3350	return ret;
3351}
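
/*
 * Illustrative userspace sketch (editor's addition): plain preallocation
 * (mode 0), the default case of btrfs_fallocate() above.  It reserves
 * unwritten extents and extends i_size, so later writes into the range
 * should not fail with ENOSPC for data.  The path is an assumption.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/prealloc-test", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (fallocate(fd, 0, 0, 1024 * 1024) < 0) {	/* preallocate 1 MiB */
		perror("fallocate");
		return 1;
	}
	return close(fd);
}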
3352
3353static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3354{
3355	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3356	struct extent_map *em = NULL;
3357	struct extent_state *cached_state = NULL;
3358	u64 lockstart;
3359	u64 lockend;
3360	u64 start;
3361	u64 len;
3362	int ret = 0;
3363
3364	if (inode->i_size == 0)
3365		return -ENXIO;
3366
3367	/*
3368	 * *offset can be negative; in that case we start finding DATA/HOLE
3369	 * from the very start of the file.
3370	 */
3371	start = max_t(loff_t, 0, *offset);
3372
3373	lockstart = round_down(start, fs_info->sectorsize);
3374	lockend = round_up(i_size_read(inode),
3375			   fs_info->sectorsize);
3376	if (lockend <= lockstart)
3377		lockend = lockstart + fs_info->sectorsize;
3378	lockend--;
3379	len = lockend - lockstart + 1;
3380
3381	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3382			 &cached_state);
3383
3384	while (start < inode->i_size) {
3385		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
3386		if (IS_ERR(em)) {
3387			ret = PTR_ERR(em);
3388			em = NULL;
3389			break;
3390		}
3391
3392		if (whence == SEEK_HOLE &&
3393		    (em->block_start == EXTENT_MAP_HOLE ||
3394		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3395			break;
3396		else if (whence == SEEK_DATA &&
3397			   (em->block_start != EXTENT_MAP_HOLE &&
3398			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3399			break;
3400
3401		start = em->start + em->len;
3402		free_extent_map(em);
3403		em = NULL;
3404		cond_resched();
3405	}
3406	free_extent_map(em);
3407	if (!ret) {
3408		if (whence == SEEK_DATA && start >= inode->i_size)
3409			ret = -ENXIO;
3410		else
3411			*offset = min_t(loff_t, start, inode->i_size);
3412	}
3413	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3414			     &cached_state);
3415	return ret;
3416}
3417
3418static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3419{
3420	struct inode *inode = file->f_mapping->host;
3421	int ret;
3422
3423	inode_lock(inode);
3424	switch (whence) {
3425	case SEEK_END:
3426	case SEEK_CUR:
3427		offset = generic_file_llseek(file, offset, whence);
3428		goto out;
3429	case SEEK_DATA:
3430	case SEEK_HOLE:
3431		if (offset >= i_size_read(inode)) {
3432			inode_unlock(inode);
3433			return -ENXIO;
3434		}
3435
3436		ret = find_desired_extent(inode, &offset, whence);
3437		if (ret) {
3438			inode_unlock(inode);
3439			return ret;
3440		}
3441	}
3442
3443	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3444out:
3445	inode_unlock(inode);
3446	return offset;
3447}
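
/*
 * Illustrative userspace sketch (editor's addition): walking a sparse
 * file's data regions with lseek(SEEK_DATA)/lseek(SEEK_HOLE), which is
 * resolved by find_desired_extent() above.  lseek() fails with ENXIO once
 * no more data exists, terminating the loop.  The path is an assumption.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t data, hole;
	int fd = open("/mnt/btrfs/sparse-test", O_RDONLY);

	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);
	while (data >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = lseek(fd, hole, SEEK_DATA);
	}
	return close(fd);
}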
3448
3449static int btrfs_file_open(struct inode *inode, struct file *filp)
3450{
3451	filp->f_mode |= FMODE_NOWAIT;
3452	return generic_file_open(inode, filp);
3453}
3454
3455const struct file_operations btrfs_file_operations = {
3456	.llseek		= btrfs_file_llseek,
3457	.read_iter      = generic_file_read_iter,
3458	.splice_read	= generic_file_splice_read,
3459	.write_iter	= btrfs_file_write_iter,
3460	.mmap		= btrfs_file_mmap,
3461	.open		= btrfs_file_open,
3462	.release	= btrfs_release_file,
3463	.fsync		= btrfs_sync_file,
3464	.fallocate	= btrfs_fallocate,
3465	.unlocked_ioctl	= btrfs_ioctl,
3466#ifdef CONFIG_COMPAT
3467	.compat_ioctl	= btrfs_compat_ioctl,
3468#endif
3469	.remap_file_range = btrfs_remap_file_range,
3470};
3471
3472void __cold btrfs_auto_defrag_exit(void)
3473{
3474	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3475}
3476
3477int __init btrfs_auto_defrag_init(void)
3478{
3479	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3480					sizeof(struct inode_defrag), 0,
3481					SLAB_MEM_SPREAD,
3482					NULL);
3483	if (!btrfs_inode_defrag_cachep)
3484		return -ENOMEM;
3485
3486	return 0;
3487}
3488
3489int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3490{
3491	int ret;
3492
3493	/*
3494	 * So with compression we will find and lock a dirty page and clear the
3495 * first one as dirty, set up an async extent, and immediately return
3496	 * with the entire range locked but with nobody actually marked with
3497	 * writeback.  So we can't just filemap_write_and_wait_range() and
3498	 * expect it to work since it will just kick off a thread to do the
3499	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3500	 * since it will wait on the page lock, which won't be unlocked until
3501	 * after the pages have been marked as writeback and so we're good to go
3502	 * from there.  We have to do this otherwise we'll miss the ordered
3503	 * extents and that results in badness.  Please Josef, do not think you
3504	 * know better and pull this out at some point in the future, it is
3505	 * right and you are wrong.
3506	 */
3507	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3508	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3509			     &BTRFS_I(inode)->runtime_flags))
3510		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3511
3512	return ret;
3513}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/highmem.h>
   9#include <linux/time.h>
  10#include <linux/init.h>
  11#include <linux/string.h>
  12#include <linux/backing-dev.h>
  13#include <linux/mpage.h>
  14#include <linux/falloc.h>
  15#include <linux/swap.h>
  16#include <linux/writeback.h>
  17#include <linux/compat.h>
  18#include <linux/slab.h>
  19#include <linux/btrfs.h>
  20#include <linux/uio.h>
  21#include <linux/iversion.h>
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "print-tree.h"
  27#include "tree-log.h"
  28#include "locking.h"
  29#include "volumes.h"
  30#include "qgroup.h"
  31#include "compression.h"
 
  32
  33static struct kmem_cache *btrfs_inode_defrag_cachep;
  34/*
  35 * when auto defrag is enabled we
  36 * queue up these defrag structs to remember which
  37 * inodes need defragging passes
  38 */
  39struct inode_defrag {
  40	struct rb_node rb_node;
  41	/* objectid */
  42	u64 ino;
  43	/*
  44	 * transid where the defrag was added, we search for
  45	 * extents newer than this
  46	 */
  47	u64 transid;
  48
  49	/* root objectid */
  50	u64 root;
  51
  52	/* last offset we were able to defrag */
  53	u64 last_offset;
  54
  55	/* if we've wrapped around back to zero once already */
  56	int cycled;
  57};
  58
  59static int __compare_inode_defrag(struct inode_defrag *defrag1,
  60				  struct inode_defrag *defrag2)
  61{
  62	if (defrag1->root > defrag2->root)
  63		return 1;
  64	else if (defrag1->root < defrag2->root)
  65		return -1;
  66	else if (defrag1->ino > defrag2->ino)
  67		return 1;
  68	else if (defrag1->ino < defrag2->ino)
  69		return -1;
  70	else
  71		return 0;
  72}
  73
  74/* pop a record for an inode into the defrag tree.  The lock
  75 * must be held already
  76 *
  77 * If you're inserting a record for an older transid than an
  78 * existing record, the transid already in the tree is lowered
  79 *
  80 * If an existing record is found the defrag item you
  81 * pass in is freed
  82 */
  83static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  84				    struct inode_defrag *defrag)
  85{
  86	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
  87	struct inode_defrag *entry;
  88	struct rb_node **p;
  89	struct rb_node *parent = NULL;
  90	int ret;
  91
  92	p = &fs_info->defrag_inodes.rb_node;
  93	while (*p) {
  94		parent = *p;
  95		entry = rb_entry(parent, struct inode_defrag, rb_node);
  96
  97		ret = __compare_inode_defrag(defrag, entry);
  98		if (ret < 0)
  99			p = &parent->rb_left;
 100		else if (ret > 0)
 101			p = &parent->rb_right;
 102		else {
 103			/* if we're reinserting an entry for
 104			 * an old defrag run, make sure to
 105			 * lower the transid of our existing record
 106			 */
 107			if (defrag->transid < entry->transid)
 108				entry->transid = defrag->transid;
 109			if (defrag->last_offset > entry->last_offset)
 110				entry->last_offset = defrag->last_offset;
 111			return -EEXIST;
 112		}
 113	}
 114	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 115	rb_link_node(&defrag->rb_node, parent, p);
 116	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 117	return 0;
 118}
 119
 120static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 121{
 122	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 123		return 0;
 124
 125	if (btrfs_fs_closing(fs_info))
 126		return 0;
 127
 128	return 1;
 129}
 130
 131/*
 132 * insert a defrag record for this inode if auto defrag is
 133 * enabled
 134 */
 135int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 136			   struct btrfs_inode *inode)
 137{
 138	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 139	struct btrfs_root *root = inode->root;
 
 140	struct inode_defrag *defrag;
 141	u64 transid;
 142	int ret;
 143
 144	if (!__need_auto_defrag(fs_info))
 145		return 0;
 146
 147	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 148		return 0;
 149
 150	if (trans)
 151		transid = trans->transid;
 152	else
 153		transid = inode->root->last_trans;
 154
 155	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 156	if (!defrag)
 157		return -ENOMEM;
 158
 159	defrag->ino = btrfs_ino(inode);
 160	defrag->transid = transid;
 161	defrag->root = root->root_key.objectid;
 162
 163	spin_lock(&fs_info->defrag_inodes_lock);
 164	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
 165		/*
 166		 * If we set the IN_DEFRAG flag and the inode is then evicted
 167		 * and re-read, the new in-memory inode won't have IN_DEFRAG
 168		 * set, so we may still find an existing defrag record here.
 169		 */
 170		ret = __btrfs_add_inode_defrag(inode, defrag);
 171		if (ret)
 172			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 173	} else {
 174		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 175	}
 176	spin_unlock(&fs_info->defrag_inodes_lock);
 177	return 0;
 178}
 179
 180/*
 181 * Requeue the defrag object. If there is a defrag object that points to
 182 * the same inode in the tree, we will merge them together (by
 183 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 184 */
 185static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 186				       struct inode_defrag *defrag)
 187{
 188	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 189	int ret;
 190
 191	if (!__need_auto_defrag(fs_info))
 192		goto out;
 193
 194	/*
 195	 * Here we don't check the IN_DEFRAG flag, because we need to merge
 196	 * the records together.
 197	 */
 198	spin_lock(&fs_info->defrag_inodes_lock);
 199	ret = __btrfs_add_inode_defrag(inode, defrag);
 200	spin_unlock(&fs_info->defrag_inodes_lock);
 201	if (ret)
 202		goto out;
 203	return;
 204out:
 205	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 206}
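
/*
 * Editor's note, illustrative example of the merge above: if a record for
 * the same (root, ino) pair is already queued, __btrfs_add_inode_defrag()
 * returns -EEXIST after keeping the smaller transid and the larger
 * last_offset of the two records, and the requeued copy is freed here.
 */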
 207
 208/*
 209 * Pick the defraggable inode that we want; if it doesn't exist, we will
 210 * get the next one.
 211 */
 212static struct inode_defrag *
 213btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 214{
 215	struct inode_defrag *entry = NULL;
 216	struct inode_defrag tmp;
 217	struct rb_node *p;
 218	struct rb_node *parent = NULL;
 219	int ret;
 220
 221	tmp.ino = ino;
 222	tmp.root = root;
 223
 224	spin_lock(&fs_info->defrag_inodes_lock);
 225	p = fs_info->defrag_inodes.rb_node;
 226	while (p) {
 227		parent = p;
 228		entry = rb_entry(parent, struct inode_defrag, rb_node);
 229
 230		ret = __compare_inode_defrag(&tmp, entry);
 231		if (ret < 0)
 232			p = parent->rb_left;
 233		else if (ret > 0)
 234			p = parent->rb_right;
 235		else
 236			goto out;
 237	}
 238
 239	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 240		parent = rb_next(parent);
 241		if (parent)
 242			entry = rb_entry(parent, struct inode_defrag, rb_node);
 243		else
 244			entry = NULL;
 245	}
 246out:
 247	if (entry)
 248		rb_erase(parent, &fs_info->defrag_inodes);
 249	spin_unlock(&fs_info->defrag_inodes_lock);
 250	return entry;
 251}
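
/*
 * Editor's note, illustrative example for btrfs_pick_defrag_inode(): with
 * records {(root 5, ino 100), (5, 260), (7, 30)} queued, a lookup for
 * (5, 261) has no exact match, so depending on where the search ends the
 * rb_next() fixup (or the descent itself) lands on (7, 30), the next
 * record in (root, ino) order. The chosen entry is always erased from
 * the tree before the lock is dropped.
 */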
 252
 253void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 254{
 255	struct inode_defrag *defrag;
 256	struct rb_node *node;
 257
 258	spin_lock(&fs_info->defrag_inodes_lock);
 259	node = rb_first(&fs_info->defrag_inodes);
 260	while (node) {
 261		rb_erase(node, &fs_info->defrag_inodes);
 262		defrag = rb_entry(node, struct inode_defrag, rb_node);
 263		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 264
 265		cond_resched_lock(&fs_info->defrag_inodes_lock);
 266
 267		node = rb_first(&fs_info->defrag_inodes);
 268	}
 269	spin_unlock(&fs_info->defrag_inodes_lock);
 270}
 271
 272#define BTRFS_DEFRAG_BATCH	1024
 273
 274static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 275				    struct inode_defrag *defrag)
 276{
 277	struct btrfs_root *inode_root;
 278	struct inode *inode;
 279	struct btrfs_key key;
 280	struct btrfs_ioctl_defrag_range_args range;
 281	int num_defrag;
 282	int index;
 283	int ret;
 284
 285	/* get the inode */
 286	key.objectid = defrag->root;
 287	key.type = BTRFS_ROOT_ITEM_KEY;
 288	key.offset = (u64)-1;
 289
 290	index = srcu_read_lock(&fs_info->subvol_srcu);
 291
 292	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
 293	if (IS_ERR(inode_root)) {
 294		ret = PTR_ERR(inode_root);
 295		goto cleanup;
 296	}
 297
 298	key.objectid = defrag->ino;
 299	key.type = BTRFS_INODE_ITEM_KEY;
 300	key.offset = 0;
 301	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
 302	if (IS_ERR(inode)) {
 303		ret = PTR_ERR(inode);
 304		goto cleanup;
 305	}
 306	srcu_read_unlock(&fs_info->subvol_srcu, index);
 307
 308	/* do a chunk of defrag */
 309	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 310	memset(&range, 0, sizeof(range));
 311	range.len = (u64)-1;
 312	range.start = defrag->last_offset;
 313
 314	sb_start_write(fs_info->sb);
 315	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 316				       BTRFS_DEFRAG_BATCH);
 317	sb_end_write(fs_info->sb);
 318	/*
 319	 * if we filled the whole defrag batch, there
 320	 * must be more work to do.  Queue this defrag
 321	 * again
 322	 */
 323	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 324		defrag->last_offset = range.start;
 325		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 326	} else if (defrag->last_offset && !defrag->cycled) {
 327		/*
 328		 * we didn't fill our defrag batch, but
 329		 * we didn't start at zero.  Make sure we loop
 330		 * around to the start of the file.
 331		 */
 332		defrag->last_offset = 0;
 333		defrag->cycled = 1;
 334		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 335	} else {
 336		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 337	}
 338
 339	iput(inode);
 340	return 0;
 341cleanup:
 342	srcu_read_unlock(&fs_info->subvol_srcu, index);
 343	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 344	return ret;
 345}
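
/*
 * Editor's note, walk-through of the requeue logic above: if
 * btrfs_defrag_file() fills the whole batch of 1024 extents, the record
 * is requeued to continue from range.start; if it defrags less but began
 * at a non-zero offset, last_offset is reset to 0 and the record is
 * requeued once more (cycled = 1) so the start of the file gets covered.
 */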
 346
 347/*
 348 * run through the list of inodes in the FS that need
 349 * defragging
 350 */
 351int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 352{
 353	struct inode_defrag *defrag;
 354	u64 first_ino = 0;
 355	u64 root_objectid = 0;
 356
 357	atomic_inc(&fs_info->defrag_running);
 358	while (1) {
 359		/* Pause the auto defragger. */
 360		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 361			     &fs_info->fs_state))
 362			break;
 363
 364		if (!__need_auto_defrag(fs_info))
 365			break;
 366
 367		/* find an inode to defrag */
 368		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 369						 first_ino);
 370		if (!defrag) {
 371			if (root_objectid || first_ino) {
 372				root_objectid = 0;
 373				first_ino = 0;
 374				continue;
 375			} else {
 376				break;
 377			}
 378		}
 379
 380		first_ino = defrag->ino + 1;
 381		root_objectid = defrag->root;
 382
 383		__btrfs_run_defrag_inode(fs_info, defrag);
 384	}
 385	atomic_dec(&fs_info->defrag_running);
 386
 387	/*
 388	 * during unmount, we use the transaction_wait queue to
 389	 * wait for the defragger to stop
 390	 */
 391	wake_up(&fs_info->transaction_wait);
 392	return 0;
 393}
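
/*
 * Editor's note: (root_objectid, first_ino) acts as a cursor here.
 * Illustrative run: after picking (root 5, ino 100) the cursor becomes
 * (5, 101); when no record at or after the cursor remains, it is reset
 * to (0, 0) exactly once, and the loop exits when the tree is drained.
 */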
 394
 395/* simple helper to fault in pages and copy.  This should go away
 396 * and be replaced with calls into generic code.
 397 */
 398static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 399					 struct page **prepared_pages,
 400					 struct iov_iter *i)
 401{
 402	size_t copied = 0;
 403	size_t total_copied = 0;
 404	int pg = 0;
 405	int offset = pos & (PAGE_SIZE - 1);
 406
 407	while (write_bytes > 0) {
 408		size_t count = min_t(size_t,
 409				     PAGE_SIZE - offset, write_bytes);
 410		struct page *page = prepared_pages[pg];
 411		/*
 412		 * Copy data from userspace to the current page
 413		 */
 414		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 415
 416		/* Flush processor's dcache for this page */
 417		flush_dcache_page(page);
 418
 419		/*
 420		 * if we get a partial write, we can end up with
 421		 * partially up to date pages.  These add
 422		 * a lot of complexity, so make sure they don't
 423		 * happen by forcing this copy to be retried.
 424		 *
 425		 * The rest of the btrfs_file_write code will fall
 426		 * back to page at a time copies after we return 0.
 427		 */
 428		if (!PageUptodate(page) && copied < count)
 429			copied = 0;
 430
 431		iov_iter_advance(i, copied);
 432		write_bytes -= copied;
 433		total_copied += copied;
 434
 435		/* Return to btrfs_file_write_iter to fault page */
 436		if (unlikely(copied == 0))
 437			break;
 438
 439		if (copied < PAGE_SIZE - offset) {
 440			offset += copied;
 441		} else {
 442			pg++;
 443			offset = 0;
 444		}
 445	}
 446	return total_copied;
 447}
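
/*
 * Editor's note, a worked example assuming 4K pages: for pos = 5000,
 * offset = 5000 & 4095 = 904, so the first iteration copies at most
 * 4096 - 904 = 3192 bytes into prepared_pages[0]; each following page
 * is then filled starting from offset 0.
 */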
 448
 449/*
 450 * unlocks pages after btrfs_file_write is done with them
 451 */
 452static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 453{
 454	size_t i;
 455	for (i = 0; i < num_pages; i++) {
 456		/* PageChecked is some magic around finding pages that
 457		 * have been modified without going through btrfs_set_page_dirty;
 458		 * clear it here. There should be no need to mark the pages
 459		 * accessed, as prepare_pages should have marked them accessed
 460		 * via find_or_create_page().
 461		 */
 462		ClearPageChecked(pages[i]);
 463		unlock_page(pages[i]);
 464		put_page(pages[i]);
 465	}
 466}
 467
 468static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
 469					 const u64 start,
 470					 const u64 len,
 471					 struct extent_state **cached_state)
 472{
 473	u64 search_start = start;
 474	const u64 end = start + len - 1;
 475
 476	while (search_start < end) {
 477		const u64 search_len = end - search_start + 1;
 478		struct extent_map *em;
 479		u64 em_len;
 480		int ret = 0;
 481
 482		em = btrfs_get_extent(inode, NULL, 0, search_start,
 483				      search_len, 0);
 484		if (IS_ERR(em))
 485			return PTR_ERR(em);
 486
 487		if (em->block_start != EXTENT_MAP_HOLE)
 488			goto next;
 489
 490		em_len = em->len;
 491		if (em->start < search_start)
 492			em_len -= search_start - em->start;
 493		if (em_len > search_len)
 494			em_len = search_len;
 495
 496		ret = set_extent_bit(&inode->io_tree, search_start,
 497				     search_start + em_len - 1,
 498				     EXTENT_DELALLOC_NEW,
 499				     NULL, cached_state, GFP_NOFS);
 500next:
 501		search_start = extent_map_end(em);
 502		free_extent_map(em);
 503		if (ret)
 504			return ret;
 505	}
 506	return 0;
 507}
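
/*
 * Editor's note, illustrative example: for a range [0, 16K) where
 * [0, 4K) is covered by an existing extent and [4K, 16K) is a hole, the
 * first extent map is skipped (its block_start != EXTENT_MAP_HOLE),
 * search_start advances to 4K, and EXTENT_DELALLOC_NEW is set only on
 * [4K, 16K - 1].
 */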
 508
 509/*
 510 * after copy_from_user, pages need to be dirtied and we need to make
 511 * sure holes are created between the current EOF and the start of
 512 * any next extents (if required).
 513 *
 514 * this also makes the decision about creating an inline extent vs
 515 * doing real data extents, marking pages dirty and delalloc as required.
 516 */
 517int btrfs_dirty_pages(struct inode *inode, struct page **pages,
 518		      size_t num_pages, loff_t pos, size_t write_bytes,
 519		      struct extent_state **cached)
 520{
 521	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 522	int err = 0;
 523	int i;
 524	u64 num_bytes;
 525	u64 start_pos;
 526	u64 end_of_last_block;
 527	u64 end_pos = pos + write_bytes;
 528	loff_t isize = i_size_read(inode);
 529	unsigned int extra_bits = 0;
 530
 531	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
 532	num_bytes = round_up(write_bytes + pos - start_pos,
 533			     fs_info->sectorsize);
 534
 535	end_of_last_block = start_pos + num_bytes - 1;
 536
 537	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
 538		if (start_pos >= isize &&
 539		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
 540			/*
 541			 * There can't be any extents following eof in this case
 542			 * so just set the delalloc new bit for the range
 543			 * directly.
 544			 */
 545			extra_bits |= EXTENT_DELALLOC_NEW;
 546		} else {
 547			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
 548							    start_pos,
 549							    num_bytes, cached);
 550			if (err)
 551				return err;
 552		}
 553	}
 554
 555	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 556					extra_bits, cached, 0);
 557	if (err)
 558		return err;
 559
 560	for (i = 0; i < num_pages; i++) {
 561		struct page *p = pages[i];
 562		SetPageUptodate(p);
 563		ClearPageChecked(p);
 564		set_page_dirty(p);
 565	}
 566
 567	/*
 568	 * we've only changed i_size in ram, and we haven't updated
 569	 * the disk i_size.  There is no need to log the inode
 570	 * at this time.
 571	 */
 572	if (end_pos > isize)
 573		i_size_write(inode, end_pos);
 574	return 0;
 575}
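
/*
 * Editor's note, a worked example of the rounding above assuming a 4K
 * sectorsize: pos = 6000 and write_bytes = 1000 give start_pos = 4096,
 * num_bytes = round_up(1000 + 6000 - 4096, 4096) = 4096 and
 * end_of_last_block = 8191, i.e. delalloc is set on the single sector
 * containing the write.
 */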
 576
 577/*
 578 * this drops all the extents in the cache that intersect the range
 579 * [start, end].  Existing extents are split as required.
 580 */
 581void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 582			     int skip_pinned)
 583{
 584	struct extent_map *em;
 585	struct extent_map *split = NULL;
 586	struct extent_map *split2 = NULL;
 587	struct extent_map_tree *em_tree = &inode->extent_tree;
 588	u64 len = end - start + 1;
 589	u64 gen;
 590	int ret;
 591	int testend = 1;
 592	unsigned long flags;
 593	int compressed = 0;
 594	bool modified;
 595
 596	WARN_ON(end < start);
 597	if (end == (u64)-1) {
 598		len = (u64)-1;
 599		testend = 0;
 600	}
 601	while (1) {
 602		int no_splits = 0;
 603
 604		modified = false;
 605		if (!split)
 606			split = alloc_extent_map();
 607		if (!split2)
 608			split2 = alloc_extent_map();
 609		if (!split || !split2)
 610			no_splits = 1;
 611
 612		write_lock(&em_tree->lock);
 613		em = lookup_extent_mapping(em_tree, start, len);
 614		if (!em) {
 615			write_unlock(&em_tree->lock);
 616			break;
 617		}
 618		flags = em->flags;
 619		gen = em->generation;
 620		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 621			if (testend && em->start + em->len >= start + len) {
 622				free_extent_map(em);
 623				write_unlock(&em_tree->lock);
 624				break;
 625			}
 626			start = em->start + em->len;
 627			if (testend)
 628				len = start + len - (em->start + em->len);
 629			free_extent_map(em);
 630			write_unlock(&em_tree->lock);
 631			continue;
 632		}
 633		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 634		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 635		clear_bit(EXTENT_FLAG_LOGGING, &flags);
 636		modified = !list_empty(&em->list);
 637		if (no_splits)
 638			goto next;
 639
 640		if (em->start < start) {
 641			split->start = em->start;
 642			split->len = start - em->start;
 643
 644			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 645				split->orig_start = em->orig_start;
 646				split->block_start = em->block_start;
 647
 648				if (compressed)
 649					split->block_len = em->block_len;
 650				else
 651					split->block_len = split->len;
 652				split->orig_block_len = max(split->block_len,
 653						em->orig_block_len);
 654				split->ram_bytes = em->ram_bytes;
 655			} else {
 656				split->orig_start = split->start;
 657				split->block_len = 0;
 658				split->block_start = em->block_start;
 659				split->orig_block_len = 0;
 660				split->ram_bytes = split->len;
 661			}
 662
 663			split->generation = gen;
 664			split->bdev = em->bdev;
 665			split->flags = flags;
 666			split->compress_type = em->compress_type;
 667			replace_extent_mapping(em_tree, em, split, modified);
 668			free_extent_map(split);
 669			split = split2;
 670			split2 = NULL;
 671		}
 672		if (testend && em->start + em->len > start + len) {
 673			u64 diff = start + len - em->start;
 674
 675			split->start = start + len;
 676			split->len = em->start + em->len - (start + len);
 677			split->bdev = em->bdev;
 678			split->flags = flags;
 679			split->compress_type = em->compress_type;
 680			split->generation = gen;
 681
 682			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 683				split->orig_block_len = max(em->block_len,
 684						    em->orig_block_len);
 685
 686				split->ram_bytes = em->ram_bytes;
 687				if (compressed) {
 688					split->block_len = em->block_len;
 689					split->block_start = em->block_start;
 690					split->orig_start = em->orig_start;
 691				} else {
 692					split->block_len = split->len;
 693					split->block_start = em->block_start
 694						+ diff;
 695					split->orig_start = em->orig_start;
 696				}
 697			} else {
 698				split->ram_bytes = split->len;
 699				split->orig_start = split->start;
 700				split->block_len = 0;
 701				split->block_start = em->block_start;
 702				split->orig_block_len = 0;
 703			}
 704
 705			if (extent_map_in_tree(em)) {
 706				replace_extent_mapping(em_tree, em, split,
 707						       modified);
 708			} else {
 709				ret = add_extent_mapping(em_tree, split,
 710							 modified);
 711				ASSERT(ret == 0); /* Logic error */
 712			}
 713			free_extent_map(split);
 714			split = NULL;
 715		}
 716next:
 717		if (extent_map_in_tree(em))
 718			remove_extent_mapping(em_tree, em);
 719		write_unlock(&em_tree->lock);
 720
 721		/* once for us */
 722		free_extent_map(em);
 723		/* once for the tree*/
 724		free_extent_map(em);
 725	}
 726	if (split)
 727		free_extent_map(split);
 728	if (split2)
 729		free_extent_map(split2);
 730}
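
/*
 * Editor's note, illustrative example: dropping [32K, 64K - 1] from a
 * cached extent map covering [0, 128K) replaces it with two maps, a
 * front piece [0, 32K) built in 'split' and a tail piece [64K, 128K)
 * built in the map allocated as 'split2'; the original map is removed
 * from the tree and freed.
 */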
 731
 732/*
 733 * this is very complex, but the basic idea is to drop all extents
 734 * in the range start - end.  hint_block is filled in with a block number
 735 * that would be a good hint to the block allocator for this file.
 736 *
 737 * If an extent intersects the range but is not entirely inside the range
 738 * it is either truncated or split.  Anything entirely inside the range
 739 * is deleted from the tree.
 740 */
 741int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 742			 struct btrfs_root *root, struct inode *inode,
 743			 struct btrfs_path *path, u64 start, u64 end,
 744			 u64 *drop_end, int drop_cache,
 745			 int replace_extent,
 746			 u32 extent_item_size,
 747			 int *key_inserted)
 748{
 749	struct btrfs_fs_info *fs_info = root->fs_info;
 750	struct extent_buffer *leaf;
 751	struct btrfs_file_extent_item *fi;
 752	struct btrfs_key key;
 753	struct btrfs_key new_key;
 754	u64 ino = btrfs_ino(BTRFS_I(inode));
 755	u64 search_start = start;
 756	u64 disk_bytenr = 0;
 757	u64 num_bytes = 0;
 758	u64 extent_offset = 0;
 759	u64 extent_end = 0;
 760	u64 last_end = start;
 761	int del_nr = 0;
 762	int del_slot = 0;
 763	int extent_type;
 764	int recow;
 765	int ret;
 766	int modify_tree = -1;
 767	int update_refs;
 768	int found = 0;
 769	int leafs_visited = 0;
 770
 771	if (drop_cache)
 772		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 773
 774	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 775		modify_tree = 0;
 776
 777	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 778		       root == fs_info->tree_root);
 779	while (1) {
 780		recow = 0;
 781		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 782					       search_start, modify_tree);
 783		if (ret < 0)
 784			break;
 785		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 786			leaf = path->nodes[0];
 787			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 788			if (key.objectid == ino &&
 789			    key.type == BTRFS_EXTENT_DATA_KEY)
 790				path->slots[0]--;
 791		}
 792		ret = 0;
 793		leafs_visited++;
 794next_slot:
 795		leaf = path->nodes[0];
 796		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 797			BUG_ON(del_nr > 0);
 798			ret = btrfs_next_leaf(root, path);
 799			if (ret < 0)
 800				break;
 801			if (ret > 0) {
 802				ret = 0;
 803				break;
 804			}
 805			leafs_visited++;
 806			leaf = path->nodes[0];
 807			recow = 1;
 808		}
 809
 810		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 811
 812		if (key.objectid > ino)
 813			break;
 814		if (WARN_ON_ONCE(key.objectid < ino) ||
 815		    key.type < BTRFS_EXTENT_DATA_KEY) {
 816			ASSERT(del_nr == 0);
 817			path->slots[0]++;
 818			goto next_slot;
 819		}
 820		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 821			break;
 822
 823		fi = btrfs_item_ptr(leaf, path->slots[0],
 824				    struct btrfs_file_extent_item);
 825		extent_type = btrfs_file_extent_type(leaf, fi);
 826
 827		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 828		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 829			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 830			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 831			extent_offset = btrfs_file_extent_offset(leaf, fi);
 832			extent_end = key.offset +
 833				btrfs_file_extent_num_bytes(leaf, fi);
 834		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 835			extent_end = key.offset +
 836				btrfs_file_extent_inline_len(leaf,
 837						     path->slots[0], fi);
 838		} else {
 839			/* can't happen */
 840			BUG();
 841		}
 842
 843		/*
 844		 * Don't skip extent items representing 0 byte lengths. They
 845		 * used to be created (bug) when punching holes hit an
 846		 * -ENOSPC condition. So if we find one here, just ensure we
 847		 * delete it, otherwise we would insert a new file extent item
 848		 * with the same key (offset) as that 0 bytes length file
 849		 * extent item in the call to setup_items_for_insert() later
 850		 * in this function.
 851		 */
 852		if (extent_end == key.offset && extent_end >= search_start) {
 853			last_end = extent_end;
 854			goto delete_extent_item;
 855		}
 856
 857		if (extent_end <= search_start) {
 858			path->slots[0]++;
 859			goto next_slot;
 860		}
 861
 862		found = 1;
 863		search_start = max(key.offset, start);
 864		if (recow || !modify_tree) {
 865			modify_tree = -1;
 866			btrfs_release_path(path);
 867			continue;
 868		}
 869
 870		/*
 871		 *     | - range to drop - |
 872		 *  | -------- extent -------- |
 873		 */
 874		if (start > key.offset && end < extent_end) {
 875			BUG_ON(del_nr > 0);
 876			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 877				ret = -EOPNOTSUPP;
 878				break;
 879			}
 880
 881			memcpy(&new_key, &key, sizeof(new_key));
 882			new_key.offset = start;
 883			ret = btrfs_duplicate_item(trans, root, path,
 884						   &new_key);
 885			if (ret == -EAGAIN) {
 886				btrfs_release_path(path);
 887				continue;
 888			}
 889			if (ret < 0)
 890				break;
 891
 892			leaf = path->nodes[0];
 893			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 894					    struct btrfs_file_extent_item);
 895			btrfs_set_file_extent_num_bytes(leaf, fi,
 896							start - key.offset);
 897
 898			fi = btrfs_item_ptr(leaf, path->slots[0],
 899					    struct btrfs_file_extent_item);
 900
 901			extent_offset += start - key.offset;
 902			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 903			btrfs_set_file_extent_num_bytes(leaf, fi,
 904							extent_end - start);
 905			btrfs_mark_buffer_dirty(leaf);
 906
 907			if (update_refs && disk_bytenr > 0) {
 908				ret = btrfs_inc_extent_ref(trans, root,
 909						disk_bytenr, num_bytes, 0,
 910						root->root_key.objectid,
 911						new_key.objectid,
 912						start - extent_offset);
 913				BUG_ON(ret); /* -ENOMEM */
 914			}
 915			key.offset = start;
 916		}
 917		/*
 918		 * From here on out we will have actually dropped something, so
 919		 * last_end can be updated.
 920		 */
 921		last_end = extent_end;
 922
 923		/*
 924		 *  | ---- range to drop ----- |
 925		 *      | -------- extent -------- |
 926		 */
 927		if (start <= key.offset && end < extent_end) {
 928			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 929				ret = -EOPNOTSUPP;
 930				break;
 931			}
 932
 933			memcpy(&new_key, &key, sizeof(new_key));
 934			new_key.offset = end;
 935			btrfs_set_item_key_safe(fs_info, path, &new_key);
 936
 937			extent_offset += end - key.offset;
 938			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 939			btrfs_set_file_extent_num_bytes(leaf, fi,
 940							extent_end - end);
 941			btrfs_mark_buffer_dirty(leaf);
 942			if (update_refs && disk_bytenr > 0)
 943				inode_sub_bytes(inode, end - key.offset);
 944			break;
 945		}
 946
 947		search_start = extent_end;
 948		/*
 949		 *       | ---- range to drop ----- |
 950		 *  | -------- extent -------- |
 951		 */
 952		if (start > key.offset && end >= extent_end) {
 953			BUG_ON(del_nr > 0);
 954			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 955				ret = -EOPNOTSUPP;
 956				break;
 957			}
 958
 959			btrfs_set_file_extent_num_bytes(leaf, fi,
 960							start - key.offset);
 961			btrfs_mark_buffer_dirty(leaf);
 962			if (update_refs && disk_bytenr > 0)
 963				inode_sub_bytes(inode, extent_end - start);
 964			if (end == extent_end)
 965				break;
 966
 967			path->slots[0]++;
 968			goto next_slot;
 969		}
 970
 971		/*
 972		 *  | ---- range to drop ----- |
 973		 *    | ------ extent ------ |
 974		 */
 975		if (start <= key.offset && end >= extent_end) {
 976delete_extent_item:
 977			if (del_nr == 0) {
 978				del_slot = path->slots[0];
 979				del_nr = 1;
 980			} else {
 981				BUG_ON(del_slot + del_nr != path->slots[0]);
 982				del_nr++;
 983			}
 984
 985			if (update_refs &&
 986			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
 987				inode_sub_bytes(inode,
 988						extent_end - key.offset);
 989				extent_end = ALIGN(extent_end,
 990						   fs_info->sectorsize);
 991			} else if (update_refs && disk_bytenr > 0) {
 992				ret = btrfs_free_extent(trans, root,
 993						disk_bytenr, num_bytes, 0,
 994						root->root_key.objectid,
 995						key.objectid, key.offset -
 996						extent_offset);
 997				BUG_ON(ret); /* -ENOMEM */
 998				inode_sub_bytes(inode,
 999						extent_end - key.offset);
1000			}
1001
1002			if (end == extent_end)
1003				break;
1004
1005			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1006				path->slots[0]++;
1007				goto next_slot;
1008			}
1009
1010			ret = btrfs_del_items(trans, root, path, del_slot,
1011					      del_nr);
1012			if (ret) {
1013				btrfs_abort_transaction(trans, ret);
1014				break;
1015			}
1016
1017			del_nr = 0;
1018			del_slot = 0;
1019
1020			btrfs_release_path(path);
1021			continue;
1022		}
1023
1024		BUG_ON(1);
1025	}
1026
1027	if (!ret && del_nr > 0) {
1028		/*
1029		 * Set path->slots[0] to first slot, so that after the delete
1030		 * if items are moved off from our leaf to its immediate left or
1031		 * right neighbor leafs, we end up with a correct and adjusted
1032		 * path->slots[0] for our insertion (if replace_extent != 0).
1033		 */
1034		path->slots[0] = del_slot;
1035		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1036		if (ret)
1037			btrfs_abort_transaction(trans, ret);
1038	}
1039
1040	leaf = path->nodes[0];
1041	/*
1042	 * If btrfs_del_items() was called, it might have deleted a leaf, in
1043	 * which case it unlocked our path, so check path->locks[0] matches a
1044	 * write lock.
1045	 */
1046	if (!ret && replace_extent && leafs_visited == 1 &&
1047	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1048	     path->locks[0] == BTRFS_WRITE_LOCK) &&
1049	    btrfs_leaf_free_space(fs_info, leaf) >=
1050	    sizeof(struct btrfs_item) + extent_item_size) {
1051
1052		key.objectid = ino;
1053		key.type = BTRFS_EXTENT_DATA_KEY;
1054		key.offset = start;
1055		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1056			struct btrfs_key slot_key;
1057
1058			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1059			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1060				path->slots[0]++;
1061		}
1062		setup_items_for_insert(root, path, &key,
1063				       &extent_item_size,
1064				       extent_item_size,
1065				       sizeof(struct btrfs_item) +
1066				       extent_item_size, 1);
1067		*key_inserted = 1;
1068	}
1069
1070	if (!replace_extent || !(*key_inserted))
1071		btrfs_release_path(path);
1072	if (drop_end)
1073		*drop_end = found ? min(end, last_end) : end;
1074	return ret;
1075}
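
/*
 * Editor's note, one possible walk-through of the cases above: with a
 * single extent item at [0, 64K) and a drop range of [16K, 32K), the
 * first case duplicates the item at offset 16K and trims the front to
 * [0, 16K); the same pass then hits the second case, moving the
 * duplicate's key offset to 32K and growing its extent_offset by
 * another 16K, leaving [0, 16K) and [32K, 64K) pointing into the same
 * disk extent.
 */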
1076
1077int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1078		       struct btrfs_root *root, struct inode *inode, u64 start,
1079		       u64 end, int drop_cache)
1080{
1081	struct btrfs_path *path;
1082	int ret;
1083
1084	path = btrfs_alloc_path();
1085	if (!path)
1086		return -ENOMEM;
1087	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1088				   drop_cache, 0, 0, NULL);
1089	btrfs_free_path(path);
1090	return ret;
1091}
1092
1093static int extent_mergeable(struct extent_buffer *leaf, int slot,
1094			    u64 objectid, u64 bytenr, u64 orig_offset,
1095			    u64 *start, u64 *end)
1096{
1097	struct btrfs_file_extent_item *fi;
1098	struct btrfs_key key;
1099	u64 extent_end;
1100
1101	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1102		return 0;
1103
1104	btrfs_item_key_to_cpu(leaf, &key, slot);
1105	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1106		return 0;
1107
1108	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1109	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1110	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1111	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1112	    btrfs_file_extent_compression(leaf, fi) ||
1113	    btrfs_file_extent_encryption(leaf, fi) ||
1114	    btrfs_file_extent_other_encoding(leaf, fi))
1115		return 0;
1116
1117	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1118	if ((*start && *start != key.offset) || (*end && *end != extent_end))
1119		return 0;
1120
1121	*start = key.offset;
1122	*end = extent_end;
1123	return 1;
1124}
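
/*
 * Editor's note: extent_mergeable() only treats a neighbour as mergeable
 * if it is a plain (uncompressed, unencrypted) REG extent in the same
 * disk extent at the matching position, e.g. a neighbour at file offset
 * 8K must carry extent offset 8K relative to the shared orig_offset.
 */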
1125
1126/*
1127 * Mark extent in the range start - end as written.
1128 *
1129 * This changes extent type from 'pre-allocated' to 'regular'. If only
1130 * part of extent is marked as written, the extent will be split into
1131 * two or three.
1132 */
1133int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1134			      struct btrfs_inode *inode, u64 start, u64 end)
1135{
1136	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1137	struct btrfs_root *root = inode->root;
1138	struct extent_buffer *leaf;
1139	struct btrfs_path *path;
1140	struct btrfs_file_extent_item *fi;
1141	struct btrfs_key key;
1142	struct btrfs_key new_key;
1143	u64 bytenr;
1144	u64 num_bytes;
1145	u64 extent_end;
1146	u64 orig_offset;
1147	u64 other_start;
1148	u64 other_end;
1149	u64 split;
1150	int del_nr = 0;
1151	int del_slot = 0;
1152	int recow;
1153	int ret;
1154	u64 ino = btrfs_ino(inode);
1155
1156	path = btrfs_alloc_path();
1157	if (!path)
1158		return -ENOMEM;
1159again:
1160	recow = 0;
1161	split = start;
1162	key.objectid = ino;
1163	key.type = BTRFS_EXTENT_DATA_KEY;
1164	key.offset = split;
1165
1166	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1167	if (ret < 0)
1168		goto out;
1169	if (ret > 0 && path->slots[0] > 0)
1170		path->slots[0]--;
1171
1172	leaf = path->nodes[0];
1173	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1174	if (key.objectid != ino ||
1175	    key.type != BTRFS_EXTENT_DATA_KEY) {
1176		ret = -EINVAL;
1177		btrfs_abort_transaction(trans, ret);
1178		goto out;
1179	}
1180	fi = btrfs_item_ptr(leaf, path->slots[0],
1181			    struct btrfs_file_extent_item);
1182	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1183		ret = -EINVAL;
1184		btrfs_abort_transaction(trans, ret);
1185		goto out;
1186	}
1187	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1188	if (key.offset > start || extent_end < end) {
1189		ret = -EINVAL;
1190		btrfs_abort_transaction(trans, ret);
1191		goto out;
1192	}
1193
1194	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1195	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1196	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1197	memcpy(&new_key, &key, sizeof(new_key));
1198
1199	if (start == key.offset && end < extent_end) {
1200		other_start = 0;
1201		other_end = start;
1202		if (extent_mergeable(leaf, path->slots[0] - 1,
1203				     ino, bytenr, orig_offset,
1204				     &other_start, &other_end)) {
1205			new_key.offset = end;
1206			btrfs_set_item_key_safe(fs_info, path, &new_key);
1207			fi = btrfs_item_ptr(leaf, path->slots[0],
1208					    struct btrfs_file_extent_item);
1209			btrfs_set_file_extent_generation(leaf, fi,
1210							 trans->transid);
1211			btrfs_set_file_extent_num_bytes(leaf, fi,
1212							extent_end - end);
1213			btrfs_set_file_extent_offset(leaf, fi,
1214						     end - orig_offset);
1215			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1216					    struct btrfs_file_extent_item);
1217			btrfs_set_file_extent_generation(leaf, fi,
1218							 trans->transid);
1219			btrfs_set_file_extent_num_bytes(leaf, fi,
1220							end - other_start);
1221			btrfs_mark_buffer_dirty(leaf);
1222			goto out;
1223		}
1224	}
1225
1226	if (start > key.offset && end == extent_end) {
1227		other_start = end;
1228		other_end = 0;
1229		if (extent_mergeable(leaf, path->slots[0] + 1,
1230				     ino, bytenr, orig_offset,
1231				     &other_start, &other_end)) {
1232			fi = btrfs_item_ptr(leaf, path->slots[0],
1233					    struct btrfs_file_extent_item);
1234			btrfs_set_file_extent_num_bytes(leaf, fi,
1235							start - key.offset);
1236			btrfs_set_file_extent_generation(leaf, fi,
1237							 trans->transid);
1238			path->slots[0]++;
1239			new_key.offset = start;
1240			btrfs_set_item_key_safe(fs_info, path, &new_key);
1241
1242			fi = btrfs_item_ptr(leaf, path->slots[0],
1243					    struct btrfs_file_extent_item);
1244			btrfs_set_file_extent_generation(leaf, fi,
1245							 trans->transid);
1246			btrfs_set_file_extent_num_bytes(leaf, fi,
1247							other_end - start);
1248			btrfs_set_file_extent_offset(leaf, fi,
1249						     start - orig_offset);
1250			btrfs_mark_buffer_dirty(leaf);
1251			goto out;
1252		}
1253	}
1254
1255	while (start > key.offset || end < extent_end) {
1256		if (key.offset == start)
1257			split = end;
1258
1259		new_key.offset = split;
1260		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1261		if (ret == -EAGAIN) {
1262			btrfs_release_path(path);
1263			goto again;
1264		}
1265		if (ret < 0) {
1266			btrfs_abort_transaction(trans, ret);
1267			goto out;
1268		}
1269
1270		leaf = path->nodes[0];
1271		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1272				    struct btrfs_file_extent_item);
1273		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1274		btrfs_set_file_extent_num_bytes(leaf, fi,
1275						split - key.offset);
1276
1277		fi = btrfs_item_ptr(leaf, path->slots[0],
1278				    struct btrfs_file_extent_item);
1279
1280		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1281		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1282		btrfs_set_file_extent_num_bytes(leaf, fi,
1283						extent_end - split);
1284		btrfs_mark_buffer_dirty(leaf);
1285
1286		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
1287					   0, root->root_key.objectid,
1288					   ino, orig_offset);
1289		if (ret) {
1290			btrfs_abort_transaction(trans, ret);
1291			goto out;
1292		}
1293
1294		if (split == start) {
1295			key.offset = start;
1296		} else {
1297			if (start != key.offset) {
1298				ret = -EINVAL;
1299				btrfs_abort_transaction(trans, ret);
1300				goto out;
1301			}
1302			path->slots[0]--;
1303			extent_end = end;
1304		}
1305		recow = 1;
1306	}
1307
1308	other_start = end;
1309	other_end = 0;
1310	if (extent_mergeable(leaf, path->slots[0] + 1,
1311			     ino, bytenr, orig_offset,
1312			     &other_start, &other_end)) {
1313		if (recow) {
1314			btrfs_release_path(path);
1315			goto again;
1316		}
1317		extent_end = other_end;
1318		del_slot = path->slots[0] + 1;
1319		del_nr++;
1320		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1321					0, root->root_key.objectid,
1322					ino, orig_offset);
1323		if (ret) {
1324			btrfs_abort_transaction(trans, ret);
1325			goto out;
1326		}
1327	}
1328	other_start = 0;
1329	other_end = start;
1330	if (extent_mergeable(leaf, path->slots[0] - 1,
1331			     ino, bytenr, orig_offset,
1332			     &other_start, &other_end)) {
1333		if (recow) {
1334			btrfs_release_path(path);
1335			goto again;
1336		}
1337		key.offset = other_start;
1338		del_slot = path->slots[0];
1339		del_nr++;
1340		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1341					0, root->root_key.objectid,
1342					ino, orig_offset);
1343		if (ret) {
1344			btrfs_abort_transaction(trans, ret);
1345			goto out;
1346		}
1347	}
1348	if (del_nr == 0) {
1349		fi = btrfs_item_ptr(leaf, path->slots[0],
1350			   struct btrfs_file_extent_item);
1351		btrfs_set_file_extent_type(leaf, fi,
1352					   BTRFS_FILE_EXTENT_REG);
1353		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1354		btrfs_mark_buffer_dirty(leaf);
1355	} else {
1356		fi = btrfs_item_ptr(leaf, del_slot - 1,
1357			   struct btrfs_file_extent_item);
1358		btrfs_set_file_extent_type(leaf, fi,
1359					   BTRFS_FILE_EXTENT_REG);
1360		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1361		btrfs_set_file_extent_num_bytes(leaf, fi,
1362						extent_end - key.offset);
1363		btrfs_mark_buffer_dirty(leaf);
1364
1365		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1366		if (ret < 0) {
1367			btrfs_abort_transaction(trans, ret);
1368			goto out;
1369		}
1370	}
1371out:
1372	btrfs_free_path(path);
1373	return 0;
1374}
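
/*
 * Editor's note, illustrative example: marking [16K, 32K) written inside
 * a preallocated extent [0, 64K) splits the item into three, prealloc
 * [0, 16K), regular [16K, 32K) and prealloc [32K, 64K), unless a
 * neighbouring regular extent from the same allocation allows the
 * written piece to be merged instead.
 */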
1375
1376/*
1377 * on error we return an unlocked page and the error value
1378 * on success we return a locked page and 0
1379 */
1380static int prepare_uptodate_page(struct inode *inode,
1381				 struct page *page, u64 pos,
1382				 bool force_uptodate)
1383{
1384	int ret = 0;
1385
1386	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1387	    !PageUptodate(page)) {
1388		ret = btrfs_readpage(NULL, page);
1389		if (ret)
1390			return ret;
1391		lock_page(page);
1392		if (!PageUptodate(page)) {
1393			unlock_page(page);
1394			return -EIO;
1395		}
1396		if (page->mapping != inode->i_mapping) {
1397			unlock_page(page);
1398			return -EAGAIN;
1399		}
1400	}
1401	return 0;
1402}
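
/*
 * Editor's note: only a write that starts (or, for the caller's last
 * page, ends) at a non page-aligned offset forces a readpage here. For
 * example, with 4K pages a write at pos = 8192 has (pos & 4095) == 0
 * and returns immediately unless force_uptodate is set.
 */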
1403
1404/*
1405 * this just gets pages into the page cache and locks them down.
1406 */
1407static noinline int prepare_pages(struct inode *inode, struct page **pages,
1408				  size_t num_pages, loff_t pos,
1409				  size_t write_bytes, bool force_uptodate)
1410{
1411	int i;
1412	unsigned long index = pos >> PAGE_SHIFT;
1413	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1414	int err = 0;
1415	int faili;
1416
1417	for (i = 0; i < num_pages; i++) {
1418again:
1419		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1420					       mask | __GFP_WRITE);
1421		if (!pages[i]) {
1422			faili = i - 1;
1423			err = -ENOMEM;
1424			goto fail;
1425		}
1426
1427		if (i == 0)
1428			err = prepare_uptodate_page(inode, pages[i], pos,
1429						    force_uptodate);
1430		if (!err && i == num_pages - 1)
1431			err = prepare_uptodate_page(inode, pages[i],
1432						    pos + write_bytes, false);
1433		if (err) {
1434			put_page(pages[i]);
1435			if (err == -EAGAIN) {
1436				err = 0;
1437				goto again;
1438			}
1439			faili = i - 1;
1440			goto fail;
1441		}
1442		wait_on_page_writeback(pages[i]);
1443	}
1444
1445	return 0;
1446fail:
1447	while (faili >= 0) {
1448		unlock_page(pages[faili]);
1449		put_page(pages[faili]);
1450		faili--;
1451	}
1452	return err;
1453
1454}
1455
1456/*
1457 * This function locks the extent and properly waits for data=ordered extents
1458 * to finish before allowing the pages to be modified if needed.
1459 *
1460 * The return value:
1461 * 1 - the extent is locked
1462 * 0 - the extent is not locked, and everything is OK
1463 * -EAGAIN - the pages need to be re-prepared
1464 * any other value < 0 - something went wrong
1465 */
1466static noinline int
1467lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1468				size_t num_pages, loff_t pos,
1469				size_t write_bytes,
1470				u64 *lockstart, u64 *lockend,
1471				struct extent_state **cached_state)
1472{
1473	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1474	u64 start_pos;
1475	u64 last_pos;
1476	int i;
1477	int ret = 0;
1478
1479	start_pos = round_down(pos, fs_info->sectorsize);
1480	last_pos = start_pos
1481		+ round_up(pos + write_bytes - start_pos,
1482			   fs_info->sectorsize) - 1;
1483
1484	if (start_pos < inode->vfs_inode.i_size) {
1485		struct btrfs_ordered_extent *ordered;
1486
1487		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1488				cached_state);
1489		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1490						     last_pos - start_pos + 1);
1491		if (ordered &&
1492		    ordered->file_offset + ordered->len > start_pos &&
1493		    ordered->file_offset <= last_pos) {
1494			unlock_extent_cached(&inode->io_tree, start_pos,
1495					last_pos, cached_state);
1496			for (i = 0; i < num_pages; i++) {
1497				unlock_page(pages[i]);
1498				put_page(pages[i]);
1499			}
1500			btrfs_start_ordered_extent(&inode->vfs_inode,
1501					ordered, 1);
1502			btrfs_put_ordered_extent(ordered);
1503			return -EAGAIN;
1504		}
1505		if (ordered)
1506			btrfs_put_ordered_extent(ordered);
1507		clear_extent_bit(&inode->io_tree, start_pos, last_pos,
1508				 EXTENT_DIRTY | EXTENT_DELALLOC |
1509				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1510				 0, 0, cached_state);
1511		*lockstart = start_pos;
1512		*lockend = last_pos;
1513		ret = 1;
1514	}
1515
1516	for (i = 0; i < num_pages; i++) {
1517		if (clear_page_dirty_for_io(pages[i]))
1518			account_page_redirty(pages[i]);
1519		set_page_extent_mapped(pages[i]);
1520		WARN_ON(!PageLocked(pages[i]));
1521	}
1522
1523	return ret;
1524}
1525
1526static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1527				    size_t *write_bytes)
1528{
1529	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1530	struct btrfs_root *root = inode->root;
1531	struct btrfs_ordered_extent *ordered;
1532	u64 lockstart, lockend;
1533	u64 num_bytes;
1534	int ret;
1535
1536	ret = btrfs_start_write_no_snapshotting(root);
1537	if (!ret)
1538		return -ENOSPC;
1539
1540	lockstart = round_down(pos, fs_info->sectorsize);
1541	lockend = round_up(pos + *write_bytes,
1542			   fs_info->sectorsize) - 1;
1543
1544	while (1) {
1545		lock_extent(&inode->io_tree, lockstart, lockend);
1546		ordered = btrfs_lookup_ordered_range(inode, lockstart,
1547						     lockend - lockstart + 1);
1548		if (!ordered) {
1549			break;
1550		}
1551		unlock_extent(&inode->io_tree, lockstart, lockend);
1552		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
1553		btrfs_put_ordered_extent(ordered);
1554	}
1555
1556	num_bytes = lockend - lockstart + 1;
1557	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1558			NULL, NULL, NULL);
1559	if (ret <= 0) {
1560		ret = 0;
1561		btrfs_end_write_no_snapshotting(root);
1562	} else {
1563		*write_bytes = min_t(size_t, *write_bytes,
1564				     num_bytes - pos + lockstart);
1565	}
1566
1567	unlock_extent(&inode->io_tree, lockstart, lockend);
1568
1569	return ret;
1570}
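
/*
 * Editor's note, a worked example of the clamping above assuming a 4K
 * sectorsize: pos = 10000 and *write_bytes = 100000 give lockstart =
 * 8192; if can_nocow_extent() reduces num_bytes to 16384, the write is
 * clamped to 16384 - 10000 + 8192 = 14576 bytes that can be written
 * without COW.
 */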
1571
1572static noinline ssize_t __btrfs_buffered_write(struct file *file,
1573					       struct iov_iter *i,
1574					       loff_t pos)
1575{
1576	struct inode *inode = file_inode(file);
1577	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1578	struct btrfs_root *root = BTRFS_I(inode)->root;
1579	struct page **pages = NULL;
1580	struct extent_state *cached_state = NULL;
1581	struct extent_changeset *data_reserved = NULL;
1582	u64 release_bytes = 0;
1583	u64 lockstart;
1584	u64 lockend;
1585	size_t num_written = 0;
1586	int nrptrs;
1587	int ret = 0;
1588	bool only_release_metadata = false;
1589	bool force_page_uptodate = false;
1590
1591	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1592			PAGE_SIZE / (sizeof(struct page *)));
1593	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1594	nrptrs = max(nrptrs, 8);
1595	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1596	if (!pages)
1597		return -ENOMEM;
1598
1599	while (iov_iter_count(i) > 0) {
1600		size_t offset = pos & (PAGE_SIZE - 1);
1601		size_t sector_offset;
1602		size_t write_bytes = min(iov_iter_count(i),
1603					 nrptrs * (size_t)PAGE_SIZE -
1604					 offset);
1605		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1606						PAGE_SIZE);
1607		size_t reserve_bytes;
1608		size_t dirty_pages;
1609		size_t copied;
1610		size_t dirty_sectors;
1611		size_t num_sectors;
1612		int extents_locked;
1613
1614		WARN_ON(num_pages > nrptrs);
1615
1616		/*
1617		 * Fault pages before locking them in prepare_pages
1618		 * to avoid recursive lock
1619		 */
1620		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1621			ret = -EFAULT;
1622			break;
1623		}
1624
1625		sector_offset = pos & (fs_info->sectorsize - 1);
1626		reserve_bytes = round_up(write_bytes + sector_offset,
1627				fs_info->sectorsize);
1628
1629		extent_changeset_release(data_reserved);
1630		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
1631						  write_bytes);
1632		if (ret < 0) {
1633			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1634						      BTRFS_INODE_PREALLOC)) &&
1635			    check_can_nocow(BTRFS_I(inode), pos,
1636					&write_bytes) > 0) {
1637				/*
1638				 * For nodata cow case, no need to reserve
1639				 * data space.
1640				 */
1641				only_release_metadata = true;
1642				/*
1643				 * our prealloc extent may be smaller than
1644				 * write_bytes, so scale down.
1645				 */
1646				num_pages = DIV_ROUND_UP(write_bytes + offset,
1647							 PAGE_SIZE);
1648				reserve_bytes = round_up(write_bytes +
1649							 sector_offset,
1650							 fs_info->sectorsize);
1651			} else {
1652				break;
1653			}
1654		}
1655
1656		WARN_ON(reserve_bytes == 0);
1657		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1658				reserve_bytes);
1659		if (ret) {
1660			if (!only_release_metadata)
1661				btrfs_free_reserved_data_space(inode,
1662						data_reserved, pos,
1663						write_bytes);
1664			else
1665				btrfs_end_write_no_snapshotting(root);
1666			break;
1667		}
1668
1669		release_bytes = reserve_bytes;
1670again:
1671		/*
1672		 * This is going to setup the pages array with the number of
1673		 * pages we want, so we don't really need to worry about the
1674		 * contents of pages from loop to loop
1675		 */
1676		ret = prepare_pages(inode, pages, num_pages,
1677				    pos, write_bytes,
1678				    force_page_uptodate);
1679		if (ret) {
1680			btrfs_delalloc_release_extents(BTRFS_I(inode),
1681						       reserve_bytes, true);
1682			break;
1683		}
1684
1685		extents_locked = lock_and_cleanup_extent_if_need(
1686				BTRFS_I(inode), pages,
1687				num_pages, pos, write_bytes, &lockstart,
1688				&lockend, &cached_state);
1689		if (extents_locked < 0) {
1690			if (extents_locked == -EAGAIN)
1691				goto again;
1692			btrfs_delalloc_release_extents(BTRFS_I(inode),
1693						       reserve_bytes, true);
1694			ret = extents_locked;
1695			break;
1696		}
1697
1698		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1699
1700		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1701		dirty_sectors = round_up(copied + sector_offset,
1702					fs_info->sectorsize);
1703		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1704
1705		/*
1706		 * if we have trouble faulting in the pages, fall
1707		 * back to one page at a time
1708		 */
1709		if (copied < write_bytes)
1710			nrptrs = 1;
1711
1712		if (copied == 0) {
1713			force_page_uptodate = true;
1714			dirty_sectors = 0;
1715			dirty_pages = 0;
1716		} else {
1717			force_page_uptodate = false;
1718			dirty_pages = DIV_ROUND_UP(copied + offset,
1719						   PAGE_SIZE);
1720		}
1721
1722		if (num_sectors > dirty_sectors) {
1723			/* release everything except the sectors we dirtied */
1724			release_bytes -= dirty_sectors <<
1725						fs_info->sb->s_blocksize_bits;
1726			if (only_release_metadata) {
1727				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1728							release_bytes, true);
1729			} else {
1730				u64 __pos;
1731
1732				__pos = round_down(pos,
1733						   fs_info->sectorsize) +
1734					(dirty_pages << PAGE_SHIFT);
1735				btrfs_delalloc_release_space(inode,
1736						data_reserved, __pos,
1737						release_bytes, true);
1738			}
1739		}
1740
1741		release_bytes = round_up(copied + sector_offset,
1742					fs_info->sectorsize);
1743
1744		if (copied > 0)
1745			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
1746						pos, copied, &cached_state);
1747		if (extents_locked)
1748			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1749					     lockstart, lockend, &cached_state);
1750		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
1751					       true);
1752		if (ret) {
1753			btrfs_drop_pages(pages, num_pages);
1754			break;
1755		}
1756
1757		release_bytes = 0;
1758		if (only_release_metadata)
1759			btrfs_end_write_no_snapshotting(root);
1760
1761		if (only_release_metadata && copied > 0) {
1762			lockstart = round_down(pos,
1763					       fs_info->sectorsize);
1764			lockend = round_up(pos + copied,
1765					   fs_info->sectorsize) - 1;
1766
1767			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1768				       lockend, EXTENT_NORESERVE, NULL,
1769				       NULL, GFP_NOFS);
1770			only_release_metadata = false;
1771		}
1772
1773		btrfs_drop_pages(pages, num_pages);
1774
1775		cond_resched();
1776
1777		balance_dirty_pages_ratelimited(inode->i_mapping);
1778		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
1779			btrfs_btree_balance_dirty(fs_info);
1780
1781		pos += copied;
1782		num_written += copied;
1783	}
1784
1785	kfree(pages);
1786
1787	if (release_bytes) {
1788		if (only_release_metadata) {
1789			btrfs_end_write_no_snapshotting(root);
1790			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1791					release_bytes, true);
1792		} else {
1793			btrfs_delalloc_release_space(inode, data_reserved,
1794					round_down(pos, fs_info->sectorsize),
1795					release_bytes, true);
1796		}
1797	}
1798
1799	extent_changeset_free(data_reserved);
1800	return num_written ? num_written : ret;
1801}
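
/*
 * Editor's note, example of the nrptrs sizing above: on a 64-bit machine
 * with 4K pages, PAGE_SIZE / sizeof(struct page *) is 512, so a 1 MiB
 * iov (256 pages) gets nrptrs = 256 before the dirty-throttling clamp,
 * and nrptrs never drops below 8.
 */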
1802
1803static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1804{
1805	struct file *file = iocb->ki_filp;
1806	struct inode *inode = file_inode(file);
1807	loff_t pos = iocb->ki_pos;
1808	ssize_t written;
1809	ssize_t written_buffered;
1810	loff_t endbyte;
1811	int err;
1812
1813	written = generic_file_direct_write(iocb, from);
1814
1815	if (written < 0 || !iov_iter_count(from))
1816		return written;
1817
1818	pos += written;
1819	written_buffered = __btrfs_buffered_write(file, from, pos);
1820	if (written_buffered < 0) {
1821		err = written_buffered;
1822		goto out;
1823	}
1824	/*
1825	 * Ensure all data is persisted. We want the next direct IO read to be
1826	 * able to read what was just written.
1827	 */
1828	endbyte = pos + written_buffered - 1;
1829	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1830	if (err)
1831		goto out;
1832	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1833	if (err)
1834		goto out;
1835	written += written_buffered;
1836	iocb->ki_pos = pos + written_buffered;
1837	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1838				 endbyte >> PAGE_SHIFT);
1839out:
1840	return written ? written : err;
1841}
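
/*
 * Editor's note, illustrative fallback flow: if a 1 MiB O_DIRECT write
 * completes only 512 KiB directly, the remaining 512 KiB goes through
 * __btrfs_buffered_write(), is then written back and waited on so a
 * later direct read sees it, and the affected page cache range is
 * invalidated.
 */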
1842
1843static void update_time_for_write(struct inode *inode)
1844{
1845	struct timespec now;
1846
1847	if (IS_NOCMTIME(inode))
1848		return;
1849
1850	now = current_time(inode);
1851	if (!timespec_equal(&inode->i_mtime, &now))
1852		inode->i_mtime = now;
1853
1854	if (!timespec_equal(&inode->i_ctime, &now))
1855		inode->i_ctime = now;
1856
1857	if (IS_I_VERSION(inode))
1858		inode_inc_iversion(inode);
1859}
1860
1861static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1862				    struct iov_iter *from)
1863{
1864	struct file *file = iocb->ki_filp;
1865	struct inode *inode = file_inode(file);
1866	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1867	struct btrfs_root *root = BTRFS_I(inode)->root;
1868	u64 start_pos;
1869	u64 end_pos;
1870	ssize_t num_written = 0;
1871	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1872	ssize_t err;
1873	loff_t pos;
1874	size_t count = iov_iter_count(from);
1875	loff_t oldsize;
1876	int clean_page = 0;
1877
1878	if (!(iocb->ki_flags & IOCB_DIRECT) &&
1879	    (iocb->ki_flags & IOCB_NOWAIT))
1880		return -EOPNOTSUPP;
1881
1882	if (!inode_trylock(inode)) {
1883		if (iocb->ki_flags & IOCB_NOWAIT)
1884			return -EAGAIN;
1885		inode_lock(inode);
1886	}
1887
1888	err = generic_write_checks(iocb, from);
1889	if (err <= 0) {
1890		inode_unlock(inode);
1891		return err;
1892	}
1893
1894	pos = iocb->ki_pos;
1895	if (iocb->ki_flags & IOCB_NOWAIT) {
1896		/*
1897		 * We will allocate space in case nodatacow is not set,
1898		 * so bail
1899		 */
1900		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1901					      BTRFS_INODE_PREALLOC)) ||
1902		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1903			inode_unlock(inode);
1904			return -EAGAIN;
1905		}
1906	}
1907
1908	current->backing_dev_info = inode_to_bdi(inode);
1909	err = file_remove_privs(file);
1910	if (err) {
1911		inode_unlock(inode);
1912		goto out;
1913	}
1914
1915	/*
1916	 * If BTRFS flips readonly due to some impossible error
1917	 * (fs_info->fs_state now has BTRFS_FS_STATE_ERROR),
1918	 * although we have opened a file as writable, we have
1919	 * to stop this write operation to ensure FS consistency.
1920	 */
1921	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1922		inode_unlock(inode);
1923		err = -EROFS;
1924		goto out;
1925	}
1926
1927	/*
1928	 * We reserve space for updating the inode when we reserve space for the
1929	 * extent we are going to write, so we will enospc out there.  We don't
1930	 * need to start yet another transaction to update the inode as we will
1931	 * update the inode when we finish writing whatever data we write.
1932	 */
1933	update_time_for_write(inode);
1934
1935	start_pos = round_down(pos, fs_info->sectorsize);
1936	oldsize = i_size_read(inode);
1937	if (start_pos > oldsize) {
1938		/* Expand hole size to cover write data, preventing empty gap */
1939		end_pos = round_up(pos + count,
1940				   fs_info->sectorsize);
1941		err = btrfs_cont_expand(inode, oldsize, end_pos);
1942		if (err) {
1943			inode_unlock(inode);
1944			goto out;
1945		}
1946		if (start_pos > round_up(oldsize, fs_info->sectorsize))
1947			clean_page = 1;
1948	}
1949
1950	if (sync)
1951		atomic_inc(&BTRFS_I(inode)->sync_writers);
1952
1953	if (iocb->ki_flags & IOCB_DIRECT) {
1954		num_written = __btrfs_direct_write(iocb, from);
1955	} else {
1956		num_written = __btrfs_buffered_write(file, from, pos);
1957		if (num_written > 0)
1958			iocb->ki_pos = pos + num_written;
1959		if (clean_page)
1960			pagecache_isize_extended(inode, oldsize,
1961						i_size_read(inode));
1962	}
1963
1964	inode_unlock(inode);
1965
1966	/*
1967	 * We also have to set last_sub_trans to the current log transid,
1968	 * otherwise subsequent syncs to a file that's been synced in this
1969	 * transaction will appear to have already occurred.
1970	 */
1971	spin_lock(&BTRFS_I(inode)->lock);
1972	BTRFS_I(inode)->last_sub_trans = root->log_transid;
1973	spin_unlock(&BTRFS_I(inode)->lock);
1974	if (num_written > 0)
1975		num_written = generic_write_sync(iocb, num_written);
1976
1977	if (sync)
1978		atomic_dec(&BTRFS_I(inode)->sync_writers);
1979out:
1980	current->backing_dev_info = NULL;
1981	return num_written ? num_written : err;
1982}
1983
1984int btrfs_release_file(struct inode *inode, struct file *filp)
1985{
1986	struct btrfs_file_private *private = filp->private_data;
1987
1988	if (private && private->filldir_buf)
1989		kfree(private->filldir_buf);
1990	kfree(private);
1991	filp->private_data = NULL;
1992
1993	/*
1994	 * ordered_data_close is set by setattr when we are about to truncate
1995	 * a file from a non-zero size to a zero size.  This tries to
1996	 * flush down new bytes that may have been written if the
1997	 * application were using truncate to replace a file in place.
1998	 */
1999	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2000			       &BTRFS_I(inode)->runtime_flags))
2001			filemap_flush(inode->i_mapping);
2002	return 0;
2003}
2004
2005static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2006{
2007	int ret;
2008	struct blk_plug plug;
2009
2010	/*
2011	 * This is only called in fsync, which would do synchronous writes, so
2012	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
2013	 * multiple disks using raid profile, a large IO can be split to
2014	 * several segments of stripe length (currently 64K).
2015	 */
2016	blk_start_plug(&plug);
2017	atomic_inc(&BTRFS_I(inode)->sync_writers);
2018	ret = btrfs_fdatawrite_range(inode, start, end);
2019	atomic_dec(&BTRFS_I(inode)->sync_writers);
2020	blk_finish_plug(&plug);
2021
2022	return ret;
2023}
2024
2025/*
2026 * fsync call for both files and directories.  This logs the inode into
2027 * the tree log instead of forcing full commits whenever possible.
2028 *
2029 * It needs to call filemap_fdatawait so that all ordered extent updates
2030 * in the metadata btree are up to date for copying to the log.
2031 *
2032 * It drops the inode mutex before doing the tree log commit.  This is an
2033 * important optimization for directories because holding the mutex prevents
2034 * new operations on the dir while we write to disk.
2035 */
2036int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2037{
2038	struct dentry *dentry = file_dentry(file);
2039	struct inode *inode = d_inode(dentry);
2040	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2041	struct btrfs_root *root = BTRFS_I(inode)->root;
2042	struct btrfs_trans_handle *trans;
2043	struct btrfs_log_ctx ctx;
2044	int ret = 0, err;
2045	bool full_sync = false;
2046	u64 len;
2047
2048	/*
2049	 * The range length can be represented by u64, so we have to do the
2050	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync().
2051	 */
2052	len = (u64)end - (u64)start + 1;
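	/*
	 * Worked example (illustrative): a plain fsync() reaches here with
	 * start == 0 and end == LLONG_MAX, so len is
	 * (u64)LLONG_MAX - 0 + 1 == 2^63, a value that would overflow a
	 * signed 64-bit type but fits in u64, hence the casts above.
	 */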
2053	trace_btrfs_sync_file(file, datasync);
2054
2055	btrfs_init_log_ctx(&ctx, inode);
2056
2057	/*
2058	 * We write the dirty pages in the range and wait until they complete
2059	 * outside of the ->i_mutex, so that the dirty pages can be flushed by
2060	 * multiple tasks and performance improves.  See
2061	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2062	 */
2063	ret = start_ordered_ops(inode, start, end);
2064	if (ret)
2065		goto out;
2066
2067	inode_lock(inode);
2068	atomic_inc(&root->log_batch);
2069	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2070			     &BTRFS_I(inode)->runtime_flags);
2071	/*
2072	 * We might have had more pages made dirty after calling
2073	 * start_ordered_ops and before acquiring the inode's i_mutex.
2074	 */
2075	if (full_sync) {
2076		/*
2077		 * For a full sync, we need to make sure any ordered operations
2078		 * start and finish before we start logging the inode, so that
2079		 * all extents are persisted and the respective file extent
2080		 * items are in the fs/subvol btree.
2081		 */
2082		ret = btrfs_wait_ordered_range(inode, start, len);
2083	} else {
2084		/*
2085		 * Start any new ordered operations before starting to log the
2086		 * inode. We will wait for them to finish in btrfs_sync_log().
2087		 *
2088		 * Right before acquiring the inode's mutex, we might have new
2089		 * writes dirtying pages, which won't immediately start the
2090		 * respective ordered operations - that is done through the
2091		 * fill_delalloc callbacks invoked from the writepage and
2092		 * writepages address space operations. So make sure we start
2093		 * all ordered operations before starting to log our inode. Not
2094		 * doing this means that while logging the inode, writeback
2095		 * could start and invoke writepage/writepages, which would call
2096		 * the fill_delalloc callbacks (cow_file_range,
2097		 * submit_compressed_extents). These callbacks add first an
2098		 * extent map to the modified list of extents and then create
2099		 * the respective ordered operation, which means in
2100		 * tree-log.c:btrfs_log_inode() we might capture all existing
2101		 * ordered operations (with btrfs_get_logged_extents()) before
2102		 * the fill_delalloc callback adds its ordered operation, and by
2103		 * the time we visit the modified list of extent maps (with
2104		 * btrfs_log_changed_extents()), we see and process the extent
2105		 * map they created. We then use the extent map to construct a
2106		 * file extent item for logging without waiting for the
2107		 * respective ordered operation to finish - this file extent
2108		 * item points to a disk location that might not have yet been
2109		 * written to, containing random data - so after a crash a log
2110		 * replay will make our inode have file extent items that point
2111		 * to disk locations containing invalid data, as we returned
2112		 * success to userspace without waiting for the respective
2113		 * ordered operation to finish, because it wasn't captured by
2114		 * btrfs_get_logged_extents().
2115		 */
2116		ret = start_ordered_ops(inode, start, end);
2117	}
2118	if (ret) {
2119		inode_unlock(inode);
2120		goto out;
2121	}
2122	atomic_inc(&root->log_batch);
2123
2124	/*
2125	 * If the last transaction that changed this file was before the current
2126	 * transaction and we have the full sync flag set in our inode, we can
2127	 * bail out now without any syncing.
2128	 *
2129	 * Note that we can't bail out if the full sync flag isn't set. This is
2130	 * because when the full sync flag is set we start all ordered extents
2131	 * and wait for them to fully complete - when they complete they update
2132	 * the inode's last_trans field through:
2133	 *
2134	 *     btrfs_finish_ordered_io() ->
2135	 *         btrfs_update_inode_fallback() ->
2136	 *             btrfs_update_inode() ->
2137	 *                 btrfs_set_inode_last_trans()
2138	 *
2139	 * So we are sure that last_trans is up to date and can do this check to
2140	 * bail out safely. For the fast path, when the full sync flag is not
2141	 * set in our inode, we cannot do it because we only start our ordered
2142	 * extents and don't wait for them to complete (that is when
2143	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
2144	 * value might be less than or equal to fs_info->last_trans_committed,
2145	 * and setting a speculative last_trans for an inode when a buffered
2146	 * write is made (such as fs_info->generation + 1) would not
2147	 * be reliable since after setting the value and before fsync is called
2148	 * any number of transactions can start and commit (transaction kthread
2149	 * commits the current transaction periodically), and a transaction
2150	 * commit neither starts nor waits for ordered extents to complete.
2151	 */
2152	smp_mb();
2153	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2154	    (full_sync && BTRFS_I(inode)->last_trans <=
2155	     fs_info->last_trans_committed) ||
2156	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
2157	     BTRFS_I(inode)->last_trans
2158	     <= fs_info->last_trans_committed)) {
2159		/*
2160		 * We've had everything committed since the last time we were
2161		 * modified, so clear this flag in case it was set for whatever
2162		 * reason; it's no longer relevant.
2163		 */
2164		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2165			  &BTRFS_I(inode)->runtime_flags);
2166		/*
2167		 * An ordered extent might have started before and completed
2168		 * already with io errors, in which case the inode was not
2169		 * updated and we end up here. So check the inode's mapping
2170		 * for any errors that might have happened since we last
2171		 * called fsync.
2172		 */
2173		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2174		inode_unlock(inode);
2175		goto out;
2176	}
2177
2178	/*
2179	 * We use start here because we will need to wait on the IO to complete
2180	 * in btrfs_sync_log, which could require joining a transaction (for
2181	 * example checking cross references in the nocow path).  If we use join
2182	 * here we could get into a situation where we're waiting on IO to
2183	 * happen that is blocked on a transaction trying to commit.  With start
2184	 * we inc the extwriter counter, so we wait for all extwriters to exit
2185	 * before we start blocking join'ers.  This comment is to keep somebody
2186	 * from thinking they are super smart and changing this to
2187	 * btrfs_join_transaction *cough*Josef*cough*.
2188	 */
2189	trans = btrfs_start_transaction(root, 0);
2190	if (IS_ERR(trans)) {
2191		ret = PTR_ERR(trans);
2192		inode_unlock(inode);
2193		goto out;
2194	}
2195	trans->sync = true;
2196
2197	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2198	if (ret < 0) {
2199		/* Fallthrough and commit/free transaction. */
2200		ret = 1;
2201	}
2202
2203	/* we've logged all the items and now have a consistent
2204	 * version of the file in the log.  It is possible that
2205	 * someone will come in and modify the file, but that's
2206	 * fine because the log is consistent on disk, and we
2207	 * have references to all of the file's extents
2208	 *
2209	 * It is possible that someone will come in and log the
2210	 * file again, but that will end up using the synchronization
2211	 * inside btrfs_sync_log to keep things safe.
2212	 */
2213	inode_unlock(inode);
2214
2215	/*
2216	 * If any of the ordered extents had an error, just return it to user
2217	 * space, so that the application knows some writes didn't succeed and
2218	 * can take proper action (e.g. retry). Blindly committing the
2219	 * transaction in this case would fool userspace into thinking everything was
2220	 * successful. And we also want to make sure our log doesn't contain
2221	 * file extent items pointing to extents that weren't fully written to -
2222	 * just like in the non fast fsync path, where we check for the ordered
2223	 * operation's error flag before writing to the log tree and return -EIO
2224	 * if any of them had this flag set (btrfs_wait_ordered_range) -
2225	 * therefore we need to check for errors in the ordered operations,
2226	 * which are indicated by ctx.io_err.
2227	 */
2228	if (ctx.io_err) {
2229		btrfs_end_transaction(trans);
2230		ret = ctx.io_err;
2231		goto out;
2232	}
2233
2234	if (ret != BTRFS_NO_LOG_SYNC) {
2235		if (!ret) {
2236			ret = btrfs_sync_log(trans, root, &ctx);
2237			if (!ret) {
2238				ret = btrfs_end_transaction(trans);
2239				goto out;
2240			}
2241		}
2242		if (!full_sync) {
2243			ret = btrfs_wait_ordered_range(inode, start, len);
2244			if (ret) {
2245				btrfs_end_transaction(trans);
2246				goto out;
2247			}
2248		}
2249		ret = btrfs_commit_transaction(trans);
2250	} else {
2251		ret = btrfs_end_transaction(trans);
2252	}
2253out:
2254	ASSERT(list_empty(&ctx.list));
2255	err = file_check_and_advance_wb_err(file);
2256	if (!ret)
2257		ret = err;
2258	return ret > 0 ? -EIO : ret;
2259}
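/*
 * A minimal userspace sketch (not part of this file; the path is
 * illustrative) of what drives btrfs_sync_file(): fsync() on an fd maps to
 * ->fsync through the VFS with start == 0, end == LLONG_MAX and
 * datasync == 0 (fdatasync() passes datasync == 1).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/data.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "record\n", 7) != 7)
		perror("write");
	/* Durability point: returns only after the data (and log) are on disk */
	if (fsync(fd))
		perror("fsync");
	close(fd);
	return 0;
}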
2260
2261static const struct vm_operations_struct btrfs_file_vm_ops = {
2262	.fault		= filemap_fault,
2263	.map_pages	= filemap_map_pages,
2264	.page_mkwrite	= btrfs_page_mkwrite,
2265};
2266
2267static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2268{
2269	struct address_space *mapping = filp->f_mapping;
2270
2271	if (!mapping->a_ops->readpage)
2272		return -ENOEXEC;
2273
2274	file_accessed(filp);
2275	vma->vm_ops = &btrfs_file_vm_ops;
2276
2277	return 0;
2278}
2279
2280static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2281			  int slot, u64 start, u64 end)
2282{
2283	struct btrfs_file_extent_item *fi;
2284	struct btrfs_key key;
2285
2286	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2287		return 0;
2288
2289	btrfs_item_key_to_cpu(leaf, &key, slot);
2290	if (key.objectid != btrfs_ino(inode) ||
2291	    key.type != BTRFS_EXTENT_DATA_KEY)
2292		return 0;
2293
2294	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2295
2296	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2297		return 0;
2298
2299	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2300		return 0;
2301
2302	if (key.offset == end)
2303		return 1;
2304	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2305		return 1;
2306	return 0;
2307}
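/*
 * Illustration (offsets made up): given an existing hole item starting at
 * key.offset == 64K with num_bytes == 64K, a new hole ending at start ==
 * 128K merges via the "key.offset + num_bytes == start" case above, while
 * a new hole with end == 64K merges via the "key.offset == end" case.
 */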
2308
2309static int fill_holes(struct btrfs_trans_handle *trans,
2310		struct btrfs_inode *inode,
2311		struct btrfs_path *path, u64 offset, u64 end)
2312{
2313	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
2314	struct btrfs_root *root = inode->root;
2315	struct extent_buffer *leaf;
2316	struct btrfs_file_extent_item *fi;
2317	struct extent_map *hole_em;
2318	struct extent_map_tree *em_tree = &inode->extent_tree;
2319	struct btrfs_key key;
2320	int ret;
2321
2322	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2323		goto out;
2324
2325	key.objectid = btrfs_ino(inode);
2326	key.type = BTRFS_EXTENT_DATA_KEY;
2327	key.offset = offset;
2328
2329	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2330	if (ret <= 0) {
2331		/*
2332		 * We should have dropped this offset, so if we find it then
2333		 * something has gone horribly wrong.
2334		 */
2335		if (ret == 0)
2336			ret = -EINVAL;
2337		return ret;
2338	}
2339
2340	leaf = path->nodes[0];
2341	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2342		u64 num_bytes;
2343
2344		path->slots[0]--;
2345		fi = btrfs_item_ptr(leaf, path->slots[0],
2346				    struct btrfs_file_extent_item);
2347		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2348			end - offset;
2349		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2350		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2351		btrfs_set_file_extent_offset(leaf, fi, 0);
2352		btrfs_mark_buffer_dirty(leaf);
2353		goto out;
2354	}
2355
2356	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2357		u64 num_bytes;
2358
2359		key.offset = offset;
2360		btrfs_set_item_key_safe(fs_info, path, &key);
2361		fi = btrfs_item_ptr(leaf, path->slots[0],
2362				    struct btrfs_file_extent_item);
2363		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2364			offset;
2365		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2366		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2367		btrfs_set_file_extent_offset(leaf, fi, 0);
2368		btrfs_mark_buffer_dirty(leaf);
2369		goto out;
2370	}
2371	btrfs_release_path(path);
2372
2373	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2374			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2375	if (ret)
2376		return ret;
2377
2378out:
2379	btrfs_release_path(path);
2380
2381	hole_em = alloc_extent_map();
2382	if (!hole_em) {
2383		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2384		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2385	} else {
2386		hole_em->start = offset;
2387		hole_em->len = end - offset;
2388		hole_em->ram_bytes = hole_em->len;
2389		hole_em->orig_start = offset;
2390
2391		hole_em->block_start = EXTENT_MAP_HOLE;
2392		hole_em->block_len = 0;
2393		hole_em->orig_block_len = 0;
2394		hole_em->bdev = fs_info->fs_devices->latest_bdev;
2395		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2396		hole_em->generation = trans->transid;
2397
2398		do {
2399			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2400			write_lock(&em_tree->lock);
2401			ret = add_extent_mapping(em_tree, hole_em, 1);
2402			write_unlock(&em_tree->lock);
2403		} while (ret == -EEXIST);
2404		free_extent_map(hole_em);
2405		if (ret)
2406			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2407					&inode->runtime_flags);
2408	}
2409
2410	return 0;
2411}
2412
2413/*
2414 * Find a hole extent on the given inode and change start/len to the end of
2415 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2416 *	   em->start + em->len > start).
2417 * When a hole extent is found, return 1 and modify start/len.
2418 */
2419static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2420{
2421	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2422	struct extent_map *em;
2423	int ret = 0;
2424
2425	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2426			      round_down(*start, fs_info->sectorsize),
2427			      round_up(*len, fs_info->sectorsize), 0);
2428	if (IS_ERR(em))
2429		return PTR_ERR(em);
2430
2431	/* Hole or vacuum extent (only exists in no-hole mode) */
2432	if (em->block_start == EXTENT_MAP_HOLE) {
2433		ret = 1;
2434		*len = em->start + em->len > *start + *len ?
2435		       0 : *start + *len - em->start - em->len;
2436		*start = em->start + em->len;
2437	}
2438	free_extent_map(em);
2439	return ret;
2440}
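/*
 * Usage sketch (illustrative values, 4K sectorsize): when punching
 * [10K, 74K) while [8K, 16K) is already a hole, the extent map lookup
 * returns that hole, so *start advances to 16K and *len shrinks to 58K,
 * letting callers skip the part of the range that needs no work.
 */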
2441
2442static int btrfs_punch_hole_lock_range(struct inode *inode,
2443				       const u64 lockstart,
2444				       const u64 lockend,
2445				       struct extent_state **cached_state)
2446{
2447	while (1) {
2448		struct btrfs_ordered_extent *ordered;
2449		int ret;
2450
2451		truncate_pagecache_range(inode, lockstart, lockend);
2452
2453		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2454				 cached_state);
2455		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2456
2457		/*
2458		 * We need to make sure we have no ordered extents in this range
2459		 * and that nobody raced in and read a page in this range; if
2460		 * either happened, we need to try again.
2461		 */
2462		if ((!ordered ||
2463		    (ordered->file_offset + ordered->len <= lockstart ||
2464		     ordered->file_offset > lockend)) &&
2465		     !filemap_range_has_page(inode->i_mapping,
2466					     lockstart, lockend)) {
2467			if (ordered)
2468				btrfs_put_ordered_extent(ordered);
2469			break;
2470		}
2471		if (ordered)
2472			btrfs_put_ordered_extent(ordered);
2473		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2474				     lockend, cached_state);
2475		ret = btrfs_wait_ordered_range(inode, lockstart,
2476					       lockend - lockstart + 1);
2477		if (ret)
2478			return ret;
2479	}
2480	return 0;
2481}
2482
2482
2483static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2484{
2485	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2486	struct btrfs_root *root = BTRFS_I(inode)->root;
2487	struct extent_state *cached_state = NULL;
2488	struct btrfs_path *path;
2489	struct btrfs_block_rsv *rsv;
2490	struct btrfs_trans_handle *trans;
2491	u64 lockstart;
2492	u64 lockend;
2493	u64 tail_start;
2494	u64 tail_len;
2495	u64 orig_start = offset;
2496	u64 cur_offset;
2497	u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
2498	u64 drop_end;
2499	int ret = 0;
2500	int err = 0;
2501	unsigned int rsv_count;
2502	bool same_block;
2503	bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
2504	u64 ino_size;
2505	bool truncated_block = false;
2506	bool updated_inode = false;
2507
2508	ret = btrfs_wait_ordered_range(inode, offset, len);
2509	if (ret)
2510		return ret;
2511
2512	inode_lock(inode);
2513	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2514	ret = find_first_non_hole(inode, &offset, &len);
2515	if (ret < 0)
2516		goto out_only_mutex;
2517	if (ret && !len) {
2518		/* Already in a large hole */
2519		ret = 0;
2520		goto out_only_mutex;
2521	}
2522
2523	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2524	lockend = round_down(offset + len,
2525			     btrfs_inode_sectorsize(inode)) - 1;
2526	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2527		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2528	/*
2529	 * We needn't truncate any block which is beyond the end of the file
2530	 * because we are sure there is no data there.
2531	 */
2532	/*
2533	 * Only do this if we are in the same block and we aren't doing the
2534	 * entire block.
2535	 */
2536	if (same_block && len < fs_info->sectorsize) {
2537		if (offset < ino_size) {
2538			truncated_block = true;
2539			ret = btrfs_truncate_block(inode, offset, len, 0);
2540		} else {
2541			ret = 0;
2542		}
2543		goto out_only_mutex;
2544	}
2545
2546	/* zero back part of the first block */
2547	if (offset < ino_size) {
2548		truncated_block = true;
2549		ret = btrfs_truncate_block(inode, offset, 0, 0);
2550		if (ret) {
2551			inode_unlock(inode);
2552			return ret;
2553		}
2554	}
2555
2556	/* Check the aligned pages after the first unaligned page.
2557	 * If offset != orig_start, the first unaligned page and several
2558	 * following pages are already in holes, so the extra check
2559	 * can be skipped. */
2560	if (offset == orig_start) {
2561		/* after truncate page, check hole again */
2562		len = offset + len - lockstart;
2563		offset = lockstart;
2564		ret = find_first_non_hole(inode, &offset, &len);
2565		if (ret < 0)
2566			goto out_only_mutex;
2567		if (ret && !len) {
2568			ret = 0;
2569			goto out_only_mutex;
2570		}
2571		lockstart = offset;
2572	}
2573
2574	/* Check the tail unaligned part is in a hole */
2575	tail_start = lockend + 1;
2576	tail_len = offset + len - tail_start;
2577	if (tail_len) {
2578		ret = find_first_non_hole(inode, &tail_start, &tail_len);
2579		if (unlikely(ret < 0))
2580			goto out_only_mutex;
2581		if (!ret) {
2582			/* zero the front end of the last page */
2583			if (tail_start + tail_len < ino_size) {
2584				truncated_block = true;
2585				ret = btrfs_truncate_block(inode,
2586							tail_start + tail_len,
2587							0, 1);
2588				if (ret)
2589					goto out_only_mutex;
2590			}
2591		}
2592	}
2593
2594	if (lockend < lockstart) {
2595		ret = 0;
2596		goto out_only_mutex;
2597	}
2598
2599	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2600					  &cached_state);
2601	if (ret) {
2602		inode_unlock(inode);
2603		goto out_only_mutex;
2604	}
2605
2606	path = btrfs_alloc_path();
2607	if (!path) {
2608		ret = -ENOMEM;
2609		goto out;
2610	}
2611
2612	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2613	if (!rsv) {
2614		ret = -ENOMEM;
2615		goto out_free;
2616	}
2617	rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
2618	rsv->failfast = 1;
2619
2620	/*
2621	 * 1 - update the inode
2622	 * 1 - removing the extents in the range
2623	 * 1 - adding the hole extent if no_holes isn't set
2624	 */
2625	rsv_count = no_holes ? 2 : 3;
2626	trans = btrfs_start_transaction(root, rsv_count);
2627	if (IS_ERR(trans)) {
2628		err = PTR_ERR(trans);
2629		goto out_free;
2630	}
2631
2632	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2633				      min_size, 0);
2634	BUG_ON(ret);
2635	trans->block_rsv = rsv;
2636
2637	cur_offset = lockstart;
2638	len = lockend - cur_offset;
2639	while (cur_offset < lockend) {
2640		ret = __btrfs_drop_extents(trans, root, inode, path,
2641					   cur_offset, lockend + 1,
2642					   &drop_end, 1, 0, 0, NULL);
2643		if (ret != -ENOSPC)
2644			break;
2645
2646		trans->block_rsv = &fs_info->trans_block_rsv;
2647
2648		if (cur_offset < drop_end && cur_offset < ino_size) {
2649			ret = fill_holes(trans, BTRFS_I(inode), path,
2650					cur_offset, drop_end);
2651			if (ret) {
2652				/*
2653				 * If we failed then we didn't insert our hole
2654				 * entries for the area we dropped, so now the
2655				 * fs is corrupted, so we must abort the
2656				 * transaction.
2657				 */
2658				btrfs_abort_transaction(trans, ret);
2659				err = ret;
2660				break;
2661			}
2662		}
2663
2664		cur_offset = drop_end;
2665
2666		ret = btrfs_update_inode(trans, root, inode);
2667		if (ret) {
2668			err = ret;
2669			break;
2670		}
2671
2672		btrfs_end_transaction(trans);
2673		btrfs_btree_balance_dirty(fs_info);
2674
2675		trans = btrfs_start_transaction(root, rsv_count);
2676		if (IS_ERR(trans)) {
2677			ret = PTR_ERR(trans);
2678			trans = NULL;
2679			break;
2680		}
2681
2682		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2683					      rsv, min_size, 0);
2684		BUG_ON(ret);	/* shouldn't happen */
2685		trans->block_rsv = rsv;
2686
2687		ret = find_first_non_hole(inode, &cur_offset, &len);
2688		if (unlikely(ret < 0))
2689			break;
2690		if (ret && !len) {
2691			ret = 0;
2692			break;
2693		}
2694	}
2695
2696	if (ret) {
2697		err = ret;
2698		goto out_trans;
2699	}
2700
2701	trans->block_rsv = &fs_info->trans_block_rsv;
2702	/*
2703	 * If we are using the NO_HOLES feature we might have already had a
2704	 * hole that overlaps a part of the region [lockstart, lockend] and
2705	 * ends at (or beyond) lockend. Since we have no file extent items to
2706	 * represent holes, drop_end can be less than lockend and so we must
2707	 * make sure we have an extent map representing the existing hole (the
2708	 * call to __btrfs_drop_extents() might have dropped the existing extent
2709	 * map representing the existing hole), otherwise the fast fsync path
2710	 * will not record the existence of the hole region
2711	 * [existing_hole_start, lockend].
2712	 */
2713	if (drop_end <= lockend)
2714		drop_end = lockend + 1;
2715	/*
2716	 * Don't insert file hole extent item if it's for a range beyond eof
2717	 * (because it's useless) or if it represents a 0 bytes range (when
2718	 * cur_offset == drop_end).
2719	 */
2720	if (cur_offset < ino_size && cur_offset < drop_end) {
2721		ret = fill_holes(trans, BTRFS_I(inode), path,
2722				cur_offset, drop_end);
2723		if (ret) {
2724			/* Same comment as above. */
2725			btrfs_abort_transaction(trans, ret);
2726			err = ret;
2727			goto out_trans;
2728		}
2729	}
2730
2731out_trans:
2732	if (!trans)
2733		goto out_free;
2734
2735	inode_inc_iversion(inode);
2736	inode->i_mtime = inode->i_ctime = current_time(inode);
2737
2738	trans->block_rsv = &fs_info->trans_block_rsv;
2739	ret = btrfs_update_inode(trans, root, inode);
2740	updated_inode = true;
2741	btrfs_end_transaction(trans);
2742	btrfs_btree_balance_dirty(fs_info);
2743out_free:
2744	btrfs_free_path(path);
2745	btrfs_free_block_rsv(fs_info, rsv);
2746out:
2747	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2748			     &cached_state);
2749out_only_mutex:
2750	if (!updated_inode && truncated_block && !ret && !err) {
2751		/*
2752		 * If we only end up zeroing part of a page, we still need to
2753		 * update the inode item, so that all the time fields are
2754		 * updated as well as the necessary btrfs inode in memory fields
2755		 * for detecting, at fsync time, if the inode isn't yet in the
2756		 * log tree or it's there but not up to date.
2757		 */
2758		trans = btrfs_start_transaction(root, 1);
2759		if (IS_ERR(trans)) {
2760			err = PTR_ERR(trans);
2761		} else {
2762			err = btrfs_update_inode(trans, root, inode);
2763			ret = btrfs_end_transaction(trans);
2764		}
2765	}
2766	inode_unlock(inode);
2767	if (ret && !err)
2768		err = ret;
2769	return err;
2770}
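/*
 * A minimal userspace sketch (not part of this file; path and offsets are
 * illustrative): hole punching is requested through fallocate(), and
 * FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/sparse.bin", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Deallocate 1 MiB starting at offset 4 KiB; i_size is unchanged */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024))
		perror("fallocate");
	close(fd);
	return 0;
}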
2771
2772/* Helper structure to record which range is already reserved */
2773struct falloc_range {
2774	struct list_head list;
2775	u64 start;
2776	u64 len;
2777};
2778
2779/*
2780 * Helper function to add falloc range
2781 *
2782 * The caller should have locked the larger extent range containing
2783 * [start, len).
2784 */
2785static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2786{
2787	struct falloc_range *prev = NULL;
2788	struct falloc_range *range = NULL;
2789
2790	if (list_empty(head))
2791		goto insert;
2792
2793	/*
2794	 * As fallocate iterates in increasing bytenr order, we only need to check
2795	 * the last range.
2796	 */
2797	prev = list_entry(head->prev, struct falloc_range, list);
2798	if (prev->start + prev->len == start) {
2799		prev->len += len;
2800		return 0;
2801	}
2802insert:
2803	range = kmalloc(sizeof(*range), GFP_KERNEL);
2804	if (!range)
2805		return -ENOMEM;
2806	range->start = start;
2807	range->len = len;
2808	list_add_tail(&range->list, head);
2809	return 0;
2810}
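/*
 * Example (illustrative): reserving [0, 64K) and then [64K, 128K) results
 * in a single list entry covering [0, 128K), because the second call sees
 * prev->start + prev->len == start and grows the previous range instead
 * of allocating a new node.
 */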
2811
2812static int btrfs_fallocate_update_isize(struct inode *inode,
2813					const u64 end,
2814					const int mode)
2815{
2816	struct btrfs_trans_handle *trans;
2817	struct btrfs_root *root = BTRFS_I(inode)->root;
2818	int ret;
2819	int ret2;
2820
2821	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2822		return 0;
2823
2824	trans = btrfs_start_transaction(root, 1);
2825	if (IS_ERR(trans))
2826		return PTR_ERR(trans);
2827
2828	inode->i_ctime = current_time(inode);
2829	i_size_write(inode, end);
2830	btrfs_ordered_update_i_size(inode, end, NULL);
2831	ret = btrfs_update_inode(trans, root, inode);
2832	ret2 = btrfs_end_transaction(trans);
2833
2834	return ret ? ret : ret2;
2835}
2836
2837enum {
2838	RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
2839	RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
2840	RANGE_BOUNDARY_HOLE = 2,
2841};
2842
2843static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2844						 u64 offset)
2845{
2846	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2847	struct extent_map *em;
2848	int ret;
2849
2850	offset = round_down(offset, sectorsize);
2851	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2852	if (IS_ERR(em))
2853		return PTR_ERR(em);
2854
2855	if (em->block_start == EXTENT_MAP_HOLE)
2856		ret = RANGE_BOUNDARY_HOLE;
2857	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2858		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2859	else
2860		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2861
2862	free_extent_map(em);
2863	return ret;
2864}
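/*
 * Example (illustrative, 4K sectorsize): a query for offset 10K rounds
 * down to 8K and classifies the block at [8K, 12K). The caller below then
 * zeroes a partial sector in place for a written extent, widens the
 * allocation range for a hole, and does nothing extra for a prealloc
 * extent.
 */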
2865
2866static int btrfs_zero_range(struct inode *inode,
2867			    loff_t offset,
2868			    loff_t len,
2869			    const int mode)
2870{
2871	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2872	struct extent_map *em;
2873	struct extent_changeset *data_reserved = NULL;
2874	int ret;
2875	u64 alloc_hint = 0;
2876	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2877	u64 alloc_start = round_down(offset, sectorsize);
2878	u64 alloc_end = round_up(offset + len, sectorsize);
2879	u64 bytes_to_reserve = 0;
2880	bool space_reserved = false;
2881
2882	inode_dio_wait(inode);
2883
2884	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2885			      alloc_start, alloc_end - alloc_start, 0);
2886	if (IS_ERR(em)) {
2887		ret = PTR_ERR(em);
2888		goto out;
2889	}
2890
2891	/*
2892	 * Avoid hole punching and extent allocation for some cases. More cases
2893	 * could be considered, but they are unlikely to be common and we keep things
2894	 * as simple as possible for now. Also, intentionally, if the target
2895	 * range contains one or more prealloc extents together with regular
2896	 * extents and holes, we drop all the existing extents and allocate a
2897	 * new prealloc extent, so that we get a larger contiguous disk extent.
2898	 */
2899	if (em->start <= alloc_start &&
2900	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2901		const u64 em_end = em->start + em->len;
2902
2903		if (em_end >= offset + len) {
2904			/*
2905			 * The whole range is already a prealloc extent,
2906			 * do nothing except updating the inode's i_size if
2907			 * needed.
2908			 */
2909			free_extent_map(em);
2910			ret = btrfs_fallocate_update_isize(inode, offset + len,
2911							   mode);
2912			goto out;
2913		}
2914		/*
2915		 * Part of the range is already a prealloc extent, so operate
2916		 * only on the remaining part of the range.
2917		 */
2918		alloc_start = em_end;
2919		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2920		len = offset + len - alloc_start;
2921		offset = alloc_start;
2922		alloc_hint = em->block_start + em->len;
2923	}
2924	free_extent_map(em);
2925
2926	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2927	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2928		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2929				      alloc_start, sectorsize, 0);
2930		if (IS_ERR(em)) {
2931			ret = PTR_ERR(em);
2932			goto out;
2933		}
2934
2935		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2936			free_extent_map(em);
2937			ret = btrfs_fallocate_update_isize(inode, offset + len,
2938							   mode);
2939			goto out;
2940		}
2941		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2942			free_extent_map(em);
2943			ret = btrfs_truncate_block(inode, offset, len, 0);
2944			if (!ret)
2945				ret = btrfs_fallocate_update_isize(inode,
2946								   offset + len,
2947								   mode);
2948			return ret;
2949		}
2950		free_extent_map(em);
2951		alloc_start = round_down(offset, sectorsize);
2952		alloc_end = alloc_start + sectorsize;
2953		goto reserve_space;
2954	}
2955
2956	alloc_start = round_up(offset, sectorsize);
2957	alloc_end = round_down(offset + len, sectorsize);
2958
2959	/*
2960	 * For unaligned ranges, check the pages at the boundaries, they might
2961	 * map to an extent, in which case we need to partially zero them, or
2962	 * they might map to a hole, in which case we need our allocation range
2963	 * to cover them.
2964	 */
2965	if (!IS_ALIGNED(offset, sectorsize)) {
2966		ret = btrfs_zero_range_check_range_boundary(inode, offset);
2967		if (ret < 0)
2968			goto out;
2969		if (ret == RANGE_BOUNDARY_HOLE) {
2970			alloc_start = round_down(offset, sectorsize);
2971			ret = 0;
2972		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2973			ret = btrfs_truncate_block(inode, offset, 0, 0);
2974			if (ret)
2975				goto out;
2976		} else {
2977			ret = 0;
2978		}
2979	}
2980
2981	if (!IS_ALIGNED(offset + len, sectorsize)) {
2982		ret = btrfs_zero_range_check_range_boundary(inode,
2983							    offset + len);
2984		if (ret < 0)
2985			goto out;
2986		if (ret == RANGE_BOUNDARY_HOLE) {
2987			alloc_end = round_up(offset + len, sectorsize);
2988			ret = 0;
2989		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2990			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
2991			if (ret)
2992				goto out;
2993		} else {
2994			ret = 0;
2995		}
2996	}
2997
2998reserve_space:
2999	if (alloc_start < alloc_end) {
3000		struct extent_state *cached_state = NULL;
3001		const u64 lockstart = alloc_start;
3002		const u64 lockend = alloc_end - 1;
3003
3004		bytes_to_reserve = alloc_end - alloc_start;
3005		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3006						      bytes_to_reserve);
3007		if (ret < 0)
3008			goto out;
3009		space_reserved = true;
3010		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3011						alloc_start, bytes_to_reserve);
3012		if (ret)
3013			goto out;
3014		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3015						  &cached_state);
3016		if (ret)
3017			goto out;
3018		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3019						alloc_end - alloc_start,
3020						i_blocksize(inode),
3021						offset + len, &alloc_hint);
3022		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3023				     lockend, &cached_state);
3024		/* btrfs_prealloc_file_range releases reserved space on error */
3025		if (ret) {
3026			space_reserved = false;
3027			goto out;
3028		}
3029	}
3030	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3031out:
3032	if (ret && space_reserved)
3033		btrfs_free_reserved_data_space(inode, data_reserved,
3034					       alloc_start, bytes_to_reserve);
3035	extent_changeset_free(data_reserved);
3036
3037	return ret;
3038}
3039
3040static long btrfs_fallocate(struct file *file, int mode,
3041			    loff_t offset, loff_t len)
3042{
3043	struct inode *inode = file_inode(file);
3044	struct extent_state *cached_state = NULL;
3045	struct extent_changeset *data_reserved = NULL;
3046	struct falloc_range *range;
3047	struct falloc_range *tmp;
3048	struct list_head reserve_list;
3049	u64 cur_offset;
3050	u64 last_byte;
3051	u64 alloc_start;
3052	u64 alloc_end;
3053	u64 alloc_hint = 0;
3054	u64 locked_end;
3055	u64 actual_end = 0;
3056	struct extent_map *em;
3057	int blocksize = btrfs_inode_sectorsize(inode);
3058	int ret;
3059
3060	alloc_start = round_down(offset, blocksize);
3061	alloc_end = round_up(offset + len, blocksize);
3062	cur_offset = alloc_start;
3063
3064	/* Make sure we aren't being given some crap mode */
3065	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3066		     FALLOC_FL_ZERO_RANGE))
3067		return -EOPNOTSUPP;
3068
3069	if (mode & FALLOC_FL_PUNCH_HOLE)
3070		return btrfs_punch_hole(inode, offset, len);
3071
3072	/*
3073	 * Only trigger disk allocation, don't trigger qgroup reserve
3074	 *
3075	 * For qgroup space, it will be checked later.
3076	 */
3077	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3078		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3079						      alloc_end - alloc_start);
3080		if (ret < 0)
3081			return ret;
3082	}
3083
3084	inode_lock(inode);
3085
3086	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3087		ret = inode_newsize_ok(inode, offset + len);
3088		if (ret)
3089			goto out;
3090	}
3091
3092	/*
3093	 * TODO: Move these two operations after we have checked
3094	 * with the page truncated or the size expanded.
3095	 *
3096	 * But that's a minor problem and won't do much harm anyway.
3097	 * But that's a minor problem and won't do much harm BTW.
3098	 */
3099	if (alloc_start > inode->i_size) {
3100		ret = btrfs_cont_expand(inode, i_size_read(inode),
3101					alloc_start);
3102		if (ret)
3103			goto out;
3104	} else if (offset + len > inode->i_size) {
3105		/*
3106		 * If we are fallocating from the end of the file onward we
3107		 * need to zero out the end of the block if i_size lands in the
3108		 * middle of a block.
3109		 */
3110		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3111		if (ret)
3112			goto out;
3113	}
3114
3115	/*
3116	 * wait for ordered IO before we have any locks.  We'll loop again
3117	 * below with the locks held.
3118	 */
3119	ret = btrfs_wait_ordered_range(inode, alloc_start,
3120				       alloc_end - alloc_start);
3121	if (ret)
3122		goto out;
3123
3124	if (mode & FALLOC_FL_ZERO_RANGE) {
3125		ret = btrfs_zero_range(inode, offset, len, mode);
3126		inode_unlock(inode);
3127		return ret;
3128	}
3129
3130	locked_end = alloc_end - 1;
3131	while (1) {
3132		struct btrfs_ordered_extent *ordered;
3133
3134		/* the extent lock is ordered inside the running
3135		 * transaction
3136		 */
3137		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3138				 locked_end, &cached_state);
3139		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3140
3141		if (ordered &&
3142		    ordered->file_offset + ordered->len > alloc_start &&
3143		    ordered->file_offset < alloc_end) {
3144			btrfs_put_ordered_extent(ordered);
3145			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3146					     alloc_start, locked_end,
3147					     &cached_state);
3148			/*
3149			 * we can't wait on the range with the transaction
3150			 * running or with the extent lock held
3151			 */
3152			ret = btrfs_wait_ordered_range(inode, alloc_start,
3153						       alloc_end - alloc_start);
3154			if (ret)
3155				goto out;
3156		} else {
3157			if (ordered)
3158				btrfs_put_ordered_extent(ordered);
3159			break;
3160		}
3161	}
3162
3163	/* First, check if we exceed the qgroup limit */
3164	INIT_LIST_HEAD(&reserve_list);
3165	while (cur_offset < alloc_end) {
3166		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3167				      alloc_end - cur_offset, 0);
3168		if (IS_ERR(em)) {
3169			ret = PTR_ERR(em);
3170			break;
3171		}
3172		last_byte = min(extent_map_end(em), alloc_end);
3173		actual_end = min_t(u64, extent_map_end(em), offset + len);
3174		last_byte = ALIGN(last_byte, blocksize);
3175		if (em->block_start == EXTENT_MAP_HOLE ||
3176		    (cur_offset >= inode->i_size &&
3177		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3178			ret = add_falloc_range(&reserve_list, cur_offset,
3179					       last_byte - cur_offset);
3180			if (ret < 0) {
3181				free_extent_map(em);
3182				break;
3183			}
3184			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3185					cur_offset, last_byte - cur_offset);
3186			if (ret < 0) {
3187				free_extent_map(em);
3188				break;
3189			}
3190		} else {
3191			/*
3192			 * We don't need to reserve an unwritten extent for this
3193			 * range; free the reserved data space first, otherwise
3194			 * it'll result in a false ENOSPC error.
3195			 */
3196			btrfs_free_reserved_data_space(inode, data_reserved,
3197					cur_offset, last_byte - cur_offset);
3198		}
3199		free_extent_map(em);
3200		cur_offset = last_byte;
3201	}
3202
3203	/*
3204	 * If ret is still 0, it means we're OK to fallocate.
3205	 * Otherwise, just clean up the list and exit.
3206	 */
3207	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3208		if (!ret)
3209			ret = btrfs_prealloc_file_range(inode, mode,
3210					range->start,
3211					range->len, i_blocksize(inode),
3212					offset + len, &alloc_hint);
3213		else
3214			btrfs_free_reserved_data_space(inode,
3215					data_reserved, range->start,
3216					range->len);
3217		list_del(&range->list);
3218		kfree(range);
3219	}
3220	if (ret < 0)
3221		goto out_unlock;
3222
3223	/*
3224	 * We didn't need to allocate any more space, but we still extended the
3225	 * size of the file so we need to update i_size and the inode item.
3226	 */
3227	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3228out_unlock:
3229	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3230			     &cached_state);
3231out:
3232	inode_unlock(inode);
3233	/* Let go of our reservation. */
3234	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3235		btrfs_free_reserved_data_space(inode, data_reserved,
3236				alloc_start, alloc_end - cur_offset);
3237	extent_changeset_free(data_reserved);
3238	return ret;
3239}
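/*
 * A minimal userspace sketch (not part of this file; path and sizes are
 * illustrative) of the two modes handled directly above: mode 0
 * preallocates unwritten extents and extends i_size, while
 * FALLOC_FL_ZERO_RANGE routes into btrfs_zero_range().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/prealloc.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 8 MiB from offset 0, extending the file size */
	if (fallocate(fd, 0, 0, 8 * 1024 * 1024))
		perror("fallocate");
	/* Zero 64 KiB at offset 1 MiB without changing the file size */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      1024 * 1024, 64 * 1024))
		perror("fallocate zero range");
	close(fd);
	return 0;
}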
3240
3241static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3242{
3243	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3244	struct extent_map *em = NULL;
3245	struct extent_state *cached_state = NULL;
3246	u64 lockstart;
3247	u64 lockend;
3248	u64 start;
3249	u64 len;
3250	int ret = 0;
3251
3252	if (inode->i_size == 0)
3253		return -ENXIO;
3254
3255	/*
3256	 * *offset can be negative; in this case we start finding DATA/HOLE from
3257	 * the very start of the file.
3258	 */
3259	start = max_t(loff_t, 0, *offset);
3260
3261	lockstart = round_down(start, fs_info->sectorsize);
3262	lockend = round_up(i_size_read(inode),
3263			   fs_info->sectorsize);
3264	if (lockend <= lockstart)
3265		lockend = lockstart + fs_info->sectorsize;
3266	lockend--;
3267	len = lockend - lockstart + 1;
3268
3269	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3270			 &cached_state);
3271
3272	while (start < inode->i_size) {
3273		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
3274				start, len, 0);
3275		if (IS_ERR(em)) {
3276			ret = PTR_ERR(em);
3277			em = NULL;
3278			break;
3279		}
3280
3281		if (whence == SEEK_HOLE &&
3282		    (em->block_start == EXTENT_MAP_HOLE ||
3283		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3284			break;
3285		else if (whence == SEEK_DATA &&
3286			   (em->block_start != EXTENT_MAP_HOLE &&
3287			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3288			break;
3289
3290		start = em->start + em->len;
3291		free_extent_map(em);
3292		em = NULL;
3293		cond_resched();
3294	}
3295	free_extent_map(em);
3296	if (!ret) {
3297		if (whence == SEEK_DATA && start >= inode->i_size)
3298			ret = -ENXIO;
3299		else
3300			*offset = min_t(loff_t, start, inode->i_size);
3301	}
3302	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3303			     &cached_state);
3304	return ret;
3305}
3306
3307static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3308{
3309	struct inode *inode = file->f_mapping->host;
3310	int ret;
3311
3312	inode_lock(inode);
3313	switch (whence) {
3314	case SEEK_END:
3315	case SEEK_CUR:
3316		offset = generic_file_llseek(file, offset, whence);
3317		goto out;
3318	case SEEK_DATA:
3319	case SEEK_HOLE:
3320		if (offset >= i_size_read(inode)) {
3321			inode_unlock(inode);
3322			return -ENXIO;
3323		}
3324
3325		ret = find_desired_extent(inode, &offset, whence);
3326		if (ret) {
3327			inode_unlock(inode);
3328			return ret;
3329		}
3330	}
3331
3332	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3333out:
3334	inode_unlock(inode);
3335	return offset;
3336}
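/*
 * A minimal userspace sketch (not part of this file; the path is
 * illustrative): walking the data segments of a sparse file with
 * SEEK_DATA/SEEK_HOLE, which the VFS hands to btrfs_file_llseek() ->
 * find_desired_extent() above. lseek() fails with ENXIO once no data
 * remains past the offset, which ends the loop.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/sparse.bin", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}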
3337
3338static int btrfs_file_open(struct inode *inode, struct file *filp)
3339{
3340	filp->f_mode |= FMODE_NOWAIT;
3341	return generic_file_open(inode, filp);
3342}
3343
3344const struct file_operations btrfs_file_operations = {
3345	.llseek		= btrfs_file_llseek,
3346	.read_iter      = generic_file_read_iter,
3347	.splice_read	= generic_file_splice_read,
3348	.write_iter	= btrfs_file_write_iter,
3349	.mmap		= btrfs_file_mmap,
3350	.open		= btrfs_file_open,
3351	.release	= btrfs_release_file,
3352	.fsync		= btrfs_sync_file,
3353	.fallocate	= btrfs_fallocate,
3354	.unlocked_ioctl	= btrfs_ioctl,
3355#ifdef CONFIG_COMPAT
3356	.compat_ioctl	= btrfs_compat_ioctl,
3357#endif
3358	.clone_file_range = btrfs_clone_file_range,
3359	.dedupe_file_range = btrfs_dedupe_file_range,
3360};
3361
3362void __cold btrfs_auto_defrag_exit(void)
3363{
3364	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3365}
3366
3367int __init btrfs_auto_defrag_init(void)
3368{
3369	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3370					sizeof(struct inode_defrag), 0,
3371					SLAB_MEM_SPREAD,
3372					NULL);
3373	if (!btrfs_inode_defrag_cachep)
3374		return -ENOMEM;
3375
3376	return 0;
3377}
3378
3379int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3380{
3381	int ret;
3382
3383	/*
3384	 * So with compression we will find and lock a dirty page and clear the
3385	 * first one as dirty, set up an async extent, and immediately return
3386	 * with the entire range locked but with nobody actually marked with
3387	 * writeback.  So we can't just filemap_write_and_wait_range() and
3388	 * expect it to work since it will just kick off a thread to do the
3389	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3390	 * since it will wait on the page lock, which won't be unlocked until
3391	 * after the pages have been marked as writeback and so we're good to go
3392	 * from there.  We have to do this otherwise we'll miss the ordered
3393	 * extents and that results in badness.  Please Josef, do not think you
3394	 * know better and pull this out at some point in the future, it is
3395	 * right and you are wrong.
3396	 */
3397	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3398	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3399			     &BTRFS_I(inode)->runtime_flags))
3400		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3401
3402	return ret;
3403}