// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to
 * remember which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added; we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
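
/*
 * Illustrative sketch (not part of the original file): the comparator above
 * orders records by (root, ino), so walking the rb-tree with rb_first()/
 * rb_next() visits defrag records grouped by root and sorted by inode
 * number. A hypothetical debug helper could dump the queue like this:
 */
#if 0
static void example_dump_defrag_queue(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	for (node = rb_first(&fs_info->defrag_inodes); node;
	     node = rb_next(node)) {
		struct inode_defrag *d;

		d = rb_entry(node, struct inode_defrag, rb_node);
		pr_info("defrag queued: root %llu ino %llu transid %llu\n",
			d->root, d->ino, d->transid);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
#endif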

/*
 * Insert a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag
			 * run, make sure to lower the transid of our
			 * existing record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and the inode is then evicted
		 * from memory, the flag is lost when the inode is read back
		 * in. In that case we may find an existing defrag record in
		 * the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defrag record for the inode we want; if it doesn't exist, we
 * will get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
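
/*
 * Worked example (illustrative, not part of the original file): with
 * records for (root 5, ino 260) and (root 5, ino 300) queued, a lookup
 * for (root 5, ino 261) finds no exact match, lands on a neighbour, and
 * the rb_next() step above advances to (5, 300) - the next record in
 * (root, ino) order. That record is erased from the tree and returned
 * to the caller, which is how btrfs_run_defrag_inodes() below iterates
 * the queue by asking for defrag->ino + 1 each round.
 */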

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024
 271
 272static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 273				    struct inode_defrag *defrag)
 274{
 275	struct btrfs_root *inode_root;
 276	struct inode *inode;
 277	struct btrfs_key key;
 278	struct btrfs_ioctl_defrag_range_args range;
 279	int num_defrag;
 280	int index;
 281	int ret;
 282
 283	/* get the inode */
 284	key.objectid = defrag->root;
 285	key.type = BTRFS_ROOT_ITEM_KEY;
 286	key.offset = (u64)-1;
 287
 288	index = srcu_read_lock(&fs_info->subvol_srcu);
 289
 290	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
 291	if (IS_ERR(inode_root)) {
 292		ret = PTR_ERR(inode_root);
 293		goto cleanup;
 294	}
 295
 296	key.objectid = defrag->ino;
 297	key.type = BTRFS_INODE_ITEM_KEY;
 298	key.offset = 0;
 299	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
 300	if (IS_ERR(inode)) {
 301		ret = PTR_ERR(inode);
 302		goto cleanup;
 303	}
 304	srcu_read_unlock(&fs_info->subvol_srcu, index);
 305
 306	/* do a chunk of defrag */
 307	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 308	memset(&range, 0, sizeof(range));
 309	range.len = (u64)-1;
 310	range.start = defrag->last_offset;
 311
 312	sb_start_write(fs_info->sb);
 313	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 314				       BTRFS_DEFRAG_BATCH);
 315	sb_end_write(fs_info->sb);
 316	/*
 317	 * if we filled the whole defrag batch, there
 318	 * must be more work to do.  Queue this defrag
 319	 * again
 320	 */
 321	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 322		defrag->last_offset = range.start;
 323		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 324	} else if (defrag->last_offset && !defrag->cycled) {
 325		/*
 326		 * we didn't fill our defrag batch, but
 327		 * we didn't start at zero.  Make sure we loop
 328		 * around to the start of the file.
 329		 */
 330		defrag->last_offset = 0;
 331		defrag->cycled = 1;
 332		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 333	} else {
 334		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 335	}
 336
 337	iput(inode);
 338	return 0;
 339cleanup:
 340	srcu_read_unlock(&fs_info->subvol_srcu, index);
 341	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 342	return ret;
 343}
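
/*
 * Worked example (illustrative, not part of the original file): with
 * BTRFS_DEFRAG_BATCH of 1024, an inode is defragged at most 1024 extents
 * at a time. If btrfs_defrag_file() reports exactly 1024 defragged
 * extents, the record is requeued with last_offset set to where the
 * batch stopped and a later pass resumes from there. Once a pass that
 * started past offset zero comes up short, one final cycle from offset
 * zero catches anything written behind the cursor.
 */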

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/*
 * Simple helper to fault in pages and copy.  This should go away and
 * be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up to date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
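
/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_SIZE of 4096, pos = 5000 and write_bytes = 5000, the first
 * iteration copies 3192 bytes into prepared_pages[0] starting at offset
 * 904, the second copies the remaining 1808 bytes into
 * prepared_pages[1] at offset 0. A short copy (e.g. an unfaulted user
 * page) terminates the loop early and the caller falls back to copying
 * one page at a time.
 */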

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.  There should be no need to mark the pages
		 * accessed, as prepare_pages should have marked them
		 * accessed via find_or_create_page().
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start,
				      search_len, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * After copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * This also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
		if (start_pos >= isize &&
		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
			/*
			 * There can't be any extents following eof in this case
			 * so just set the delalloc new bit for the range
			 * directly.
			 */
			extra_bits |= EXTENT_DELALLOC_NEW;
		} else {
			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
							    start_pos,
							    num_bytes, cached);
			if (err)
				return err;
		}
	}

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * We've only changed i_size in RAM, and we haven't updated the
	 * disk i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
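
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4096 byte sectorsize, pos = 5000 and write_bytes = 100 give
 * start_pos = 4096 and num_bytes = round_up(100 + 5000 - 4096, 4096)
 * = 4096, so delalloc is accounted for the single sector [4096, 8191]
 * even though only 100 bytes were written.
 */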

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
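
/*
 * Worked example (illustrative, not part of the original file): dropping
 * the range [8192, 12287] from a cached extent map covering [4096, 16383]
 * hits both split cases above:
 *
 *   before:  [4096 ............................. 16383]
 *   after:   [4096 .. 8191]          [12288 .... 16383]
 *
 * The left piece keeps the original start, the right piece has its
 * block_start advanced by "diff" (unless compressed, where the whole
 * physical extent is still referenced), and the middle is removed.
 */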

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(BTRFS_I(inode));
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leaves, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
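
/*
 * Note on the in/out parameters above (illustrative, not part of the
 * original file): *start and *end act as both filter and result. A
 * caller passing *start = 0 and *end = X accepts only a neighbour whose
 * extent ends exactly at X; on success the neighbour's [key.offset,
 * extent_end) is written back, which is how the callers below learn the
 * boundaries of a mergeable extent sharing the same bytenr.
 */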

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
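
/*
 * Worked example (illustrative, not part of the original file): writing
 * [8192, 12287] into a preallocated extent covering [0, 16383] splits it
 * into three items - [0, 8191] and [12288, 16383] stay PREALLOC while
 * [8192, 12287] becomes a REG extent - all three sharing the same disk
 * bytenr with adjusted file extent offsets. If a neighbouring extent
 * already references the same bytenr, the merge paths above absorb the
 * written range into it instead of splitting.
 */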

/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other value < 0 - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos,
			   fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(&inode->vfs_inode,
					ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	ret = btrfs_start_write_no_snapshotting(root);
	if (!ret)
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;

	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
					   lockend, NULL);

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
		btrfs_end_write_no_snapshotting(root);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}

	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
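
/*
 * Illustrative sketch (not part of the original file): on success the
 * caller still holds the no-snapshotting write lock taken by
 * btrfs_start_write_no_snapshotting() and must drop it with
 * btrfs_end_write_no_snapshotting() once the nocow write finishes, as
 * btrfs_buffered_write() below does via only_release_metadata. A
 * hypothetical probe-and-release caller would look like this:
 */
#if 0
static ssize_t example_nocow_probe(struct btrfs_inode *inode, loff_t pos,
				   size_t len)
{
	size_t write_bytes = len;

	if (check_can_nocow(inode, pos, &write_bytes) > 0) {
		/* up to write_bytes (<= len) can be written without COW */
		btrfs_end_write_no_snapshotting(inode->root);
		return write_bytes;
	}
	return 0;
}
#endif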

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					       struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
				fs_info->sectorsize);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
						      BTRFS_INODE_PREALLOC)) &&
			    check_can_nocow(BTRFS_I(inode), pos,
					&write_bytes) > 0) {
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
				reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
						data_reserved, pos,
						write_bytes);
			else
				btrfs_end_write_no_snapshotting(root);
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(inode,
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
						pos, copied, &cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_end_write_no_snapshotting(root);

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
			btrfs_btree_balance_dirty(fs_info);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_end_write_no_snapshotting(root);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(inode, data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}
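
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4096 byte sectorsize, a write of 5000 bytes at pos = 6000 has
 * sector_offset = 1904 and reserve_bytes = round_up(5000 + 1904, 4096)
 * = 8192, i.e. two sectors. If the copy comes up short and only 1000
 * bytes land, dirty_sectors = round_up(1000 + 1904, 4096) / 4096 = 1,
 * so the "num_sectors > dirty_sectors" branch above hands back the
 * 4096 bytes reserved for the sector that was never dirtied.
 */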

static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
1884
1885static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1886				    struct iov_iter *from)
1887{
1888	struct file *file = iocb->ki_filp;
1889	struct inode *inode = file_inode(file);
1890	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1891	struct btrfs_root *root = BTRFS_I(inode)->root;
1892	u64 start_pos;
1893	u64 end_pos;
1894	ssize_t num_written = 0;
1895	const bool sync = iocb->ki_flags & IOCB_DSYNC;
1896	ssize_t err;
1897	loff_t pos;
1898	size_t count;
1899	loff_t oldsize;
1900	int clean_page = 0;
1901
1902	if (!(iocb->ki_flags & IOCB_DIRECT) &&
1903	    (iocb->ki_flags & IOCB_NOWAIT))
1904		return -EOPNOTSUPP;
1905
1906	if (!inode_trylock(inode)) {
1907		if (iocb->ki_flags & IOCB_NOWAIT)
1908			return -EAGAIN;
1909		inode_lock(inode);
1910	}
1911
1912	err = generic_write_checks(iocb, from);
1913	if (err <= 0) {
1914		inode_unlock(inode);
1915		return err;
1916	}
1917
1918	pos = iocb->ki_pos;
1919	count = iov_iter_count(from);
1920	if (iocb->ki_flags & IOCB_NOWAIT) {
1921		/*
1922		 * We will have to allocate space in case nodatacow is
1923		 * not set, which a non-blocking write must not do, so bail.
1924		 */
1925		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1926					      BTRFS_INODE_PREALLOC)) ||
1927		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1928			inode_unlock(inode);
1929			return -EAGAIN;
1930		}
1931	}
1932
1933	current->backing_dev_info = inode_to_bdi(inode);
1934	err = file_remove_privs(file);
1935	if (err) {
1936		inode_unlock(inode);
1937		goto out;
1938	}
1939
1940	/*
1941	 * If BTRFS flips readonly due to some impossible error
1942	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1943	 * although we have opened a file as writable, we have
1944	 * to stop this write operation to ensure FS consistency.
1945	 */
1946	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1947		inode_unlock(inode);
1948		err = -EROFS;
1949		goto out;
1950	}
1951
1952	/*
1953	 * We reserve space for updating the inode when we reserve space for the
1954	 * extent we are going to write, so we will get ENOSPC there.  We don't
1955	 * need to start yet another transaction to update the inode as we will
1956	 * update the inode when we finish writing whatever data we write.
1957	 */
1958	update_time_for_write(inode);
1959
1960	start_pos = round_down(pos, fs_info->sectorsize);
1961	oldsize = i_size_read(inode);
1962	if (start_pos > oldsize) {
1963		/* Expand hole size to cover write data, preventing empty gap */
1964		end_pos = round_up(pos + count,
1965				   fs_info->sectorsize);
1966		err = btrfs_cont_expand(inode, oldsize, end_pos);
1967		if (err) {
1968			inode_unlock(inode);
1969			goto out;
1970		}
1971		if (start_pos > round_up(oldsize, fs_info->sectorsize))
1972			clean_page = 1;
1973	}
1974
1975	if (sync)
1976		atomic_inc(&BTRFS_I(inode)->sync_writers);
1977
1978	if (iocb->ki_flags & IOCB_DIRECT) {
1979		num_written = __btrfs_direct_write(iocb, from);
1980	} else {
1981		num_written = btrfs_buffered_write(iocb, from);
1982		if (num_written > 0)
1983			iocb->ki_pos = pos + num_written;
1984		if (clean_page)
1985			pagecache_isize_extended(inode, oldsize,
1986						i_size_read(inode));
1987	}
1988
1989	inode_unlock(inode);
1990
1991	/*
1992	 * We also have to set last_sub_trans to the current log transid,
1993	 * otherwise subsequent syncs to a file that's been synced in this
1994	 * transaction will appear to have already occurred.
1995	 */
1996	spin_lock(&BTRFS_I(inode)->lock);
1997	BTRFS_I(inode)->last_sub_trans = root->log_transid;
1998	spin_unlock(&BTRFS_I(inode)->lock);
1999	if (num_written > 0)
2000		num_written = generic_write_sync(iocb, num_written);
2001
2002	if (sync)
2003		atomic_dec(&BTRFS_I(inode)->sync_writers);
2004out:
2005	current->backing_dev_info = NULL;
2006	return num_written ? num_written : err;
2007}
2008
2009int btrfs_release_file(struct inode *inode, struct file *filp)
2010{
2011	struct btrfs_file_private *private = filp->private_data;
2012
2013	if (private)
2014		kfree(private->filldir_buf);
2015	kfree(private);
2016	filp->private_data = NULL;
2017
2018	/*
2019	 * ordered_data_close is set by setattr when we are about to truncate
2020	 * a file from a non-zero size to a zero size.  This tries to
2021	 * flush down new bytes that may have been written if the
2022	 * application were using truncate to replace a file in place.
2023	 */
2024	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2025			       &BTRFS_I(inode)->runtime_flags))
2026		filemap_flush(inode->i_mapping);
2027	return 0;
2028}
2029
2030static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2031{
2032	int ret;
2033	struct blk_plug plug;
2034
2035	/*
2036	 * This is only called in fsync, which would do synchronous writes, so
2037	 * a plug can merge adjacent IOs as much as possible.  Especially when
2038	 * multiple disks are used in a raid profile, a large IO can be split
2039	 * into several stripe-length segments (currently 64K).
2040	 */
2041	blk_start_plug(&plug);
2042	atomic_inc(&BTRFS_I(inode)->sync_writers);
2043	ret = btrfs_fdatawrite_range(inode, start, end);
2044	atomic_dec(&BTRFS_I(inode)->sync_writers);
2045	blk_finish_plug(&plug);
2046
2047	return ret;
2048}
2049
2050/*
2051 * fsync call for both files and directories.  This logs the inode into
2052 * the tree log instead of forcing full commits whenever possible.
2053 *
2054 * It needs to call filemap_fdatawait so that all the ordered extent updates
2055 * in the metadata btree are up to date for copying to the log.
2056 *
2057 * It drops the inode mutex before doing the tree log commit.  This is an
2058 * important optimization for directories because holding the mutex prevents
2059 * new operations on the dir while we write to disk.
2060 */
2061int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2062{
2063	struct dentry *dentry = file_dentry(file);
2064	struct inode *inode = d_inode(dentry);
2065	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2066	struct btrfs_root *root = BTRFS_I(inode)->root;
2067	struct btrfs_trans_handle *trans;
2068	struct btrfs_log_ctx ctx;
2069	int ret = 0, err;
2070
2071	trace_btrfs_sync_file(file, datasync);
2072
2073	btrfs_init_log_ctx(&ctx, inode);
2074
2075	/*
2076	 * We write the dirty pages in the range and wait until they complete
2077	 * outside of the ->i_mutex, which allows the dirty pages to be
2078	 * flushed by multiple tasks, improving performance.  See
2079	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2080	 */
2081	ret = start_ordered_ops(inode, start, end);
2082	if (ret)
2083		goto out;
2084
2085	inode_lock(inode);
2086
2087	/*
2088	 * We take the dio_sem here because the tree log stuff can race with
2089	 * lockless dio writes and get an extent map logged for an extent we
2090	 * never waited on.  We need it this high up for lockdep reasons.
2091	 */
2092	down_write(&BTRFS_I(inode)->dio_sem);
2093
2094	atomic_inc(&root->log_batch);
2095
2096	/*
2097	 * If the inode needs a full sync, make sure we use a full range to
2098	 * avoid log tree corruption, due to hole detection racing with ordered
2099	 * extent completion for adjacent ranges, and assertion failures during
2100	 * hole detection. Do this while holding the inode lock, to avoid races
2101	 * with other tasks.
2102	 */
2103	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2104		     &BTRFS_I(inode)->runtime_flags)) {
2105		start = 0;
2106		end = LLONG_MAX;
2107	}
2108
2109	/*
2110	 * Before we acquired the inode's lock, someone may have dirtied more
2111	 * pages in the target range. We need to make sure that writeback for
2112	 * any such pages does not start while we are logging the inode, because
2113	 * if it does, any of the following might happen when we are not doing a
2114	 * full inode sync:
2115	 *
2116	 * 1) We log an extent after its writeback finishes but before its
2117	 *    checksums are added to the csum tree, leading to -EIO errors
2118	 *    when attempting to read the extent after a log replay.
2119	 *
2120	 * 2) We can end up logging an extent before its writeback finishes.
2121	 *    Therefore after the log replay we will have a file extent item
2122	 *    pointing to an unwritten extent (and no data checksums as well).
2123	 *
2124	 * So trigger writeback for any eventual new dirty pages and then we
2125	 * wait for all ordered extents to complete below.
2126	 */
2127	ret = start_ordered_ops(inode, start, end);
2128	if (ret) {
2129		inode_unlock(inode);
2130		goto out;
2131	}
2132
2133	/*
2134	 * We have to do this here to avoid the priority inversion of waiting on
2135	 * IO of a lower priority task while holding a transaction open.
2136	 *
2137	 * Also, since the range length can only be represented by a u64, we have
2138	 * to do the typecasts to avoid signed overflow if it's [0, LLONG_MAX].
2139	 */
2140	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
2141	if (ret) {
2142		up_write(&BTRFS_I(inode)->dio_sem);
2143		inode_unlock(inode);
2144		goto out;
2145	}
2146	atomic_inc(&root->log_batch);
2147
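	/*
	 * Full barrier (hedged): order the log_batch increment above against
	 * the lockless inode_in_log/last_trans checks below.
	 */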
2148	smp_mb();
2149	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2150	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2151		/*
2152		 * We've had everything committed since the last time we were
2153		 * modified so clear this flag in case it was set for whatever
2154		 * reason, it's no longer relevant.
2155		 */
2156		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2157			  &BTRFS_I(inode)->runtime_flags);
2158		/*
2159		 * An ordered extent might have started before and completed
2160		 * already with io errors, in which case the inode was not
2161		 * updated and we end up here. So check the inode's mapping
2162		 * for any errors that might have happened since fsync
2163		 * was last called.
2164		 */
2165		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2166		up_write(&BTRFS_I(inode)->dio_sem);
2167		inode_unlock(inode);
2168		goto out;
2169	}
2170
2171	/*
2172	 * We use start here because we will need to wait on the IO to complete
2173	 * in btrfs_sync_log, which could require joining a transaction (for
2174	 * example checking cross references in the nocow path).  If we use join
2175	 * here we could get into a situation where we're waiting on IO to
2176	 * happen that is blocked on a transaction trying to commit.  With start
2177	 * we inc the extwriter counter, so we wait for all extwriters to exit
2178	 * before we start blocking joiners.  This comment is to keep somebody
2179	 * from thinking they are super smart and changing this to
2180	 * btrfs_join_transaction *cough*Josef*cough*.
2181	 */
2182	trans = btrfs_start_transaction(root, 0);
2183	if (IS_ERR(trans)) {
2184		ret = PTR_ERR(trans);
2185		up_write(&BTRFS_I(inode)->dio_sem);
2186		inode_unlock(inode);
2187		goto out;
2188	}
2189
2190	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2191	if (ret < 0) {
2192		/* Fallthrough and commit/free transaction. */
2193		ret = 1;
2194	}
2195
2196	/* we've logged all the items and now have a consistent
2197	 * version of the file in the log.  It is possible that
2198	 * someone will come in and modify the file, but that's
2199	 * fine because the log is consistent on disk, and we
2200	 * have references to all of the file's extents
2201	 *
2202	 * It is possible that someone will come in and log the
2203	 * file again, but that will end up using the synchronization
2204	 * inside btrfs_sync_log to keep things safe.
2205	 */
2206	up_write(&BTRFS_I(inode)->dio_sem);
2207	inode_unlock(inode);
2208
2209	if (ret != BTRFS_NO_LOG_SYNC) {
2210		if (!ret) {
2211			ret = btrfs_sync_log(trans, root, &ctx);
2212			if (!ret) {
2213				ret = btrfs_end_transaction(trans);
2214				goto out;
2215			}
2216		}
2217		ret = btrfs_commit_transaction(trans);
2218	} else {
2219		ret = btrfs_end_transaction(trans);
2220	}
2221out:
2222	ASSERT(list_empty(&ctx.list));
2223	err = file_check_and_advance_wb_err(file);
2224	if (!ret)
2225		ret = err;
2226	return ret > 0 ? -EIO : ret;
2227}
2228
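/*
 * Writes through mmap() do not go through btrfs_file_write_iter(); the
 * first store to a clean page instead faults into btrfs_page_mkwrite(),
 * which reserves space and dirties the page (hedged sketch):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;	// the first store triggers ->page_mkwrite
 */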
2229static const struct vm_operations_struct btrfs_file_vm_ops = {
2230	.fault		= filemap_fault,
2231	.map_pages	= filemap_map_pages,
2232	.page_mkwrite	= btrfs_page_mkwrite,
2233};
2234
2235static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2236{
2237	struct address_space *mapping = filp->f_mapping;
2238
2239	if (!mapping->a_ops->readpage)
2240		return -ENOEXEC;
2241
2242	file_accessed(filp);
2243	vma->vm_ops = &btrfs_file_vm_ops;
2244
2245	return 0;
2246}
2247
2248static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2249			  int slot, u64 start, u64 end)
2250{
2251	struct btrfs_file_extent_item *fi;
2252	struct btrfs_key key;
2253
2254	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2255		return 0;
2256
2257	btrfs_item_key_to_cpu(leaf, &key, slot);
2258	if (key.objectid != btrfs_ino(inode) ||
2259	    key.type != BTRFS_EXTENT_DATA_KEY)
2260		return 0;
2261
2262	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2263
2264	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2265		return 0;
2266
2267	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2268		return 0;
2269
2270	if (key.offset == end)
2271		return 1;
2272	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2273		return 1;
2274	return 0;
2275}
2276
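/*
 * After extents in a punched range were dropped, insert (or extend an
 * adjacent) file extent item with a zero disk_bytenr to represent the
 * hole, unless the NO_HOLES feature makes explicit hole items unnecessary.
 */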
2277static int fill_holes(struct btrfs_trans_handle *trans,
2278		struct btrfs_inode *inode,
2279		struct btrfs_path *path, u64 offset, u64 end)
2280{
2281	struct btrfs_fs_info *fs_info = trans->fs_info;
2282	struct btrfs_root *root = inode->root;
2283	struct extent_buffer *leaf;
2284	struct btrfs_file_extent_item *fi;
2285	struct extent_map *hole_em;
2286	struct extent_map_tree *em_tree = &inode->extent_tree;
2287	struct btrfs_key key;
2288	int ret;
2289
2290	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2291		goto out;
2292
2293	key.objectid = btrfs_ino(inode);
2294	key.type = BTRFS_EXTENT_DATA_KEY;
2295	key.offset = offset;
2296
2297	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2298	if (ret <= 0) {
2299		/*
2300		 * We should have dropped this offset, so if we find it then
2301		 * something has gone horribly wrong.
2302		 */
2303		if (ret == 0)
2304			ret = -EINVAL;
2305		return ret;
2306	}
2307
2308	leaf = path->nodes[0];
2309	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2310		u64 num_bytes;
2311
2312		path->slots[0]--;
2313		fi = btrfs_item_ptr(leaf, path->slots[0],
2314				    struct btrfs_file_extent_item);
2315		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2316			end - offset;
2317		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2318		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2319		btrfs_set_file_extent_offset(leaf, fi, 0);
2320		btrfs_mark_buffer_dirty(leaf);
2321		goto out;
2322	}
2323
2324	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2325		u64 num_bytes;
2326
2327		key.offset = offset;
2328		btrfs_set_item_key_safe(fs_info, path, &key);
2329		fi = btrfs_item_ptr(leaf, path->slots[0],
2330				    struct btrfs_file_extent_item);
2331		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2332			offset;
2333		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2334		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2335		btrfs_set_file_extent_offset(leaf, fi, 0);
2336		btrfs_mark_buffer_dirty(leaf);
2337		goto out;
2338	}
2339	btrfs_release_path(path);
2340
2341	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2342			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2343	if (ret)
2344		return ret;
2345
2346out:
2347	btrfs_release_path(path);
2348
2349	hole_em = alloc_extent_map();
2350	if (!hole_em) {
2351		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2352		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2353	} else {
2354		hole_em->start = offset;
2355		hole_em->len = end - offset;
2356		hole_em->ram_bytes = hole_em->len;
2357		hole_em->orig_start = offset;
2358
2359		hole_em->block_start = EXTENT_MAP_HOLE;
2360		hole_em->block_len = 0;
2361		hole_em->orig_block_len = 0;
2362		hole_em->bdev = fs_info->fs_devices->latest_bdev;
2363		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2364		hole_em->generation = trans->transid;
2365
2366		do {
2367			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2368			write_lock(&em_tree->lock);
2369			ret = add_extent_mapping(em_tree, hole_em, 1);
2370			write_unlock(&em_tree->lock);
2371		} while (ret == -EEXIST);
2372		free_extent_map(hole_em);
2373		if (ret)
2374			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2375					&inode->runtime_flags);
2376	}
2377
2378	return 0;
2379}
2380
2381/*
2382 * Find a hole extent on the given inode and change start/len to the end of
2383 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2384 *	   em->start + em->len > start).
2385 * When a hole extent is found, return 1 and modify start/len.
2386 */
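/*
 * Worked example (hedged): with a hole extent map covering [0, 64K), a call
 * with *start = 4K and *len = 128K returns 1 and adjusts the range to
 * *start = 64K, *len = 68K, i.e. the leading hole is skipped.
 */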
2387static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2388{
2389	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2390	struct extent_map *em;
2391	int ret = 0;
2392
2393	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2394			      round_down(*start, fs_info->sectorsize),
2395			      round_up(*len, fs_info->sectorsize), 0);
2396	if (IS_ERR(em))
2397		return PTR_ERR(em);
2398
2399	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
2400	if (em->block_start == EXTENT_MAP_HOLE) {
2401		ret = 1;
2402		*len = em->start + em->len > *start + *len ?
2403		       0 : *start + *len - em->start - em->len;
2404		*start = em->start + em->len;
2405	}
2406	free_extent_map(em);
2407	return ret;
2408}
2409
2410static int btrfs_punch_hole_lock_range(struct inode *inode,
2411				       const u64 lockstart,
2412				       const u64 lockend,
2413				       struct extent_state **cached_state)
2414{
2415	while (1) {
2416		struct btrfs_ordered_extent *ordered;
2417		int ret;
2418
2419		truncate_pagecache_range(inode, lockstart, lockend);
2420
2421		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2422				 cached_state);
2423		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2424
2425		/*
2426		 * We need to make sure we have no ordered extents in this range
2427		 * and that nobody raced in and read a page in this range; if
2428		 * either happened, we need to try again.
2429		 */
2430		if ((!ordered ||
2431		    (ordered->file_offset + ordered->len <= lockstart ||
2432		     ordered->file_offset > lockend)) &&
2433		     !filemap_range_has_page(inode->i_mapping,
2434					     lockstart, lockend)) {
2435			if (ordered)
2436				btrfs_put_ordered_extent(ordered);
2437			break;
2438		}
2439		if (ordered)
2440			btrfs_put_ordered_extent(ordered);
2441		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2442				     lockend, cached_state);
2443		ret = btrfs_wait_ordered_range(inode, lockstart,
2444					       lockend - lockstart + 1);
2445		if (ret)
2446			return ret;
2447	}
2448	return 0;
2449}
2450
2451static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
2452				     struct inode *inode,
2453				     struct btrfs_path *path,
2454				     struct btrfs_clone_extent_info *clone_info,
2455				     const u64 clone_len)
2456{
2457	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2458	struct btrfs_root *root = BTRFS_I(inode)->root;
2459	struct btrfs_file_extent_item *extent;
2460	struct extent_buffer *leaf;
2461	struct btrfs_key key;
2462	int slot;
2463	struct btrfs_ref ref = { 0 };
2464	u64 ref_offset;
2465	int ret;
2466
2467	if (clone_len == 0)
2468		return 0;
2469
2470	if (clone_info->disk_offset == 0 &&
2471	    btrfs_fs_incompat(fs_info, NO_HOLES))
2472		return 0;
2473
2474	key.objectid = btrfs_ino(BTRFS_I(inode));
2475	key.type = BTRFS_EXTENT_DATA_KEY;
2476	key.offset = clone_info->file_offset;
2477	ret = btrfs_insert_empty_item(trans, root, path, &key,
2478				      clone_info->item_size);
2479	if (ret)
2480		return ret;
2481	leaf = path->nodes[0];
2482	slot = path->slots[0];
2483	write_extent_buffer(leaf, clone_info->extent_buf,
2484			    btrfs_item_ptr_offset(leaf, slot),
2485			    clone_info->item_size);
2486	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2487	btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
2488	btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
2489	btrfs_mark_buffer_dirty(leaf);
2490	btrfs_release_path(path);
2491
2492	/* If it's a hole, nothing more needs to be done. */
2493	if (clone_info->disk_offset == 0)
2494		return 0;
2495
2496	inode_add_bytes(inode, clone_len);
2497	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2498			       clone_info->disk_offset,
2499			       clone_info->disk_len, 0);
2500	ref_offset = clone_info->file_offset - clone_info->data_offset;
2501	btrfs_init_data_ref(&ref, root->root_key.objectid,
2502			    btrfs_ino(BTRFS_I(inode)), ref_offset);
2503	ret = btrfs_inc_extent_ref(trans, &ref);
2504
2505	return ret;
2506}
2507
2508/*
2509 * The respective range must have been previously locked, as well as the inode.
2510 * The end offset is inclusive (last byte of the range).
2511 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
2512 * cloning.
2513 * When cloning, we don't want to end up in a state where we dropped extents
2514 * without inserting a new one, so we must abort the transaction to avoid a
2515 * corruption.
2516 */
2517int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
2518			   const u64 start, const u64 end,
2519			   struct btrfs_clone_extent_info *clone_info,
2520			   struct btrfs_trans_handle **trans_out)
2521{
2522	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2523	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2524	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2525	struct btrfs_root *root = BTRFS_I(inode)->root;
2526	struct btrfs_trans_handle *trans = NULL;
2527	struct btrfs_block_rsv *rsv;
2528	unsigned int rsv_count;
2529	u64 cur_offset;
2530	u64 drop_end;
2531	u64 len = end - start;
2532	int ret = 0;
2533
2534	if (end <= start)
2535		return -EINVAL;
2536
2537	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2538	if (!rsv) {
2539		ret = -ENOMEM;
2540		goto out;
2541	}
2542	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2543	rsv->failfast = 1;
2544
2545	/*
2546	 * 1 - update the inode
2547	 * 1 - removing the extents in the range
2548	 * 1 - adding the hole extent if no_holes isn't set or if we are cloning
2549	 *     an extent
2550	 */
2551	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
2552		rsv_count = 3;
2553	else
2554		rsv_count = 2;
2555
2556	trans = btrfs_start_transaction(root, rsv_count);
2557	if (IS_ERR(trans)) {
2558		ret = PTR_ERR(trans);
2559		trans = NULL;
2560		goto out_free;
2561	}
2562
2563	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2564				      min_size, false);
2565	BUG_ON(ret);
2566	trans->block_rsv = rsv;
2567
2568	cur_offset = start;
2569	while (cur_offset < end) {
2570		ret = __btrfs_drop_extents(trans, root, inode, path,
2571					   cur_offset, end + 1, &drop_end,
2572					   1, 0, 0, NULL);
2573		if (ret != -ENOSPC) {
2574			/*
2575			 * When cloning we want to avoid transaction aborts when
2576			 * nothing was done and we are attempting to clone parts
2577			 * of inline extents, in such cases -EOPNOTSUPP is
2578			 * returned by __btrfs_drop_extents() without having
2579			 * changed anything in the file.
2580			 */
2581			if (clone_info && ret && ret != -EOPNOTSUPP)
2582				btrfs_abort_transaction(trans, ret);
2583			break;
2584		}
2585
2586		trans->block_rsv = &fs_info->trans_block_rsv;
2587
2588		if (!clone_info && cur_offset < drop_end &&
2589		    cur_offset < ino_size) {
2590			ret = fill_holes(trans, BTRFS_I(inode), path,
2591					cur_offset, drop_end);
2592			if (ret) {
2593				/*
2594				 * If we failed then we didn't insert our hole
2595				 * entries for the area we dropped, so now the
2596				 * fs is corrupted, so we must abort the
2597				 * transaction.
2598				 */
2599				btrfs_abort_transaction(trans, ret);
2600				break;
2601			}
2602		}
2603
2604		if (clone_info) {
2605			u64 clone_len = drop_end - cur_offset;
2606
2607			ret = btrfs_insert_clone_extent(trans, inode, path,
2608							clone_info, clone_len);
2609			if (ret) {
2610				btrfs_abort_transaction(trans, ret);
2611				break;
2612			}
2613			clone_info->data_len -= clone_len;
2614			clone_info->data_offset += clone_len;
2615			clone_info->file_offset += clone_len;
2616		}
2617
2618		cur_offset = drop_end;
2619
2620		ret = btrfs_update_inode(trans, root, inode);
2621		if (ret)
2622			break;
2623
2624		btrfs_end_transaction(trans);
2625		btrfs_btree_balance_dirty(fs_info);
2626
2627		trans = btrfs_start_transaction(root, rsv_count);
2628		if (IS_ERR(trans)) {
2629			ret = PTR_ERR(trans);
2630			trans = NULL;
2631			break;
2632		}
2633
2634		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2635					      rsv, min_size, false);
2636		BUG_ON(ret);	/* shouldn't happen */
2637		trans->block_rsv = rsv;
2638
2639		if (!clone_info) {
2640			ret = find_first_non_hole(inode, &cur_offset, &len);
2641			if (unlikely(ret < 0))
2642				break;
2643			if (ret && !len) {
2644				ret = 0;
2645				break;
2646			}
2647		}
2648	}
2649
2650	/*
2651	 * If we were cloning, force the next fsync to be a full one since we
2652	 * replaced (or just dropped in the case of cloning holes when
2653	 * NO_HOLES is enabled) extents and extent maps.
2654	 * This is for the sake of simplicity, and cloning into files larger
2655	 * than 16MiB would force the full fsync anyway (when
2656	 * try_release_extent_mapping() is invoked during page cache truncation).
2657	 */
2658	if (clone_info)
2659		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2660			&BTRFS_I(inode)->runtime_flags);
2661
2662	if (ret)
2663		goto out_trans;
2664
2665	trans->block_rsv = &fs_info->trans_block_rsv;
2666	/*
2667	 * If we are using the NO_HOLES feature we might already have had a
2668	 * hole that overlaps a part of the region [lockstart, lockend] and
2669	 * ends at (or beyond) lockend. Since we have no file extent items to
2670	 * represent holes, drop_end can be less than lockend and so we must
2671	 * make sure we have an extent map representing the existing hole (the
2672	 * call to __btrfs_drop_extents() might have dropped the existing extent
2673	 * map representing the existing hole), otherwise the fast fsync path
2674	 * will not record the existence of the hole region
2675	 * [existing_hole_start, lockend].
2676	 */
2677	if (drop_end <= end)
2678		drop_end = end + 1;
2679	/*
2680	 * Don't insert a hole file extent item if it's for a range beyond eof
2681	 * (because it's useless) or if it represents a zero-length range (when
2682	 * cur_offset == drop_end).
2683	 */
2684	if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
2685		ret = fill_holes(trans, BTRFS_I(inode), path,
2686				cur_offset, drop_end);
2687		if (ret) {
2688			/* Same comment as above. */
2689			btrfs_abort_transaction(trans, ret);
2690			goto out_trans;
2691		}
2692	}
2693	if (clone_info) {
2694		ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
2695						clone_info->data_len);
2696		if (ret) {
2697			btrfs_abort_transaction(trans, ret);
2698			goto out_trans;
2699		}
2700	}
2701
2702out_trans:
2703	if (!trans)
2704		goto out_free;
2705
2706	trans->block_rsv = &fs_info->trans_block_rsv;
2707	if (ret)
2708		btrfs_end_transaction(trans);
2709	else
2710		*trans_out = trans;
2711out_free:
2712	btrfs_free_block_rsv(fs_info, rsv);
2713out:
2714	return ret;
2715}
2716
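/*
 * Reached via fallocate(2) with FALLOC_FL_PUNCH_HOLE, which the VFS only
 * accepts together with FALLOC_FL_KEEP_SIZE, e.g. from userspace:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 */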
2717static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2718{
2719	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2720	struct btrfs_root *root = BTRFS_I(inode)->root;
2721	struct extent_state *cached_state = NULL;
2722	struct btrfs_path *path;
2723	struct btrfs_trans_handle *trans = NULL;
2724	u64 lockstart;
2725	u64 lockend;
2726	u64 tail_start;
2727	u64 tail_len;
2728	u64 orig_start = offset;
2729	int ret = 0;
2730	bool same_block;
2731	u64 ino_size;
2732	bool truncated_block = false;
2733	bool updated_inode = false;
2734
2735	ret = btrfs_wait_ordered_range(inode, offset, len);
2736	if (ret)
2737		return ret;
2738
2739	inode_lock(inode);
2740	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2741	ret = find_first_non_hole(inode, &offset, &len);
2742	if (ret < 0)
2743		goto out_only_mutex;
2744	if (ret && !len) {
2745		/* Already in a large hole */
2746		ret = 0;
2747		goto out_only_mutex;
2748	}
2749
2750	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2751	lockend = round_down(offset + len,
2752			     btrfs_inode_sectorsize(inode)) - 1;
2753	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2754		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2755	/*
2756	 * We needn't truncate any block which is beyond the end of the file
2757	 * because we are sure there is no data there.
2758	 */
2759	/*
2760	 * Only do this if we are in the same block and we aren't doing the
2761	 * entire block.
2762	 */
2763	if (same_block && len < fs_info->sectorsize) {
2764		if (offset < ino_size) {
2765			truncated_block = true;
2766			ret = btrfs_truncate_block(inode, offset, len, 0);
2767		} else {
2768			ret = 0;
2769		}
2770		goto out_only_mutex;
2771	}
2772
2773	/* zero back part of the first block */
2774	if (offset < ino_size) {
2775		truncated_block = true;
2776		ret = btrfs_truncate_block(inode, offset, 0, 0);
2777		if (ret) {
2778			inode_unlock(inode);
2779			return ret;
2780		}
2781	}
2782
2783	/* Check the aligned pages after the first unaligned page; if
2784	 * offset != orig_start, the first unaligned page and several
2785	 * following pages are already in holes, so the extra check
2786	 * can be skipped. */
2787	if (offset == orig_start) {
2788		/* after truncating the block, check for a hole again */
2789		len = offset + len - lockstart;
2790		offset = lockstart;
2791		ret = find_first_non_hole(inode, &offset, &len);
2792		if (ret < 0)
2793			goto out_only_mutex;
2794		if (ret && !len) {
2795			ret = 0;
2796			goto out_only_mutex;
2797		}
2798		lockstart = offset;
2799	}
2800
2801	/* Check the tail unaligned part is in a hole */
2802	tail_start = lockend + 1;
2803	tail_len = offset + len - tail_start;
2804	if (tail_len) {
2805		ret = find_first_non_hole(inode, &tail_start, &tail_len);
2806		if (unlikely(ret < 0))
2807			goto out_only_mutex;
2808		if (!ret) {
2809			/* zero the front end of the last page */
2810			if (tail_start + tail_len < ino_size) {
2811				truncated_block = true;
2812				ret = btrfs_truncate_block(inode,
2813							tail_start + tail_len,
2814							0, 1);
2815				if (ret)
2816					goto out_only_mutex;
2817			}
2818		}
2819	}
2820
2821	if (lockend < lockstart) {
2822		ret = 0;
2823		goto out_only_mutex;
2824	}
2825
2826	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2827					  &cached_state);
2828	if (ret)
2829		goto out_only_mutex;
2830
2831	path = btrfs_alloc_path();
2832	if (!path) {
2833		ret = -ENOMEM;
2834		goto out;
2835	}
2836
2837	ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
2838				     &trans);
2839	btrfs_free_path(path);
2840	if (ret)
2841		goto out;
2842
2843	ASSERT(trans != NULL);
2844	inode_inc_iversion(inode);
2845	inode->i_mtime = inode->i_ctime = current_time(inode);
2846	ret = btrfs_update_inode(trans, root, inode);
2847	updated_inode = true;
2848	btrfs_end_transaction(trans);
2849	btrfs_btree_balance_dirty(fs_info);
2850out:
2851	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2852			     &cached_state);
2853out_only_mutex:
2854	if (!updated_inode && truncated_block && !ret) {
2855		/*
2856		 * If we only end up zeroing part of a page, we still need to
2857		 * update the inode item, so that all the time fields are
2858		 * updated as well as the necessary btrfs inode in memory fields
2859		 * for detecting, at fsync time, if the inode isn't yet in the
2860		 * log tree or it's there but not up to date.
2861		 */
2862		struct timespec64 now = current_time(inode);
2863
2864		inode_inc_iversion(inode);
2865		inode->i_mtime = now;
2866		inode->i_ctime = now;
2867		trans = btrfs_start_transaction(root, 1);
2868		if (IS_ERR(trans)) {
2869			ret = PTR_ERR(trans);
2870		} else {
2871			int ret2;
2872
2873			ret = btrfs_update_inode(trans, root, inode);
2874			ret2 = btrfs_end_transaction(trans);
2875			if (!ret)
2876				ret = ret2;
2877		}
2878	}
2879	inode_unlock(inode);
2880	return ret;
2881}
2882
2883/* Helper structure to record which range is already reserved */
2884struct falloc_range {
2885	struct list_head list;
2886	u64 start;
2887	u64 len;
2888};
2889
2890/*
2891 * Helper function to add falloc range
2892 *
2893 * Caller should have locked the larger extent range containing
2894 * [start, start + len)
2895 */
2896static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2897{
2898	struct falloc_range *prev = NULL;
2899	struct falloc_range *range = NULL;
2900
2901	if (list_empty(head))
2902		goto insert;
2903
2904	/*
2905	 * As fallocate iterates in increasing offset order, we only need to check
2906	 * the last range.
2907	 */
2908	prev = list_entry(head->prev, struct falloc_range, list);
2909	if (prev->start + prev->len == start) {
2910		prev->len += len;
2911		return 0;
2912	}
2913insert:
2914	range = kmalloc(sizeof(*range), GFP_KERNEL);
2915	if (!range)
2916		return -ENOMEM;
2917	range->start = start;
2918	range->len = len;
2919	list_add_tail(&range->list, head);
2920	return 0;
2921}
2922
2923static int btrfs_fallocate_update_isize(struct inode *inode,
2924					const u64 end,
2925					const int mode)
2926{
2927	struct btrfs_trans_handle *trans;
2928	struct btrfs_root *root = BTRFS_I(inode)->root;
2929	int ret;
2930	int ret2;
2931
2932	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2933		return 0;
2934
2935	trans = btrfs_start_transaction(root, 1);
2936	if (IS_ERR(trans))
2937		return PTR_ERR(trans);
2938
2939	inode->i_ctime = current_time(inode);
2940	i_size_write(inode, end);
2941	btrfs_ordered_update_i_size(inode, end, NULL);
2942	ret = btrfs_update_inode(trans, root, inode);
2943	ret2 = btrfs_end_transaction(trans);
2944
2945	return ret ? ret : ret2;
2946}
2947
2948enum {
2949	RANGE_BOUNDARY_WRITTEN_EXTENT,
2950	RANGE_BOUNDARY_PREALLOC_EXTENT,
2951	RANGE_BOUNDARY_HOLE,
2952};
2953
2954static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2955						 u64 offset)
2956{
2957	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2958	struct extent_map *em;
2959	int ret;
2960
2961	offset = round_down(offset, sectorsize);
2962	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2963	if (IS_ERR(em))
2964		return PTR_ERR(em);
2965
2966	if (em->block_start == EXTENT_MAP_HOLE)
2967		ret = RANGE_BOUNDARY_HOLE;
2968	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2969		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2970	else
2971		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2972
2973	free_extent_map(em);
2974	return ret;
2975}
2976
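/*
 * Implements fallocate(2) with FALLOC_FL_ZERO_RANGE: afterwards the range
 * reads back as zeroes.  Done here (hedged summary) by zeroing the
 * unaligned boundaries and replacing the aligned part with a preallocated
 * (unwritten) extent:
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
 */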
2977static int btrfs_zero_range(struct inode *inode,
2978			    loff_t offset,
2979			    loff_t len,
2980			    const int mode)
2981{
2982	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2983	struct extent_map *em;
2984	struct extent_changeset *data_reserved = NULL;
2985	int ret;
2986	u64 alloc_hint = 0;
2987	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2988	u64 alloc_start = round_down(offset, sectorsize);
2989	u64 alloc_end = round_up(offset + len, sectorsize);
2990	u64 bytes_to_reserve = 0;
2991	bool space_reserved = false;
2992
2993	inode_dio_wait(inode);
2994
2995	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2996			      alloc_start, alloc_end - alloc_start, 0);
2997	if (IS_ERR(em)) {
2998		ret = PTR_ERR(em);
2999		goto out;
3000	}
3001
3002	/*
3003	 * Avoid hole punching and extent allocation for some cases. More cases
3004	 * could be considered, but these are not likely to be common, and we keep
3005	 * things as simple as possible for now. Also, intentionally, if the target
3006	 * range contains one or more prealloc extents together with regular
3007	 * extents and holes, we drop all the existing extents and allocate a
3008	 * new prealloc extent, so that we get a larger contiguous disk extent.
3009	 */
3010	if (em->start <= alloc_start &&
3011	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3012		const u64 em_end = em->start + em->len;
3013
3014		if (em_end >= offset + len) {
3015			/*
3016			 * The whole range is already a prealloc extent,
3017			 * do nothing except updating the inode's i_size if
3018			 * needed.
3019			 */
3020			free_extent_map(em);
3021			ret = btrfs_fallocate_update_isize(inode, offset + len,
3022							   mode);
3023			goto out;
3024		}
3025		/*
3026		 * Part of the range is already a prealloc extent, so operate
3027		 * only on the remaining part of the range.
3028		 */
3029		alloc_start = em_end;
3030		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3031		len = offset + len - alloc_start;
3032		offset = alloc_start;
3033		alloc_hint = em->block_start + em->len;
3034	}
3035	free_extent_map(em);
3036
3037	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3038	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3039		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
3040				      alloc_start, sectorsize, 0);
3041		if (IS_ERR(em)) {
3042			ret = PTR_ERR(em);
3043			goto out;
3044		}
3045
3046		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3047			free_extent_map(em);
3048			ret = btrfs_fallocate_update_isize(inode, offset + len,
3049							   mode);
3050			goto out;
3051		}
3052		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3053			free_extent_map(em);
3054			ret = btrfs_truncate_block(inode, offset, len, 0);
3055			if (!ret)
3056				ret = btrfs_fallocate_update_isize(inode,
3057								   offset + len,
3058								   mode);
3059			return ret;
3060		}
3061		free_extent_map(em);
3062		alloc_start = round_down(offset, sectorsize);
3063		alloc_end = alloc_start + sectorsize;
3064		goto reserve_space;
3065	}
3066
3067	alloc_start = round_up(offset, sectorsize);
3068	alloc_end = round_down(offset + len, sectorsize);
3069
3070	/*
3071	 * For unaligned ranges, check the pages at the boundaries, they might
3072	 * map to an extent, in which case we need to partially zero them, or
3073	 * they might map to a hole, in which case we need our allocation range
3074	 * to cover them.
3075	 */
3076	if (!IS_ALIGNED(offset, sectorsize)) {
3077		ret = btrfs_zero_range_check_range_boundary(inode, offset);
3078		if (ret < 0)
3079			goto out;
3080		if (ret == RANGE_BOUNDARY_HOLE) {
3081			alloc_start = round_down(offset, sectorsize);
3082			ret = 0;
3083		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3084			ret = btrfs_truncate_block(inode, offset, 0, 0);
3085			if (ret)
3086				goto out;
3087		} else {
3088			ret = 0;
3089		}
3090	}
3091
3092	if (!IS_ALIGNED(offset + len, sectorsize)) {
3093		ret = btrfs_zero_range_check_range_boundary(inode,
3094							    offset + len);
3095		if (ret < 0)
3096			goto out;
3097		if (ret == RANGE_BOUNDARY_HOLE) {
3098			alloc_end = round_up(offset + len, sectorsize);
3099			ret = 0;
3100		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3101			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
3102			if (ret)
3103				goto out;
3104		} else {
3105			ret = 0;
3106		}
3107	}
3108
3109reserve_space:
3110	if (alloc_start < alloc_end) {
3111		struct extent_state *cached_state = NULL;
3112		const u64 lockstart = alloc_start;
3113		const u64 lockend = alloc_end - 1;
3114
3115		bytes_to_reserve = alloc_end - alloc_start;
3116		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3117						      bytes_to_reserve);
3118		if (ret < 0)
3119			goto out;
3120		space_reserved = true;
3121		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3122						alloc_start, bytes_to_reserve);
3123		if (ret)
3124			goto out;
3125		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3126						  &cached_state);
3127		if (ret)
3128			goto out;
3129		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3130						alloc_end - alloc_start,
3131						i_blocksize(inode),
3132						offset + len, &alloc_hint);
3133		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3134				     lockend, &cached_state);
3135		/* btrfs_prealloc_file_range releases reserved space on error */
3136		if (ret) {
3137			space_reserved = false;
3138			goto out;
3139		}
3140	}
3141	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3142 out:
3143	if (ret && space_reserved)
3144		btrfs_free_reserved_data_space(inode, data_reserved,
3145					       alloc_start, bytes_to_reserve);
3146	extent_changeset_free(data_reserved);
3147
3148	return ret;
3149}
3150
3151static long btrfs_fallocate(struct file *file, int mode,
3152			    loff_t offset, loff_t len)
3153{
3154	struct inode *inode = file_inode(file);
3155	struct extent_state *cached_state = NULL;
3156	struct extent_changeset *data_reserved = NULL;
3157	struct falloc_range *range;
3158	struct falloc_range *tmp;
3159	struct list_head reserve_list;
3160	u64 cur_offset;
3161	u64 last_byte;
3162	u64 alloc_start;
3163	u64 alloc_end;
3164	u64 alloc_hint = 0;
3165	u64 locked_end;
3166	u64 actual_end = 0;
3167	struct extent_map *em;
3168	int blocksize = btrfs_inode_sectorsize(inode);
3169	int ret;
3170
3171	alloc_start = round_down(offset, blocksize);
3172	alloc_end = round_up(offset + len, blocksize);
3173	cur_offset = alloc_start;
3174
3175	/* Make sure we aren't being given some crap mode */
3176	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3177		     FALLOC_FL_ZERO_RANGE))
3178		return -EOPNOTSUPP;
3179
3180	if (mode & FALLOC_FL_PUNCH_HOLE)
3181		return btrfs_punch_hole(inode, offset, len);
3182
3183	/*
3184	 * Only trigger disk allocation, don't trigger qgroup reserve
3185	 *
3186	 * For qgroup space, it will be checked later.
3187	 */
3188	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3189		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3190						      alloc_end - alloc_start);
3191		if (ret < 0)
3192			return ret;
3193	}
3194
3195	inode_lock(inode);
3196
3197	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3198		ret = inode_newsize_ok(inode, offset + len);
3199		if (ret)
3200			goto out;
3201	}
3202
3203	/*
3204	 * TODO: Move these two operations after we have checked
3205	 * accurate reserved space, or fallocate can still fail but
3206	 * leave the page cache truncated or the size expanded.
3207	 *
3208	 * But that's a minor problem and won't do much harm BTW.
3209	 */
3210	if (alloc_start > inode->i_size) {
3211		ret = btrfs_cont_expand(inode, i_size_read(inode),
3212					alloc_start);
3213		if (ret)
3214			goto out;
3215	} else if (offset + len > inode->i_size) {
3216		/*
3217		 * If we are fallocating from the end of the file onward we
3218		 * need to zero out the end of the block if i_size lands in the
3219		 * middle of a block.
3220		 */
3221		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3222		if (ret)
3223			goto out;
3224	}
3225
3226	/*
3227	 * wait for ordered IO before we have any locks.  We'll loop again
3228	 * below with the locks held.
3229	 */
3230	ret = btrfs_wait_ordered_range(inode, alloc_start,
3231				       alloc_end - alloc_start);
3232	if (ret)
3233		goto out;
3234
3235	if (mode & FALLOC_FL_ZERO_RANGE) {
3236		ret = btrfs_zero_range(inode, offset, len, mode);
3237		inode_unlock(inode);
3238		return ret;
3239	}
3240
3241	locked_end = alloc_end - 1;
3242	while (1) {
3243		struct btrfs_ordered_extent *ordered;
3244
3245		/* the extent lock is ordered inside the running
3246		 * transaction
3247		 */
3248		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3249				 locked_end, &cached_state);
3250		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3251
3252		if (ordered &&
3253		    ordered->file_offset + ordered->len > alloc_start &&
3254		    ordered->file_offset < alloc_end) {
3255			btrfs_put_ordered_extent(ordered);
3256			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3257					     alloc_start, locked_end,
3258					     &cached_state);
3259			/*
3260			 * we can't wait on the range with the transaction
3261			 * running or with the extent lock held
3262			 */
3263			ret = btrfs_wait_ordered_range(inode, alloc_start,
3264						       alloc_end - alloc_start);
3265			if (ret)
3266				goto out;
3267		} else {
3268			if (ordered)
3269				btrfs_put_ordered_extent(ordered);
3270			break;
3271		}
3272	}
3273
3274	/* First, check if we exceed the qgroup limit */
3275	INIT_LIST_HEAD(&reserve_list);
3276	while (cur_offset < alloc_end) {
3277		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3278				      alloc_end - cur_offset, 0);
3279		if (IS_ERR(em)) {
3280			ret = PTR_ERR(em);
3281			break;
3282		}
3283		last_byte = min(extent_map_end(em), alloc_end);
3284		actual_end = min_t(u64, extent_map_end(em), offset + len);
3285		last_byte = ALIGN(last_byte, blocksize);
3286		if (em->block_start == EXTENT_MAP_HOLE ||
3287		    (cur_offset >= inode->i_size &&
3288		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3289			ret = add_falloc_range(&reserve_list, cur_offset,
3290					       last_byte - cur_offset);
3291			if (ret < 0) {
3292				free_extent_map(em);
3293				break;
3294			}
3295			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3296					cur_offset, last_byte - cur_offset);
3297			if (ret < 0) {
3298				cur_offset = last_byte;
3299				free_extent_map(em);
3300				break;
3301			}
3302		} else {
3303			/*
3304			 * Do not need to reserve unwritten extent for this
3305			 * range, free reserved data space first, otherwise
3306			 * it'll result in false ENOSPC error.
3307			 */
3308			btrfs_free_reserved_data_space(inode, data_reserved,
3309					cur_offset, last_byte - cur_offset);
3310		}
3311		free_extent_map(em);
3312		cur_offset = last_byte;
3313	}
3314
3315	/*
3316	 * If ret is still 0, we're OK to fallocate.
3317	 * Otherwise just clean up the list and exit.
3318	 */
3319	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3320		if (!ret)
3321			ret = btrfs_prealloc_file_range(inode, mode,
3322					range->start,
3323					range->len, i_blocksize(inode),
3324					offset + len, &alloc_hint);
3325		else
3326			btrfs_free_reserved_data_space(inode,
3327					data_reserved, range->start,
3328					range->len);
3329		list_del(&range->list);
3330		kfree(range);
3331	}
3332	if (ret < 0)
3333		goto out_unlock;
3334
3335	/*
3336	 * We didn't need to allocate any more space, but we still extended the
3337	 * size of the file so we need to update i_size and the inode item.
3338	 */
3339	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3340out_unlock:
3341	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3342			     &cached_state);
3343out:
3344	inode_unlock(inode);
3345	/* Let go of our reservation. */
3346	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3347		btrfs_free_reserved_data_space(inode, data_reserved,
3348				cur_offset, alloc_end - cur_offset);
3349	extent_changeset_free(data_reserved);
3350	return ret;
3351}
3352
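/*
 * Backs lseek(2) with SEEK_DATA/SEEK_HOLE, e.g. a userspace extent scan
 * (hedged sketch):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 */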
3353static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3354{
3355	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3356	struct extent_map *em = NULL;
3357	struct extent_state *cached_state = NULL;
3358	u64 lockstart;
3359	u64 lockend;
3360	u64 start;
3361	u64 len;
3362	int ret = 0;
3363
3364	if (inode->i_size == 0)
3365		return -ENXIO;
3366
3367	/*
3368	 * *offset can be negative; in this case we start finding DATA/HOLE from
3369	 * the very start of the file.
3370	 */
3371	start = max_t(loff_t, 0, *offset);
3372
3373	lockstart = round_down(start, fs_info->sectorsize);
3374	lockend = round_up(i_size_read(inode),
3375			   fs_info->sectorsize);
3376	if (lockend <= lockstart)
3377		lockend = lockstart + fs_info->sectorsize;
3378	lockend--;
3379	len = lockend - lockstart + 1;
3380
3381	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3382			 &cached_state);
3383
3384	while (start < inode->i_size) {
3385		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
3386		if (IS_ERR(em)) {
3387			ret = PTR_ERR(em);
3388			em = NULL;
3389			break;
3390		}
3391
3392		if (whence == SEEK_HOLE &&
3393		    (em->block_start == EXTENT_MAP_HOLE ||
3394		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3395			break;
3396		else if (whence == SEEK_DATA &&
3397			   (em->block_start != EXTENT_MAP_HOLE &&
3398			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3399			break;
3400
3401		start = em->start + em->len;
3402		free_extent_map(em);
3403		em = NULL;
3404		cond_resched();
3405	}
3406	free_extent_map(em);
3407	if (!ret) {
3408		if (whence == SEEK_DATA && start >= inode->i_size)
3409			ret = -ENXIO;
3410		else
3411			*offset = min_t(loff_t, start, inode->i_size);
3412	}
3413	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3414			     &cached_state);
3415	return ret;
3416}
3417
3418static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3419{
3420	struct inode *inode = file->f_mapping->host;
3421	int ret;
3422
3423	inode_lock(inode);
3424	switch (whence) {
3425	case SEEK_END:
3426	case SEEK_CUR:
3427		offset = generic_file_llseek(file, offset, whence);
3428		goto out;
3429	case SEEK_DATA:
3430	case SEEK_HOLE:
3431		if (offset >= i_size_read(inode)) {
3432			inode_unlock(inode);
3433			return -ENXIO;
3434		}
3435
3436		ret = find_desired_extent(inode, &offset, whence);
3437		if (ret) {
3438			inode_unlock(inode);
3439			return ret;
3440		}
3441	}
3442
3443	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3444out:
3445	inode_unlock(inode);
3446	return offset;
3447}
3448
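/*
 * FMODE_NOWAIT tells the VFS that RWF_NOWAIT reads/writes may be attempted
 * on this file instead of being rejected with -EOPNOTSUPP up front.
 */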
3449static int btrfs_file_open(struct inode *inode, struct file *filp)
3450{
3451	filp->f_mode |= FMODE_NOWAIT;
3452	return generic_file_open(inode, filp);
3453}
3454
3455const struct file_operations btrfs_file_operations = {
3456	.llseek		= btrfs_file_llseek,
3457	.read_iter      = generic_file_read_iter,
3458	.splice_read	= generic_file_splice_read,
3459	.write_iter	= btrfs_file_write_iter,
3460	.mmap		= btrfs_file_mmap,
3461	.open		= btrfs_file_open,
3462	.release	= btrfs_release_file,
3463	.fsync		= btrfs_sync_file,
3464	.fallocate	= btrfs_fallocate,
3465	.unlocked_ioctl	= btrfs_ioctl,
3466#ifdef CONFIG_COMPAT
3467	.compat_ioctl	= btrfs_compat_ioctl,
3468#endif
3469	.remap_file_range = btrfs_remap_file_range,
3470};
3471
3472void __cold btrfs_auto_defrag_exit(void)
3473{
3474	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3475}
3476
3477int __init btrfs_auto_defrag_init(void)
3478{
3479	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3480					sizeof(struct inode_defrag), 0,
3481					SLAB_MEM_SPREAD,
3482					NULL);
3483	if (!btrfs_inode_defrag_cachep)
3484		return -ENOMEM;
3485
3486	return 0;
3487}
3488
3489int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3490{
3491	int ret;
3492
3493	/*
3494	 * So with compression we will find and lock a dirty page and clear the
3495	 * first one as dirty, set up an async extent, and immediately return
3496	 * with the entire range locked but with nobody actually marked with
3497	 * writeback.  So we can't just filemap_write_and_wait_range() and
3498	 * expect it to work since it will just kick off a thread to do the
3499	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3500	 * since it will wait on the page lock, which won't be unlocked until
3501	 * after the pages have been marked as writeback and so we're good to go
3502	 * from there.  We have to do this otherwise we'll miss the ordered
3503	 * extents and that results in badness.  Please Josef, do not think you
3504	 * know better and pull this out at some point in the future, it is
3505	 * right and you are wrong.
3506	 */
3507	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3508	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3509			     &BTRFS_I(inode)->runtime_flags))
3510		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3511
3512	return ret;
3513}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/time.h>
   9#include <linux/init.h>
  10#include <linux/string.h>
  11#include <linux/backing-dev.h>
  12#include <linux/falloc.h>
  13#include <linux/writeback.h>
  14#include <linux/compat.h>
  15#include <linux/slab.h>
  16#include <linux/btrfs.h>
  17#include <linux/uio.h>
  18#include <linux/iversion.h>
  19#include "ctree.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "btrfs_inode.h"
  23#include "print-tree.h"
  24#include "tree-log.h"
  25#include "locking.h"
  26#include "volumes.h"
  27#include "qgroup.h"
  28#include "compression.h"
  29#include "delalloc-space.h"
  30#include "reflink.h"
  31
  32static struct kmem_cache *btrfs_inode_defrag_cachep;
  33/*
  34 * when auto defrag is enabled we
  35 * queue up these defrag structs to remember which
  36 * inodes need defragging passes
  37 */
  38struct inode_defrag {
  39	struct rb_node rb_node;
  40	/* objectid */
  41	u64 ino;
  42	/*
  43	 * transid where the defrag was added, we search for
  44	 * extents newer than this
  45	 */
  46	u64 transid;
  47
  48	/* root objectid */
  49	u64 root;
  50
  51	/* last offset we were able to defrag */
  52	u64 last_offset;
  53
  54	/* if we've wrapped around back to zero once already */
  55	int cycled;
  56};
  57
  58static int __compare_inode_defrag(struct inode_defrag *defrag1,
  59				  struct inode_defrag *defrag2)
  60{
  61	if (defrag1->root > defrag2->root)
  62		return 1;
  63	else if (defrag1->root < defrag2->root)
  64		return -1;
  65	else if (defrag1->ino > defrag2->ino)
  66		return 1;
  67	else if (defrag1->ino < defrag2->ino)
  68		return -1;
  69	else
  70		return 0;
  71}
  72
  73/* pop a record for an inode into the defrag tree.  The lock
  74 * must be held already
  75 *
  76 * If you're inserting a record for an older transid than an
  77 * existing record, the transid already in the tree is lowered
  78 *
  79 * If an existing record is found the defrag item you
  80 * pass in is freed
  81 */
  82static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  83				    struct inode_defrag *defrag)
  84{
  85	struct btrfs_fs_info *fs_info = inode->root->fs_info;
  86	struct inode_defrag *entry;
  87	struct rb_node **p;
  88	struct rb_node *parent = NULL;
  89	int ret;
  90
  91	p = &fs_info->defrag_inodes.rb_node;
  92	while (*p) {
  93		parent = *p;
  94		entry = rb_entry(parent, struct inode_defrag, rb_node);
  95
  96		ret = __compare_inode_defrag(defrag, entry);
  97		if (ret < 0)
  98			p = &parent->rb_left;
  99		else if (ret > 0)
 100			p = &parent->rb_right;
 101		else {
 102			/* if we're reinserting an entry for
 103			 * an old defrag run, make sure to
 104			 * lower the transid of our existing record
 105			 */
 106			if (defrag->transid < entry->transid)
 107				entry->transid = defrag->transid;
 108			if (defrag->last_offset > entry->last_offset)
 109				entry->last_offset = defrag->last_offset;
 110			return -EEXIST;
 111		}
 112	}
 113	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 114	rb_link_node(&defrag->rb_node, parent, p);
 115	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 116	return 0;
 117}
 118
 119static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 120{
 121	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 122		return 0;
 123
 124	if (btrfs_fs_closing(fs_info))
 125		return 0;
 126
 127	return 1;
 128}
 129
 130/*
 131 * insert a defrag record for this inode if auto defrag is
 132 * enabled
 133 */
 134int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 135			   struct btrfs_inode *inode)
 136{
 137	struct btrfs_root *root = inode->root;
 138	struct btrfs_fs_info *fs_info = root->fs_info;
 139	struct inode_defrag *defrag;
 140	u64 transid;
 141	int ret;
 142
 143	if (!__need_auto_defrag(fs_info))
 144		return 0;
 145
 146	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 147		return 0;
 148
 149	if (trans)
 150		transid = trans->transid;
 151	else
 152		transid = inode->root->last_trans;
 153
 154	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 155	if (!defrag)
 156		return -ENOMEM;
 157
 158	defrag->ino = btrfs_ino(inode);
 159	defrag->transid = transid;
 160	defrag->root = root->root_key.objectid;
 161
 162	spin_lock(&fs_info->defrag_inodes_lock);
 163	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
 164		/*
 165		 * If we set IN_DEFRAG flag and evict the inode from memory,
 166		 * and then re-read this inode, this new inode doesn't have
 167		 * IN_DEFRAG flag. At the case, we may find the existed defrag.
 168		 */
 169		ret = __btrfs_add_inode_defrag(inode, defrag);
 170		if (ret)
 171			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 172	} else {
 173		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 174	}
 175	spin_unlock(&fs_info->defrag_inodes_lock);
 176	return 0;
 177}
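
/*
 * Usage sketch (hypothetical caller, for illustration only): a write path
 * that notices it created a small, fragmented extent could queue the inode:
 *
 *	if (btrfs_test_opt(fs_info, AUTO_DEFRAG) && small_fragmented_write)
 *		btrfs_add_inode_defrag(NULL, BTRFS_I(inode));
 *
 * With a NULL transaction the record uses the root's last_trans, so later
 * defrag passes only look at extents newer than that transid.
 * (small_fragmented_write is a made-up condition for this example.)
 */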
 178
 179/*
 180 * Requeue the defrag object. If there is a defrag object that points to
 181 * the same inode in the tree, we will merge them together (by
 182 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 183 */
 184static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 185				       struct inode_defrag *defrag)
 186{
 187	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 188	int ret;
 189
 190	if (!__need_auto_defrag(fs_info))
 191		goto out;
 192
 193	/*
  194	 * Here we don't check the IN_DEFRAG flag, because we need to merge
  195	 * the records together.
 196	 */
 197	spin_lock(&fs_info->defrag_inodes_lock);
 198	ret = __btrfs_add_inode_defrag(inode, defrag);
 199	spin_unlock(&fs_info->defrag_inodes_lock);
 200	if (ret)
 201		goto out;
 202	return;
 203out:
 204	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 205}
 206
 207/*
  208 * Pick the defraggable inode that we want; if it doesn't exist, we will
  209 * get the next one.
 210 */
 211static struct inode_defrag *
 212btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 213{
 214	struct inode_defrag *entry = NULL;
 215	struct inode_defrag tmp;
 216	struct rb_node *p;
 217	struct rb_node *parent = NULL;
 218	int ret;
 219
 220	tmp.ino = ino;
 221	tmp.root = root;
 222
 223	spin_lock(&fs_info->defrag_inodes_lock);
 224	p = fs_info->defrag_inodes.rb_node;
 225	while (p) {
 226		parent = p;
 227		entry = rb_entry(parent, struct inode_defrag, rb_node);
 228
 229		ret = __compare_inode_defrag(&tmp, entry);
 230		if (ret < 0)
 231			p = parent->rb_left;
 232		else if (ret > 0)
 233			p = parent->rb_right;
 234		else
 235			goto out;
 236	}
 237
 238	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 239		parent = rb_next(parent);
 240		if (parent)
 241			entry = rb_entry(parent, struct inode_defrag, rb_node);
 242		else
 243			entry = NULL;
 244	}
 245out:
 246	if (entry)
 247		rb_erase(parent, &fs_info->defrag_inodes);
 248	spin_unlock(&fs_info->defrag_inodes_lock);
 249	return entry;
 250}
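
/*
 * Example (illustrative): with records for (root 5, ino 100) and
 * (root 5, ino 300) queued, btrfs_pick_defrag_inode(fs_info, 5, 200)
 * misses the exact key, steps to the in-order successor and returns the
 * (5, 300) record after erasing it from the tree.
 */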
 251
 252void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 253{
 254	struct inode_defrag *defrag;
 255	struct rb_node *node;
 256
 257	spin_lock(&fs_info->defrag_inodes_lock);
 258	node = rb_first(&fs_info->defrag_inodes);
 259	while (node) {
 260		rb_erase(node, &fs_info->defrag_inodes);
 261		defrag = rb_entry(node, struct inode_defrag, rb_node);
 262		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 263
 264		cond_resched_lock(&fs_info->defrag_inodes_lock);
 265
 266		node = rb_first(&fs_info->defrag_inodes);
 267	}
 268	spin_unlock(&fs_info->defrag_inodes_lock);
 269}
 270
 271#define BTRFS_DEFRAG_BATCH	1024
 272
 273static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 274				    struct inode_defrag *defrag)
 275{
 276	struct btrfs_root *inode_root;
 277	struct inode *inode;
 278	struct btrfs_ioctl_defrag_range_args range;
 279	int num_defrag;
 280	int ret;
 281
 282	/* get the inode */
 283	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
 284	if (IS_ERR(inode_root)) {
 285		ret = PTR_ERR(inode_root);
 286		goto cleanup;
 287	}
 288
 289	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
 290	btrfs_put_root(inode_root);
 291	if (IS_ERR(inode)) {
 292		ret = PTR_ERR(inode);
 293		goto cleanup;
 294	}
 295
 296	/* do a chunk of defrag */
 297	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 298	memset(&range, 0, sizeof(range));
 299	range.len = (u64)-1;
 300	range.start = defrag->last_offset;
 301
 302	sb_start_write(fs_info->sb);
 303	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 304				       BTRFS_DEFRAG_BATCH);
 305	sb_end_write(fs_info->sb);
 306	/*
 307	 * if we filled the whole defrag batch, there
 308	 * must be more work to do.  Queue this defrag
 309	 * again
 310	 */
 311	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 312		defrag->last_offset = range.start;
 313		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 314	} else if (defrag->last_offset && !defrag->cycled) {
 315		/*
 316		 * we didn't fill our defrag batch, but
 317		 * we didn't start at zero.  Make sure we loop
 318		 * around to the start of the file.
 319		 */
 320		defrag->last_offset = 0;
 321		defrag->cycled = 1;
 322		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 323	} else {
 324		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 325	}
 326
 327	iput(inode);
 328	return 0;
 329cleanup:
 330	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 331	return ret;
 332}
 333
 334/*
 335 * run through the list of inodes in the FS that need
 336 * defragging
 337 */
 338int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 339{
 340	struct inode_defrag *defrag;
 341	u64 first_ino = 0;
 342	u64 root_objectid = 0;
 343
 344	atomic_inc(&fs_info->defrag_running);
 345	while (1) {
 346		/* Pause the auto defragger. */
 347		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 348			     &fs_info->fs_state))
 349			break;
 350
 351		if (!__need_auto_defrag(fs_info))
 352			break;
 353
 354		/* find an inode to defrag */
 355		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 356						 first_ino);
 357		if (!defrag) {
 358			if (root_objectid || first_ino) {
 359				root_objectid = 0;
 360				first_ino = 0;
 361				continue;
 362			} else {
 363				break;
 364			}
 365		}
 366
 367		first_ino = defrag->ino + 1;
 368		root_objectid = defrag->root;
 369
 370		__btrfs_run_defrag_inode(fs_info, defrag);
 371	}
 372	atomic_dec(&fs_info->defrag_running);
 373
 374	/*
 375	 * during unmount, we use the transaction_wait queue to
 376	 * wait for the defragger to stop
 377	 */
 378	wake_up(&fs_info->transaction_wait);
 379	return 0;
 380}
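
/*
 * Resume-cursor example (illustrative): after the record for
 * (root 5, ino 100) is processed, the next pick asks for (5, 101), so the
 * same inode is not handed straight back; once nothing at or after the
 * cursor remains, the cursor resets to (0, 0) for one final sweep before
 * the loop exits.
 */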
 381
 382/* simple helper to fault in pages and copy.  This should go away
 383 * and be replaced with calls into generic code.
 384 */
 385static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 386					 struct page **prepared_pages,
 387					 struct iov_iter *i)
 388{
 389	size_t copied = 0;
 390	size_t total_copied = 0;
 391	int pg = 0;
 392	int offset = offset_in_page(pos);
 393
 394	while (write_bytes > 0) {
 395		size_t count = min_t(size_t,
 396				     PAGE_SIZE - offset, write_bytes);
 397		struct page *page = prepared_pages[pg];
 398		/*
 399		 * Copy data from userspace to the current page
 400		 */
 401		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 402
 403		/* Flush processor's dcache for this page */
 404		flush_dcache_page(page);
 405
 406		/*
 407		 * if we get a partial write, we can end up with
 408		 * partially up to date pages.  These add
 409		 * a lot of complexity, so make sure they don't
 410		 * happen by forcing this copy to be retried.
 411		 *
 412		 * The rest of the btrfs_file_write code will fall
 413		 * back to page at a time copies after we return 0.
 414		 */
 415		if (!PageUptodate(page) && copied < count)
 416			copied = 0;
 417
 418		iov_iter_advance(i, copied);
 419		write_bytes -= copied;
 420		total_copied += copied;
 421
 422		/* Return to btrfs_file_write_iter to fault page */
 423		if (unlikely(copied == 0))
 424			break;
 425
 426		if (copied < PAGE_SIZE - offset) {
 427			offset += copied;
 428		} else {
 429			pg++;
 430			offset = 0;
 431		}
 432	}
 433	return total_copied;
 434}
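
/*
 * Worked example (illustrative, 4K pages): pos = 4094, write_bytes = 10.
 * offset_in_page(4094) = 4094, so the first pass copies
 * min(4096 - 4094, 10) = 2 bytes into page 0, then pg becomes 1 and
 * offset 0, and the second pass copies the remaining 8 bytes into page 1.
 */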
 435
 436/*
 437 * unlocks pages after btrfs_file_write is done with them
 438 */
 439static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 440{
 441	size_t i;
 442	for (i = 0; i < num_pages; i++) {
  443		/* PageChecked is some magic around finding pages that have
  444		 * been modified without going through btrfs_set_page_dirty;
  445		 * clear it here. There should be no need to mark the pages
  446		 * accessed, as prepare_pages should already have marked them
  447		 * accessed via find_or_create_page()
 448		 */
 449		ClearPageChecked(pages[i]);
 450		unlock_page(pages[i]);
 451		put_page(pages[i]);
 452	}
 453}
 454
 455static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
 456					 const u64 start,
 457					 const u64 len,
 458					 struct extent_state **cached_state)
 459{
 460	u64 search_start = start;
 461	const u64 end = start + len - 1;
 462
 463	while (search_start < end) {
 464		const u64 search_len = end - search_start + 1;
 465		struct extent_map *em;
 466		u64 em_len;
 467		int ret = 0;
 468
 469		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
 470		if (IS_ERR(em))
 471			return PTR_ERR(em);
 472
 473		if (em->block_start != EXTENT_MAP_HOLE)
 474			goto next;
 475
 476		em_len = em->len;
 477		if (em->start < search_start)
 478			em_len -= search_start - em->start;
 479		if (em_len > search_len)
 480			em_len = search_len;
 481
 482		ret = set_extent_bit(&inode->io_tree, search_start,
 483				     search_start + em_len - 1,
 484				     EXTENT_DELALLOC_NEW,
 485				     NULL, cached_state, GFP_NOFS);
 486next:
 487		search_start = extent_map_end(em);
 488		free_extent_map(em);
 489		if (ret)
 490			return ret;
 491	}
 492	return 0;
 493}
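
/*
 * Example (illustrative): if [start, start + len) covers a 16K extent that
 * is already written followed by a 16K hole, the written part is skipped
 * and EXTENT_DELALLOC_NEW is set only on the 16K hole, letting later
 * accounting distinguish bytes written over holes from plain overwrites.
 */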
 494
 495/*
 496 * after copy_from_user, pages need to be dirtied and we need to make
 497 * sure holes are created between the current EOF and the start of
 498 * any next extents (if required).
 499 *
 500 * this also makes the decision about creating an inline extent vs
 501 * doing real data extents, marking pages dirty and delalloc as required.
 502 */
 503int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 504		      size_t num_pages, loff_t pos, size_t write_bytes,
 505		      struct extent_state **cached)
 506{
 507	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 508	int err = 0;
 509	int i;
 510	u64 num_bytes;
 511	u64 start_pos;
 512	u64 end_of_last_block;
 513	u64 end_pos = pos + write_bytes;
 514	loff_t isize = i_size_read(&inode->vfs_inode);
 515	unsigned int extra_bits = 0;
 516
 517	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
 518	num_bytes = round_up(write_bytes + pos - start_pos,
 519			     fs_info->sectorsize);
 520
 521	end_of_last_block = start_pos + num_bytes - 1;
 522
 523	/*
 524	 * The pages may have already been dirty, clear out old accounting so
 525	 * we can set things up properly
 526	 */
 527	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
 528			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 529			 0, 0, cached);
 530
 531	if (!btrfs_is_free_space_inode(inode)) {
 532		if (start_pos >= isize &&
 533		    !(inode->flags & BTRFS_INODE_PREALLOC)) {
 534			/*
 535			 * There can't be any extents following eof in this case
 536			 * so just set the delalloc new bit for the range
 537			 * directly.
 538			 */
 539			extra_bits |= EXTENT_DELALLOC_NEW;
 540		} else {
 541			err = btrfs_find_new_delalloc_bytes(inode, start_pos,
 542							    num_bytes, cached);
 543			if (err)
 544				return err;
 545		}
 546	}
 547
 548	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 549					extra_bits, cached);
 550	if (err)
 551		return err;
 552
 553	for (i = 0; i < num_pages; i++) {
 554		struct page *p = pages[i];
 555		SetPageUptodate(p);
 556		ClearPageChecked(p);
 557		set_page_dirty(p);
 558	}
 559
 560	/*
 561	 * we've only changed i_size in ram, and we haven't updated
 562	 * the disk i_size.  There is no need to log the inode
 563	 * at this time.
 564	 */
 565	if (end_pos > isize)
 566		i_size_write(&inode->vfs_inode, end_pos);
 567	return 0;
 568}
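
/*
 * Worked example (illustrative, sectorsize = 4096): pos = 6144 and
 * write_bytes = 2048 give start_pos = 4096 and
 * num_bytes = round_up(2048 + 6144 - 4096, 4096) = 4096, so exactly one
 * block [4096, 8191] is cleared and re-marked delalloc even though the
 * copy only touched its second half.
 */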
 569
 570/*
 571 * this drops all the extents in the cache that intersect the range
 572 * [start, end].  Existing extents are split as required.
 573 */
 574void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 575			     int skip_pinned)
 576{
 577	struct extent_map *em;
 578	struct extent_map *split = NULL;
 579	struct extent_map *split2 = NULL;
 580	struct extent_map_tree *em_tree = &inode->extent_tree;
 581	u64 len = end - start + 1;
 582	u64 gen;
 583	int ret;
 584	int testend = 1;
 585	unsigned long flags;
 586	int compressed = 0;
 587	bool modified;
 588
 589	WARN_ON(end < start);
 590	if (end == (u64)-1) {
 591		len = (u64)-1;
 592		testend = 0;
 593	}
 594	while (1) {
 595		int no_splits = 0;
 596
 597		modified = false;
 598		if (!split)
 599			split = alloc_extent_map();
 600		if (!split2)
 601			split2 = alloc_extent_map();
 602		if (!split || !split2)
 603			no_splits = 1;
 604
 605		write_lock(&em_tree->lock);
 606		em = lookup_extent_mapping(em_tree, start, len);
 607		if (!em) {
 608			write_unlock(&em_tree->lock);
 609			break;
 610		}
 611		flags = em->flags;
 612		gen = em->generation;
 613		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 614			if (testend && em->start + em->len >= start + len) {
 615				free_extent_map(em);
 616				write_unlock(&em_tree->lock);
 617				break;
 618			}
 619			start = em->start + em->len;
 620			if (testend)
 621				len = start + len - (em->start + em->len);
 622			free_extent_map(em);
 623			write_unlock(&em_tree->lock);
 624			continue;
 625		}
 626		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 627		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 628		clear_bit(EXTENT_FLAG_LOGGING, &flags);
 629		modified = !list_empty(&em->list);
 630		if (no_splits)
 631			goto next;
 632
 633		if (em->start < start) {
 634			split->start = em->start;
 635			split->len = start - em->start;
 636
 637			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 638				split->orig_start = em->orig_start;
 639				split->block_start = em->block_start;
 640
 641				if (compressed)
 642					split->block_len = em->block_len;
 643				else
 644					split->block_len = split->len;
 645				split->orig_block_len = max(split->block_len,
 646						em->orig_block_len);
 647				split->ram_bytes = em->ram_bytes;
 648			} else {
 649				split->orig_start = split->start;
 650				split->block_len = 0;
 651				split->block_start = em->block_start;
 652				split->orig_block_len = 0;
 653				split->ram_bytes = split->len;
 654			}
 655
 656			split->generation = gen;
 657			split->flags = flags;
 658			split->compress_type = em->compress_type;
 659			replace_extent_mapping(em_tree, em, split, modified);
 660			free_extent_map(split);
 661			split = split2;
 662			split2 = NULL;
 663		}
 664		if (testend && em->start + em->len > start + len) {
 665			u64 diff = start + len - em->start;
 666
 667			split->start = start + len;
 668			split->len = em->start + em->len - (start + len);
 669			split->flags = flags;
 670			split->compress_type = em->compress_type;
 671			split->generation = gen;
 672
 673			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 674				split->orig_block_len = max(em->block_len,
 675						    em->orig_block_len);
 676
 677				split->ram_bytes = em->ram_bytes;
 678				if (compressed) {
 679					split->block_len = em->block_len;
 680					split->block_start = em->block_start;
 681					split->orig_start = em->orig_start;
 682				} else {
 683					split->block_len = split->len;
 684					split->block_start = em->block_start
 685						+ diff;
 686					split->orig_start = em->orig_start;
 687				}
 688			} else {
 689				split->ram_bytes = split->len;
 690				split->orig_start = split->start;
 691				split->block_len = 0;
 692				split->block_start = em->block_start;
 693				split->orig_block_len = 0;
 694			}
 695
 696			if (extent_map_in_tree(em)) {
 697				replace_extent_mapping(em_tree, em, split,
 698						       modified);
 699			} else {
 700				ret = add_extent_mapping(em_tree, split,
 701							 modified);
 702				ASSERT(ret == 0); /* Logic error */
 703			}
 704			free_extent_map(split);
 705			split = NULL;
 706		}
 707next:
 708		if (extent_map_in_tree(em))
 709			remove_extent_mapping(em_tree, em);
 710		write_unlock(&em_tree->lock);
 711
 712		/* once for us */
 713		free_extent_map(em);
  714	/* once for the tree */
 715		free_extent_map(em);
 716	}
 717	if (split)
 718		free_extent_map(split);
 719	if (split2)
 720		free_extent_map(split2);
 721}
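
/*
 * Example (illustrative): dropping [32K, 64K) from a cached [0, 128K)
 * extent map exercises both split cases above: the first split keeps
 * [0, 32K), the second keeps [64K, 128K), and the original mapping is
 * removed, leaving a 32K gap in the extent map tree.
 */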
 722
 723/*
  724 * this is very complex, but the basic idea is to drop all extents
  725 * in the range start - end.  If drop_end is not NULL, it is set to the
  726 * offset up to which extents were dropped, capped at end.
 727 *
 728 * If an extent intersects the range but is not entirely inside the range
 729 * it is either truncated or split.  Anything entirely inside the range
 730 * is deleted from the tree.
 731 */
 732int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 733			 struct btrfs_root *root, struct btrfs_inode *inode,
 734			 struct btrfs_path *path, u64 start, u64 end,
 735			 u64 *drop_end, int drop_cache,
 736			 int replace_extent,
 737			 u32 extent_item_size,
 738			 int *key_inserted)
 739{
 740	struct btrfs_fs_info *fs_info = root->fs_info;
 741	struct extent_buffer *leaf;
 742	struct btrfs_file_extent_item *fi;
 743	struct btrfs_ref ref = { 0 };
 744	struct btrfs_key key;
 745	struct btrfs_key new_key;
 746	struct inode *vfs_inode = &inode->vfs_inode;
 747	u64 ino = btrfs_ino(inode);
 748	u64 search_start = start;
 749	u64 disk_bytenr = 0;
 750	u64 num_bytes = 0;
 751	u64 extent_offset = 0;
 752	u64 extent_end = 0;
 753	u64 last_end = start;
 754	int del_nr = 0;
 755	int del_slot = 0;
 756	int extent_type;
 757	int recow;
 758	int ret;
 759	int modify_tree = -1;
 760	int update_refs;
 761	int found = 0;
 762	int leafs_visited = 0;
 763
 764	if (drop_cache)
 765		btrfs_drop_extent_cache(inode, start, end - 1, 0);
 766
 767	if (start >= inode->disk_i_size && !replace_extent)
 768		modify_tree = 0;
 769
 770	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
 771		       root == fs_info->tree_root);
 772	while (1) {
 773		recow = 0;
 774		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 775					       search_start, modify_tree);
 776		if (ret < 0)
 777			break;
 778		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 779			leaf = path->nodes[0];
 780			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 781			if (key.objectid == ino &&
 782			    key.type == BTRFS_EXTENT_DATA_KEY)
 783				path->slots[0]--;
 784		}
 785		ret = 0;
 786		leafs_visited++;
 787next_slot:
 788		leaf = path->nodes[0];
 789		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 790			BUG_ON(del_nr > 0);
 791			ret = btrfs_next_leaf(root, path);
 792			if (ret < 0)
 793				break;
 794			if (ret > 0) {
 795				ret = 0;
 796				break;
 797			}
 798			leafs_visited++;
 799			leaf = path->nodes[0];
 800			recow = 1;
 801		}
 802
 803		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 804
 805		if (key.objectid > ino)
 806			break;
 807		if (WARN_ON_ONCE(key.objectid < ino) ||
 808		    key.type < BTRFS_EXTENT_DATA_KEY) {
 809			ASSERT(del_nr == 0);
 810			path->slots[0]++;
 811			goto next_slot;
 812		}
 813		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 814			break;
 815
 816		fi = btrfs_item_ptr(leaf, path->slots[0],
 817				    struct btrfs_file_extent_item);
 818		extent_type = btrfs_file_extent_type(leaf, fi);
 819
 820		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 821		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 822			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 823			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 824			extent_offset = btrfs_file_extent_offset(leaf, fi);
 825			extent_end = key.offset +
 826				btrfs_file_extent_num_bytes(leaf, fi);
 827		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 828			extent_end = key.offset +
 829				btrfs_file_extent_ram_bytes(leaf, fi);
 830		} else {
 831			/* can't happen */
 832			BUG();
 833		}
 834
 835		/*
 836		 * Don't skip extent items representing 0 byte lengths. They
  837		 * used to be created (due to a bug) when hole punching hit an
  838		 * -ENOSPC condition. So if we find one here, just ensure we
 839		 * delete it, otherwise we would insert a new file extent item
 840		 * with the same key (offset) as that 0 bytes length file
 841		 * extent item in the call to setup_items_for_insert() later
 842		 * in this function.
 843		 */
 844		if (extent_end == key.offset && extent_end >= search_start) {
 845			last_end = extent_end;
 846			goto delete_extent_item;
 847		}
 848
 849		if (extent_end <= search_start) {
 850			path->slots[0]++;
 851			goto next_slot;
 852		}
 853
 854		found = 1;
 855		search_start = max(key.offset, start);
 856		if (recow || !modify_tree) {
 857			modify_tree = -1;
 858			btrfs_release_path(path);
 859			continue;
 860		}
 861
 862		/*
 863		 *     | - range to drop - |
 864		 *  | -------- extent -------- |
 865		 */
 866		if (start > key.offset && end < extent_end) {
 867			BUG_ON(del_nr > 0);
 868			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 869				ret = -EOPNOTSUPP;
 870				break;
 871			}
 872
 873			memcpy(&new_key, &key, sizeof(new_key));
 874			new_key.offset = start;
 875			ret = btrfs_duplicate_item(trans, root, path,
 876						   &new_key);
 877			if (ret == -EAGAIN) {
 878				btrfs_release_path(path);
 879				continue;
 880			}
 881			if (ret < 0)
 882				break;
 883
 884			leaf = path->nodes[0];
 885			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 886					    struct btrfs_file_extent_item);
 887			btrfs_set_file_extent_num_bytes(leaf, fi,
 888							start - key.offset);
 889
 890			fi = btrfs_item_ptr(leaf, path->slots[0],
 891					    struct btrfs_file_extent_item);
 892
 893			extent_offset += start - key.offset;
 894			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 895			btrfs_set_file_extent_num_bytes(leaf, fi,
 896							extent_end - start);
 897			btrfs_mark_buffer_dirty(leaf);
 898
 899			if (update_refs && disk_bytenr > 0) {
 900				btrfs_init_generic_ref(&ref,
 901						BTRFS_ADD_DELAYED_REF,
 902						disk_bytenr, num_bytes, 0);
 903				btrfs_init_data_ref(&ref,
 904						root->root_key.objectid,
 905						new_key.objectid,
 906						start - extent_offset);
 907				ret = btrfs_inc_extent_ref(trans, &ref);
 908				BUG_ON(ret); /* -ENOMEM */
 909			}
 910			key.offset = start;
 911		}
 912		/*
 913		 * From here on out we will have actually dropped something, so
 914		 * last_end can be updated.
 915		 */
 916		last_end = extent_end;
 917
 918		/*
 919		 *  | ---- range to drop ----- |
 920		 *      | -------- extent -------- |
 921		 */
 922		if (start <= key.offset && end < extent_end) {
 923			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 924				ret = -EOPNOTSUPP;
 925				break;
 926			}
 927
 928			memcpy(&new_key, &key, sizeof(new_key));
 929			new_key.offset = end;
 930			btrfs_set_item_key_safe(fs_info, path, &new_key);
 931
 932			extent_offset += end - key.offset;
 933			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 934			btrfs_set_file_extent_num_bytes(leaf, fi,
 935							extent_end - end);
 936			btrfs_mark_buffer_dirty(leaf);
 937			if (update_refs && disk_bytenr > 0)
 938				inode_sub_bytes(vfs_inode, end - key.offset);
 939			break;
 940		}
 941
 942		search_start = extent_end;
 943		/*
 944		 *       | ---- range to drop ----- |
 945		 *  | -------- extent -------- |
 946		 */
 947		if (start > key.offset && end >= extent_end) {
 948			BUG_ON(del_nr > 0);
 949			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 950				ret = -EOPNOTSUPP;
 951				break;
 952			}
 953
 954			btrfs_set_file_extent_num_bytes(leaf, fi,
 955							start - key.offset);
 956			btrfs_mark_buffer_dirty(leaf);
 957			if (update_refs && disk_bytenr > 0)
 958				inode_sub_bytes(vfs_inode, extent_end - start);
 959			if (end == extent_end)
 960				break;
 961
 962			path->slots[0]++;
 963			goto next_slot;
 964		}
 965
 966		/*
 967		 *  | ---- range to drop ----- |
 968		 *    | ------ extent ------ |
 969		 */
 970		if (start <= key.offset && end >= extent_end) {
 971delete_extent_item:
 972			if (del_nr == 0) {
 973				del_slot = path->slots[0];
 974				del_nr = 1;
 975			} else {
 976				BUG_ON(del_slot + del_nr != path->slots[0]);
 977				del_nr++;
 978			}
 979
 980			if (update_refs &&
 981			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
 982				inode_sub_bytes(vfs_inode,
 983						extent_end - key.offset);
 984				extent_end = ALIGN(extent_end,
 985						   fs_info->sectorsize);
 986			} else if (update_refs && disk_bytenr > 0) {
 987				btrfs_init_generic_ref(&ref,
 988						BTRFS_DROP_DELAYED_REF,
 989						disk_bytenr, num_bytes, 0);
 990				btrfs_init_data_ref(&ref,
 991						root->root_key.objectid,
 992						key.objectid,
 993						key.offset - extent_offset);
 994				ret = btrfs_free_extent(trans, &ref);
 995				BUG_ON(ret); /* -ENOMEM */
 996				inode_sub_bytes(vfs_inode,
 997						extent_end - key.offset);
 998			}
 999
1000			if (end == extent_end)
1001				break;
1002
1003			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1004				path->slots[0]++;
1005				goto next_slot;
1006			}
1007
1008			ret = btrfs_del_items(trans, root, path, del_slot,
1009					      del_nr);
1010			if (ret) {
1011				btrfs_abort_transaction(trans, ret);
1012				break;
1013			}
1014
1015			del_nr = 0;
1016			del_slot = 0;
1017
1018			btrfs_release_path(path);
1019			continue;
1020		}
1021
1022		BUG();
1023	}
1024
1025	if (!ret && del_nr > 0) {
1026		/*
1027		 * Set path->slots[0] to first slot, so that after the delete
1028		 * if items are move off from our leaf to its immediate left or
1029		 * right neighbor leafs, we end up with a correct and adjusted
1030		 * path->slots[0] for our insertion (if replace_extent != 0).
1031		 */
1032		path->slots[0] = del_slot;
1033		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1034		if (ret)
1035			btrfs_abort_transaction(trans, ret);
1036	}
1037
1038	leaf = path->nodes[0];
1039	/*
1040	 * If btrfs_del_items() was called, it might have deleted a leaf, in
1041	 * which case it unlocked our path, so check path->locks[0] matches a
1042	 * write lock.
1043	 */
1044	if (!ret && replace_extent && leafs_visited == 1 &&
1045	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1046	     path->locks[0] == BTRFS_WRITE_LOCK) &&
1047	    btrfs_leaf_free_space(leaf) >=
1048	    sizeof(struct btrfs_item) + extent_item_size) {
1049
1050		key.objectid = ino;
1051		key.type = BTRFS_EXTENT_DATA_KEY;
1052		key.offset = start;
1053		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1054			struct btrfs_key slot_key;
1055
1056			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1057			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1058				path->slots[0]++;
1059		}
1060		setup_items_for_insert(root, path, &key,
1061				       &extent_item_size,
1062				       extent_item_size,
1063				       sizeof(struct btrfs_item) +
1064				       extent_item_size, 1);
1065		*key_inserted = 1;
1066	}
1067
1068	if (!replace_extent || !(*key_inserted))
1069		btrfs_release_path(path);
1070	if (drop_end)
1071		*drop_end = found ? min(end, last_end) : end;
1072	return ret;
1073}
1074
1075int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1076		       struct btrfs_root *root, struct inode *inode, u64 start,
1077		       u64 end, int drop_cache)
1078{
1079	struct btrfs_path *path;
1080	int ret;
1081
1082	path = btrfs_alloc_path();
1083	if (!path)
1084		return -ENOMEM;
1085	ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
1086				   end, NULL, drop_cache, 0, 0, NULL);
1087	btrfs_free_path(path);
1088	return ret;
1089}
1090
1091static int extent_mergeable(struct extent_buffer *leaf, int slot,
1092			    u64 objectid, u64 bytenr, u64 orig_offset,
1093			    u64 *start, u64 *end)
1094{
1095	struct btrfs_file_extent_item *fi;
1096	struct btrfs_key key;
1097	u64 extent_end;
1098
1099	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1100		return 0;
1101
1102	btrfs_item_key_to_cpu(leaf, &key, slot);
1103	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1104		return 0;
1105
1106	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1107	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1108	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1109	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1110	    btrfs_file_extent_compression(leaf, fi) ||
1111	    btrfs_file_extent_encryption(leaf, fi) ||
1112	    btrfs_file_extent_other_encoding(leaf, fi))
1113		return 0;
1114
1115	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1116	if ((*start && *start != key.offset) || (*end && *end != extent_end))
1117		return 0;
1118
1119	*start = key.offset;
1120	*end = extent_end;
1121	return 1;
1122}
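
/*
 * In/out convention example (illustrative): callers preset *start / *end
 * as constraints, with 0 meaning "don't care":
 *
 *	u64 other_start = 0, other_end = start;
 *
 *	extent_mergeable(leaf, slot - 1, ino, bytenr, orig_offset,
 *			 &other_start, &other_end);
 *
 * only succeeds for a neighbouring piece of the same physical extent that
 * ends exactly at start, and fills in both values on success.
 */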
1123
1124/*
1125 * Mark extent in the range start - end as written.
1126 *
1127 * This changes extent type from 'pre-allocated' to 'regular'. If only
1128 * part of extent is marked as written, the extent will be split into
1129 * two or three.
1130 */
1131int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1132			      struct btrfs_inode *inode, u64 start, u64 end)
1133{
1134	struct btrfs_fs_info *fs_info = trans->fs_info;
1135	struct btrfs_root *root = inode->root;
1136	struct extent_buffer *leaf;
1137	struct btrfs_path *path;
1138	struct btrfs_file_extent_item *fi;
1139	struct btrfs_ref ref = { 0 };
1140	struct btrfs_key key;
1141	struct btrfs_key new_key;
1142	u64 bytenr;
1143	u64 num_bytes;
1144	u64 extent_end;
1145	u64 orig_offset;
1146	u64 other_start;
1147	u64 other_end;
1148	u64 split;
1149	int del_nr = 0;
1150	int del_slot = 0;
1151	int recow;
1152	int ret;
1153	u64 ino = btrfs_ino(inode);
1154
1155	path = btrfs_alloc_path();
1156	if (!path)
1157		return -ENOMEM;
1158again:
1159	recow = 0;
1160	split = start;
1161	key.objectid = ino;
1162	key.type = BTRFS_EXTENT_DATA_KEY;
1163	key.offset = split;
1164
1165	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1166	if (ret < 0)
1167		goto out;
1168	if (ret > 0 && path->slots[0] > 0)
1169		path->slots[0]--;
1170
1171	leaf = path->nodes[0];
1172	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1173	if (key.objectid != ino ||
1174	    key.type != BTRFS_EXTENT_DATA_KEY) {
1175		ret = -EINVAL;
1176		btrfs_abort_transaction(trans, ret);
1177		goto out;
1178	}
1179	fi = btrfs_item_ptr(leaf, path->slots[0],
1180			    struct btrfs_file_extent_item);
1181	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1182		ret = -EINVAL;
1183		btrfs_abort_transaction(trans, ret);
1184		goto out;
1185	}
1186	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1187	if (key.offset > start || extent_end < end) {
1188		ret = -EINVAL;
1189		btrfs_abort_transaction(trans, ret);
1190		goto out;
1191	}
1192
1193	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1194	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1195	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1196	memcpy(&new_key, &key, sizeof(new_key));
1197
1198	if (start == key.offset && end < extent_end) {
1199		other_start = 0;
1200		other_end = start;
1201		if (extent_mergeable(leaf, path->slots[0] - 1,
1202				     ino, bytenr, orig_offset,
1203				     &other_start, &other_end)) {
1204			new_key.offset = end;
1205			btrfs_set_item_key_safe(fs_info, path, &new_key);
1206			fi = btrfs_item_ptr(leaf, path->slots[0],
1207					    struct btrfs_file_extent_item);
1208			btrfs_set_file_extent_generation(leaf, fi,
1209							 trans->transid);
1210			btrfs_set_file_extent_num_bytes(leaf, fi,
1211							extent_end - end);
1212			btrfs_set_file_extent_offset(leaf, fi,
1213						     end - orig_offset);
1214			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1215					    struct btrfs_file_extent_item);
1216			btrfs_set_file_extent_generation(leaf, fi,
1217							 trans->transid);
1218			btrfs_set_file_extent_num_bytes(leaf, fi,
1219							end - other_start);
1220			btrfs_mark_buffer_dirty(leaf);
1221			goto out;
1222		}
1223	}
1224
1225	if (start > key.offset && end == extent_end) {
1226		other_start = end;
1227		other_end = 0;
1228		if (extent_mergeable(leaf, path->slots[0] + 1,
1229				     ino, bytenr, orig_offset,
1230				     &other_start, &other_end)) {
1231			fi = btrfs_item_ptr(leaf, path->slots[0],
1232					    struct btrfs_file_extent_item);
1233			btrfs_set_file_extent_num_bytes(leaf, fi,
1234							start - key.offset);
1235			btrfs_set_file_extent_generation(leaf, fi,
1236							 trans->transid);
1237			path->slots[0]++;
1238			new_key.offset = start;
1239			btrfs_set_item_key_safe(fs_info, path, &new_key);
1240
1241			fi = btrfs_item_ptr(leaf, path->slots[0],
1242					    struct btrfs_file_extent_item);
1243			btrfs_set_file_extent_generation(leaf, fi,
1244							 trans->transid);
1245			btrfs_set_file_extent_num_bytes(leaf, fi,
1246							other_end - start);
1247			btrfs_set_file_extent_offset(leaf, fi,
1248						     start - orig_offset);
1249			btrfs_mark_buffer_dirty(leaf);
1250			goto out;
1251		}
1252	}
1253
1254	while (start > key.offset || end < extent_end) {
1255		if (key.offset == start)
1256			split = end;
1257
1258		new_key.offset = split;
1259		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1260		if (ret == -EAGAIN) {
1261			btrfs_release_path(path);
1262			goto again;
1263		}
1264		if (ret < 0) {
1265			btrfs_abort_transaction(trans, ret);
1266			goto out;
1267		}
1268
1269		leaf = path->nodes[0];
1270		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1271				    struct btrfs_file_extent_item);
1272		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1273		btrfs_set_file_extent_num_bytes(leaf, fi,
1274						split - key.offset);
1275
1276		fi = btrfs_item_ptr(leaf, path->slots[0],
1277				    struct btrfs_file_extent_item);
1278
1279		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1280		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1281		btrfs_set_file_extent_num_bytes(leaf, fi,
1282						extent_end - split);
1283		btrfs_mark_buffer_dirty(leaf);
1284
1285		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1286				       num_bytes, 0);
1287		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1288				    orig_offset);
1289		ret = btrfs_inc_extent_ref(trans, &ref);
1290		if (ret) {
1291			btrfs_abort_transaction(trans, ret);
1292			goto out;
1293		}
1294
1295		if (split == start) {
1296			key.offset = start;
1297		} else {
1298			if (start != key.offset) {
1299				ret = -EINVAL;
1300				btrfs_abort_transaction(trans, ret);
1301				goto out;
1302			}
1303			path->slots[0]--;
1304			extent_end = end;
1305		}
1306		recow = 1;
1307	}
1308
1309	other_start = end;
1310	other_end = 0;
1311	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1312			       num_bytes, 0);
1313	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
1314	if (extent_mergeable(leaf, path->slots[0] + 1,
1315			     ino, bytenr, orig_offset,
1316			     &other_start, &other_end)) {
1317		if (recow) {
1318			btrfs_release_path(path);
1319			goto again;
1320		}
1321		extent_end = other_end;
1322		del_slot = path->slots[0] + 1;
1323		del_nr++;
1324		ret = btrfs_free_extent(trans, &ref);
1325		if (ret) {
1326			btrfs_abort_transaction(trans, ret);
1327			goto out;
1328		}
1329	}
1330	other_start = 0;
1331	other_end = start;
1332	if (extent_mergeable(leaf, path->slots[0] - 1,
1333			     ino, bytenr, orig_offset,
1334			     &other_start, &other_end)) {
1335		if (recow) {
1336			btrfs_release_path(path);
1337			goto again;
1338		}
1339		key.offset = other_start;
1340		del_slot = path->slots[0];
1341		del_nr++;
1342		ret = btrfs_free_extent(trans, &ref);
1343		if (ret) {
1344			btrfs_abort_transaction(trans, ret);
1345			goto out;
1346		}
1347	}
1348	if (del_nr == 0) {
1349		fi = btrfs_item_ptr(leaf, path->slots[0],
1350			   struct btrfs_file_extent_item);
1351		btrfs_set_file_extent_type(leaf, fi,
1352					   BTRFS_FILE_EXTENT_REG);
1353		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1354		btrfs_mark_buffer_dirty(leaf);
1355	} else {
1356		fi = btrfs_item_ptr(leaf, del_slot - 1,
1357			   struct btrfs_file_extent_item);
1358		btrfs_set_file_extent_type(leaf, fi,
1359					   BTRFS_FILE_EXTENT_REG);
1360		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1361		btrfs_set_file_extent_num_bytes(leaf, fi,
1362						extent_end - key.offset);
1363		btrfs_mark_buffer_dirty(leaf);
1364
1365		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1366		if (ret < 0) {
1367			btrfs_abort_transaction(trans, ret);
1368			goto out;
1369		}
1370	}
1371out:
1372	btrfs_free_path(path);
 1373	return ret;
1374}
1375
1376/*
1377 * on error we return an unlocked page and the error value
1378 * on success we return a locked page and 0
1379 */
1380static int prepare_uptodate_page(struct inode *inode,
1381				 struct page *page, u64 pos,
1382				 bool force_uptodate)
1383{
1384	int ret = 0;
1385
1386	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1387	    !PageUptodate(page)) {
1388		ret = btrfs_readpage(NULL, page);
1389		if (ret)
1390			return ret;
1391		lock_page(page);
1392		if (!PageUptodate(page)) {
1393			unlock_page(page);
1394			return -EIO;
1395		}
1396		if (page->mapping != inode->i_mapping) {
1397			unlock_page(page);
1398			return -EAGAIN;
1399		}
1400	}
1401	return 0;
1402}
1403
1404/*
1405 * this just gets pages into the page cache and locks them down.
1406 */
1407static noinline int prepare_pages(struct inode *inode, struct page **pages,
1408				  size_t num_pages, loff_t pos,
1409				  size_t write_bytes, bool force_uptodate)
1410{
1411	int i;
1412	unsigned long index = pos >> PAGE_SHIFT;
1413	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1414	int err = 0;
1415	int faili;
1416
1417	for (i = 0; i < num_pages; i++) {
1418again:
1419		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1420					       mask | __GFP_WRITE);
1421		if (!pages[i]) {
1422			faili = i - 1;
1423			err = -ENOMEM;
1424			goto fail;
1425		}
1426
1427		if (i == 0)
1428			err = prepare_uptodate_page(inode, pages[i], pos,
1429						    force_uptodate);
1430		if (!err && i == num_pages - 1)
1431			err = prepare_uptodate_page(inode, pages[i],
1432						    pos + write_bytes, false);
1433		if (err) {
1434			put_page(pages[i]);
1435			if (err == -EAGAIN) {
1436				err = 0;
1437				goto again;
1438			}
1439			faili = i - 1;
1440			goto fail;
1441		}
1442		wait_on_page_writeback(pages[i]);
1443	}
1444
1445	return 0;
1446fail:
1447	while (faili >= 0) {
1448		unlock_page(pages[faili]);
1449		put_page(pages[faili]);
1450		faili--;
1451	}
1452	return err;
1453
1454}
1455
1456/*
1457 * This function locks the extent and properly waits for data=ordered extents
 1458 * to finish before allowing the pages to be modified if needed.
1459 *
1460 * The return value:
1461 * 1 - the extent is locked
1462 * 0 - the extent is not locked, and everything is OK
 1463 * -EAGAIN - the pages need to be re-prepared
 1464 * any other value < 0 - something went wrong
1465 */
1466static noinline int
1467lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1468				size_t num_pages, loff_t pos,
1469				size_t write_bytes,
1470				u64 *lockstart, u64 *lockend,
1471				struct extent_state **cached_state)
1472{
1473	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1474	u64 start_pos;
1475	u64 last_pos;
1476	int i;
1477	int ret = 0;
1478
1479	start_pos = round_down(pos, fs_info->sectorsize);
1480	last_pos = start_pos
1481		+ round_up(pos + write_bytes - start_pos,
1482			   fs_info->sectorsize) - 1;
1483
1484	if (start_pos < inode->vfs_inode.i_size) {
1485		struct btrfs_ordered_extent *ordered;
1486
1487		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1488				cached_state);
1489		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1490						     last_pos - start_pos + 1);
1491		if (ordered &&
1492		    ordered->file_offset + ordered->num_bytes > start_pos &&
1493		    ordered->file_offset <= last_pos) {
1494			unlock_extent_cached(&inode->io_tree, start_pos,
1495					last_pos, cached_state);
1496			for (i = 0; i < num_pages; i++) {
1497				unlock_page(pages[i]);
1498				put_page(pages[i]);
1499			}
1500			btrfs_start_ordered_extent(&inode->vfs_inode,
1501					ordered, 1);
1502			btrfs_put_ordered_extent(ordered);
1503			return -EAGAIN;
1504		}
1505		if (ordered)
1506			btrfs_put_ordered_extent(ordered);
1507
1508		*lockstart = start_pos;
1509		*lockend = last_pos;
1510		ret = 1;
1511	}
1512
1513	/*
1514	 * It's possible the pages are dirty right now, but we don't want
1515	 * to clean them yet because copy_from_user may catch a page fault
1516	 * and we might have to fall back to one page at a time.  If that
1517	 * happens, we'll unlock these pages and we'd have a window where
1518	 * reclaim could sneak in and drop the once-dirty page on the floor
1519	 * without writing it.
1520	 *
1521	 * We have the pages locked and the extent range locked, so there's
1522	 * no way someone can start IO on any dirty pages in this range.
1523	 *
1524	 * We'll call btrfs_dirty_pages() later on, and that will flip around
1525	 * delalloc bits and dirty the pages as required.
1526	 */
1527	for (i = 0; i < num_pages; i++) {
1528		set_page_extent_mapped(pages[i]);
1529		WARN_ON(!PageLocked(pages[i]));
1530	}
1531
1532	return ret;
1533}
1534
1535static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1536			   size_t *write_bytes, bool nowait)
1537{
1538	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1539	struct btrfs_root *root = inode->root;
1540	u64 lockstart, lockend;
1541	u64 num_bytes;
1542	int ret;
1543
1544	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1545		return 0;
1546
1547	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
1548		return -EAGAIN;
1549
1550	lockstart = round_down(pos, fs_info->sectorsize);
1551	lockend = round_up(pos + *write_bytes,
1552			   fs_info->sectorsize) - 1;
1553	num_bytes = lockend - lockstart + 1;
1554
1555	if (nowait) {
1556		struct btrfs_ordered_extent *ordered;
1557
1558		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1559			return -EAGAIN;
1560
1561		ordered = btrfs_lookup_ordered_range(inode, lockstart,
1562						     num_bytes);
1563		if (ordered) {
1564			btrfs_put_ordered_extent(ordered);
1565			ret = -EAGAIN;
1566			goto out_unlock;
1567		}
1568	} else {
1569		btrfs_lock_and_flush_ordered_range(inode, lockstart,
1570						   lockend, NULL);
1571	}
1572
1573	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1574			NULL, NULL, NULL, false);
1575	if (ret <= 0) {
1576		ret = 0;
1577		if (!nowait)
1578			btrfs_drew_write_unlock(&root->snapshot_lock);
1579	} else {
 1580				*write_bytes = min_t(size_t, *write_bytes,
1581				     num_bytes - pos + lockstart);
1582	}
1583out_unlock:
1584	unlock_extent(&inode->io_tree, lockstart, lockend);
1585
1586	return ret;
1587}
1588
1589static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1590			      size_t *write_bytes)
1591{
1592	return check_can_nocow(inode, pos, write_bytes, true);
1593}
1594
1595/*
1596 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1597 *
1598 * @pos:	 File offset
1599 * @write_bytes: The length to write, will be updated to the nocow writeable
1600 *		 range
1601 *
1602 * This function will flush ordered extents in the range to ensure proper
1603 * nocow checks.
1604 *
1605 * Return:
1606 * >0		and update @write_bytes if we can do nocow write
1607 *  0		if we can't do nocow write
1608 * -EAGAIN	if we can't get the needed lock or there are ordered extents
 1609 * 		in the (nowait == true) case
1610 * <0		if other error happened
1611 *
1612 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1613 */
1614int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1615			   size_t *write_bytes)
1616{
1617	return check_can_nocow(inode, pos, write_bytes, false);
1618}
1619
1620void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1621{
1622	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1623}
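
/*
 * Usage sketch (mirrors the buffered write path below): the snapshot lock
 * taken by a successful btrfs_check_nocow_lock() must be dropped once the
 * nocow write is done:
 *
 *	size_t write_bytes = len;
 *
 *	if (btrfs_check_nocow_lock(inode, pos, &write_bytes) > 0) {
 *		do_nocow_write(inode, pos, write_bytes);
 *		btrfs_check_nocow_unlock(inode);
 *	}
 *
 * (do_nocow_write is a placeholder for the caller's own write logic.)
 */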
1624
1625static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1626					       struct iov_iter *i)
1627{
1628	struct file *file = iocb->ki_filp;
1629	loff_t pos = iocb->ki_pos;
1630	struct inode *inode = file_inode(file);
1631	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1632	struct page **pages = NULL;
1633	struct extent_changeset *data_reserved = NULL;
1634	u64 release_bytes = 0;
1635	u64 lockstart;
1636	u64 lockend;
1637	size_t num_written = 0;
1638	int nrptrs;
1639	int ret = 0;
1640	bool only_release_metadata = false;
1641	bool force_page_uptodate = false;
1642
1643	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1644			PAGE_SIZE / (sizeof(struct page *)));
1645	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1646	nrptrs = max(nrptrs, 8);
1647	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1648	if (!pages)
1649		return -ENOMEM;
1650
1651	while (iov_iter_count(i) > 0) {
1652		struct extent_state *cached_state = NULL;
1653		size_t offset = offset_in_page(pos);
1654		size_t sector_offset;
1655		size_t write_bytes = min(iov_iter_count(i),
1656					 nrptrs * (size_t)PAGE_SIZE -
1657					 offset);
1658		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1659						PAGE_SIZE);
1660		size_t reserve_bytes;
1661		size_t dirty_pages;
1662		size_t copied;
1663		size_t dirty_sectors;
1664		size_t num_sectors;
1665		int extents_locked;
1666
1667		WARN_ON(num_pages > nrptrs);
1668
1669		/*
1670		 * Fault pages before locking them in prepare_pages
1671		 * to avoid recursive lock
1672		 */
1673		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1674			ret = -EFAULT;
1675			break;
1676		}
1677
1678		only_release_metadata = false;
1679		sector_offset = pos & (fs_info->sectorsize - 1);
1680		reserve_bytes = round_up(write_bytes + sector_offset,
1681				fs_info->sectorsize);
1682
1683		extent_changeset_release(data_reserved);
1684		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1685						  &data_reserved, pos,
1686						  write_bytes);
1687		if (ret < 0) {
1688			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1689						   &write_bytes) > 0) {
1690				/*
1691				 * For nodata cow case, no need to reserve
1692				 * data space.
1693				 */
1694				only_release_metadata = true;
1695				/*
1696				 * our prealloc extent may be smaller than
1697				 * write_bytes, so scale down.
1698				 */
1699				num_pages = DIV_ROUND_UP(write_bytes + offset,
1700							 PAGE_SIZE);
1701				reserve_bytes = round_up(write_bytes +
1702							 sector_offset,
1703							 fs_info->sectorsize);
1704			} else {
1705				break;
1706			}
1707		}
1708
1709		WARN_ON(reserve_bytes == 0);
1710		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1711				reserve_bytes);
1712		if (ret) {
1713			if (!only_release_metadata)
1714				btrfs_free_reserved_data_space(BTRFS_I(inode),
1715						data_reserved, pos,
1716						write_bytes);
1717			else
1718				btrfs_check_nocow_unlock(BTRFS_I(inode));
1719			break;
1720		}
1721
1722		release_bytes = reserve_bytes;
1723again:
1724		/*
 1725		 * This is going to set up the pages array with the number of
1726		 * pages we want, so we don't really need to worry about the
1727		 * contents of pages from loop to loop
1728		 */
1729		ret = prepare_pages(inode, pages, num_pages,
1730				    pos, write_bytes,
1731				    force_page_uptodate);
1732		if (ret) {
1733			btrfs_delalloc_release_extents(BTRFS_I(inode),
1734						       reserve_bytes);
1735			break;
1736		}
1737
1738		extents_locked = lock_and_cleanup_extent_if_need(
1739				BTRFS_I(inode), pages,
1740				num_pages, pos, write_bytes, &lockstart,
1741				&lockend, &cached_state);
1742		if (extents_locked < 0) {
1743			if (extents_locked == -EAGAIN)
1744				goto again;
1745			btrfs_delalloc_release_extents(BTRFS_I(inode),
1746						       reserve_bytes);
1747			ret = extents_locked;
1748			break;
1749		}
1750
1751		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1752
1753		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1754		dirty_sectors = round_up(copied + sector_offset,
1755					fs_info->sectorsize);
1756		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1757
1758		/*
1759		 * if we have trouble faulting in the pages, fall
1760		 * back to one page at a time
1761		 */
1762		if (copied < write_bytes)
1763			nrptrs = 1;
1764
1765		if (copied == 0) {
1766			force_page_uptodate = true;
1767			dirty_sectors = 0;
1768			dirty_pages = 0;
1769		} else {
1770			force_page_uptodate = false;
1771			dirty_pages = DIV_ROUND_UP(copied + offset,
1772						   PAGE_SIZE);
1773		}
1774
1775		if (num_sectors > dirty_sectors) {
1776			/* release everything except the sectors we dirtied */
1777			release_bytes -= dirty_sectors <<
1778						fs_info->sb->s_blocksize_bits;
1779			if (only_release_metadata) {
1780				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1781							release_bytes, true);
1782			} else {
1783				u64 __pos;
1784
1785				__pos = round_down(pos,
1786						   fs_info->sectorsize) +
1787					(dirty_pages << PAGE_SHIFT);
1788				btrfs_delalloc_release_space(BTRFS_I(inode),
1789						data_reserved, __pos,
1790						release_bytes, true);
1791			}
1792		}
1793
1794		release_bytes = round_up(copied + sector_offset,
1795					fs_info->sectorsize);
1796
1797		if (copied > 0)
1798			ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1799						dirty_pages, pos, copied,
1800						&cached_state);
1801
1802		/*
1803		 * If we have not locked the extent range, because the range's
1804		 * start offset is >= i_size, we might still have a non-NULL
1805		 * cached extent state, acquired while marking the extent range
1806		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1807		 * possible cached extent state to avoid a memory leak.
1808		 */
1809		if (extents_locked)
1810			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1811					     lockstart, lockend, &cached_state);
1812		else
1813			free_extent_state(cached_state);
1814
1815		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1816		if (ret) {
1817			btrfs_drop_pages(pages, num_pages);
1818			break;
1819		}
1820
1821		release_bytes = 0;
1822		if (only_release_metadata)
1823			btrfs_check_nocow_unlock(BTRFS_I(inode));
1824
1825		if (only_release_metadata && copied > 0) {
1826			lockstart = round_down(pos,
1827					       fs_info->sectorsize);
1828			lockend = round_up(pos + copied,
1829					   fs_info->sectorsize) - 1;
1830
1831			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1832				       lockend, EXTENT_NORESERVE, NULL,
1833				       NULL, GFP_NOFS);
1834		}
1835
1836		btrfs_drop_pages(pages, num_pages);
1837
1838		cond_resched();
1839
1840		balance_dirty_pages_ratelimited(inode->i_mapping);
1841
1842		pos += copied;
1843		num_written += copied;
1844	}
1845
1846	kfree(pages);
1847
1848	if (release_bytes) {
1849		if (only_release_metadata) {
1850			btrfs_check_nocow_unlock(BTRFS_I(inode));
1851			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1852					release_bytes, true);
1853		} else {
1854			btrfs_delalloc_release_space(BTRFS_I(inode),
1855					data_reserved,
1856					round_down(pos, fs_info->sectorsize),
1857					release_bytes, true);
1858		}
1859	}
1860
1861	extent_changeset_free(data_reserved);
1862	return num_written ? num_written : ret;
1863}
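
/*
 * Worked example of the partial-copy release math above (illustrative,
 * 4K sectors): if 16K was reserved but the copy faulted after 4K,
 * num_sectors = 4 and dirty_sectors = 1, so 12K of the reservation is
 * released right away and only the dirtied 4K remains reserved for
 * btrfs_dirty_pages() to consume.
 */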
1864
1865static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1866{
1867	struct file *file = iocb->ki_filp;
1868	struct inode *inode = file_inode(file);
1869	loff_t pos;
1870	ssize_t written;
1871	ssize_t written_buffered;
1872	loff_t endbyte;
1873	int err;
1874
1875	written = generic_file_direct_write(iocb, from);
1876
1877	if (written < 0 || !iov_iter_count(from))
1878		return written;
1879
1880	pos = iocb->ki_pos;
1881	written_buffered = btrfs_buffered_write(iocb, from);
1882	if (written_buffered < 0) {
1883		err = written_buffered;
1884		goto out;
1885	}
1886	/*
1887	 * Ensure all data is persisted. We want the next direct IO read to be
1888	 * able to read what was just written.
1889	 */
1890	endbyte = pos + written_buffered - 1;
1891	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1892	if (err)
1893		goto out;
1894	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1895	if (err)
1896		goto out;
1897	written += written_buffered;
1898	iocb->ki_pos = pos + written_buffered;
1899	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1900				 endbyte >> PAGE_SHIFT);
1901out:
1902	return written ? written : err;
1903}
1904
1905static void update_time_for_write(struct inode *inode)
1906{
1907	struct timespec64 now;
1908
1909	if (IS_NOCMTIME(inode))
1910		return;
1911
1912	now = current_time(inode);
1913	if (!timespec64_equal(&inode->i_mtime, &now))
1914		inode->i_mtime = now;
1915
1916	if (!timespec64_equal(&inode->i_ctime, &now))
1917		inode->i_ctime = now;
1918
1919	if (IS_I_VERSION(inode))
1920		inode_inc_iversion(inode);
1921}
1922
1923static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1924				    struct iov_iter *from)
1925{
1926	struct file *file = iocb->ki_filp;
1927	struct inode *inode = file_inode(file);
1928	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1929	struct btrfs_root *root = BTRFS_I(inode)->root;
1930	u64 start_pos;
1931	u64 end_pos;
1932	ssize_t num_written = 0;
1933	const bool sync = iocb->ki_flags & IOCB_DSYNC;
1934	ssize_t err;
1935	loff_t pos;
1936	size_t count;
1937	loff_t oldsize;
1938	int clean_page = 0;
1939
1940	if (!(iocb->ki_flags & IOCB_DIRECT) &&
1941	    (iocb->ki_flags & IOCB_NOWAIT))
1942		return -EOPNOTSUPP;
1943
1944	if (iocb->ki_flags & IOCB_NOWAIT) {
1945		if (!inode_trylock(inode))
1946			return -EAGAIN;
1947	} else {
1948		inode_lock(inode);
1949	}
1950
1951	err = generic_write_checks(iocb, from);
1952	if (err <= 0) {
1953		inode_unlock(inode);
1954		return err;
1955	}
1956
1957	pos = iocb->ki_pos;
1958	count = iov_iter_count(from);
1959	if (iocb->ki_flags & IOCB_NOWAIT) {
1960		size_t nocow_bytes = count;
1961
1962		/*
1963		 * We will allocate space in case nodatacow is not set,
1964		 * so bail
1965		 */
1966		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
1967		    <= 0) {
1968			inode_unlock(inode);
1969			return -EAGAIN;
1970		}
1971		/*
1972		 * There are holes in the range or parts of the range that must
1973		 * be COWed (shared extents, RO block groups, etc), so just bail
1974		 * out.
1975		 */
1976		if (nocow_bytes < count) {
1977			inode_unlock(inode);
1978			return -EAGAIN;
1979		}
1980	}
1981
1982	current->backing_dev_info = inode_to_bdi(inode);
1983	err = file_remove_privs(file);
1984	if (err) {
1985		inode_unlock(inode);
1986		goto out;
1987	}
1988
1989	/*
1990	 * If BTRFS flips readonly due to some impossible error
1991	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1992	 * although we have opened a file as writable, we have
1993	 * to stop this write operation to ensure FS consistency.
1994	 */
1995	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1996		inode_unlock(inode);
1997		err = -EROFS;
1998		goto out;
1999	}
2000
2001	/*
2002	 * We reserve space for updating the inode when we reserve space for the
2003	 * extent we are going to write, so we will enospc out there.  We don't
2004	 * need to start yet another transaction to update the inode as we will
2005	 * update the inode when we finish writing whatever data we write.
2006	 */
2007	update_time_for_write(inode);
2008
2009	start_pos = round_down(pos, fs_info->sectorsize);
2010	oldsize = i_size_read(inode);
2011	if (start_pos > oldsize) {
2012		/* Expand hole size to cover write data, preventing empty gap */
2013		end_pos = round_up(pos + count,
2014				   fs_info->sectorsize);
2015		err = btrfs_cont_expand(inode, oldsize, end_pos);
2016		if (err) {
2017			inode_unlock(inode);
2018			goto out;
2019		}
2020		if (start_pos > round_up(oldsize, fs_info->sectorsize))
2021			clean_page = 1;
2022	}
2023
2024	if (sync)
2025		atomic_inc(&BTRFS_I(inode)->sync_writers);
2026
2027	if (iocb->ki_flags & IOCB_DIRECT) {
2028		num_written = __btrfs_direct_write(iocb, from);
2029	} else {
2030		num_written = btrfs_buffered_write(iocb, from);
2031		if (num_written > 0)
2032			iocb->ki_pos = pos + num_written;
2033		if (clean_page)
2034			pagecache_isize_extended(inode, oldsize,
2035						i_size_read(inode));
2036	}
2037
2038	inode_unlock(inode);
2039
2040	/*
2041	 * We also have to set last_sub_trans to the current log transid,
2042	 * otherwise subsequent syncs to a file that's been synced in this
2043	 * transaction will appear to have already occurred.
2044	 */
2045	spin_lock(&BTRFS_I(inode)->lock);
2046	BTRFS_I(inode)->last_sub_trans = root->log_transid;
2047	spin_unlock(&BTRFS_I(inode)->lock);
2048	if (num_written > 0)
2049		num_written = generic_write_sync(iocb, num_written);
2050
2051	if (sync)
2052		atomic_dec(&BTRFS_I(inode)->sync_writers);
2053out:
2054	current->backing_dev_info = NULL;
2055	return num_written ? num_written : err;
2056}
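
/*
 * Illustrative userspace sketch (not part of the kernel sources; the file
 * name is a placeholder) of how the IOCB_NOWAIT handling above surfaces to
 * applications: pwritev2() with RWF_NOWAIT requires O_DIRECT on btrfs and
 * returns -EAGAIN whenever the write would block (inode lock contention,
 * ranges that would need COW space allocation, etc).
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(4096))) = "hello";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	ssize_t ret;

	if (fd < 0)
		return 1;
	/* Try a non-blocking write first, then fall back to blocking. */
	ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		ret = pwritev2(fd, &iov, 1, 0, 0);
	printf("wrote %zd bytes\n", ret);
	close(fd);
	return ret < 0;
}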
2057
2058int btrfs_release_file(struct inode *inode, struct file *filp)
2059{
2060	struct btrfs_file_private *private = filp->private_data;
2061
2062	if (private && private->filldir_buf)
2063		kfree(private->filldir_buf);
2064	kfree(private);
2065	filp->private_data = NULL;
2066
2067	/*
2068	 * ordered_data_close is set by setattr when we are about to truncate
2069	 * a file from a non-zero size to a zero size.  This tries to
2070	 * flush down new bytes that may have been written if the
2071	 * application were using truncate to replace a file in place.
2072	 */
2073	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2074			       &BTRFS_I(inode)->runtime_flags))
2075		filemap_flush(inode->i_mapping);
2076	return 0;
2077}
2078
2079static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2080{
2081	int ret;
2082	struct blk_plug plug;
2083
2084	/*
2085	 * This is only called in fsync, which does synchronous writes, so a
2086	 * plug can merge adjacent IOs as much as possible.  Especially in the
2087	 * case of multiple disks using a raid profile, a large IO can be split
2088	 * into several stripe-length segments (currently 64K).
2089	 */
2090	blk_start_plug(&plug);
2091	atomic_inc(&BTRFS_I(inode)->sync_writers);
2092	ret = btrfs_fdatawrite_range(inode, start, end);
2093	atomic_dec(&BTRFS_I(inode)->sync_writers);
2094	blk_finish_plug(&plug);
2095
2096	return ret;
2097}
2098
2099/*
2100 * fsync call for both files and directories.  This logs the inode into
2101 * the tree log instead of forcing full commits whenever possible.
2102 *
2103 * It needs to call filemap_fdatawait so that all ordered extent updates to
2104 * the metadata btree are up to date for copying to the log.
2105 *
2106 * It drops the inode mutex before doing the tree log commit.  This is an
2107 * important optimization for directories because holding the mutex prevents
2108 * new operations on the dir while we write to disk.
2109 */
2110int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2111{
2112	struct dentry *dentry = file_dentry(file);
2113	struct inode *inode = d_inode(dentry);
2114	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2115	struct btrfs_root *root = BTRFS_I(inode)->root;
2116	struct btrfs_trans_handle *trans;
2117	struct btrfs_log_ctx ctx;
2118	int ret = 0, err;
2119
2120	trace_btrfs_sync_file(file, datasync);
2121
2122	btrfs_init_log_ctx(&ctx, inode);
2123
2124	/*
2125	 * Set the range to full if the NO_HOLES feature is not enabled.
2126	 * This is to avoid missing file extent items representing holes after
2127	 * replaying the log.
2128	 */
2129	if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
2130		start = 0;
2131		end = LLONG_MAX;
2132	}
2133
2134	/*
2135	 * We write the dirty pages in the range and wait for them to complete
2136	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
2137	 * pages concurrently and improve performance.  See
2138	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2139	 */
2140	ret = start_ordered_ops(inode, start, end);
2141	if (ret)
2142		goto out;
2143
2144	inode_lock(inode);
2145
2146	/*
2147	 * We take the dio_sem here because the tree log stuff can race with
2148	 * lockless dio writes and get an extent map logged for an extent we
2149	 * never waited on.  We need it this high up for lockdep reasons.
2150	 */
2151	down_write(&BTRFS_I(inode)->dio_sem);
2152
2153	atomic_inc(&root->log_batch);
2154
2155	/*
2156	 * If the inode needs a full sync, make sure we use a full range to
2157	 * avoid log tree corruption, due to hole detection racing with ordered
2158	 * extent completion for adjacent ranges and races between logging and
2159	 * completion of ordered extents for adjacent ranges - both races
2160	 * could lead to file extent items in the log with overlapping ranges.
2161	 * Do this while holding the inode lock, to avoid races with other
2162	 * tasks.
2163	 */
2164	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2165		     &BTRFS_I(inode)->runtime_flags)) {
2166		start = 0;
2167		end = LLONG_MAX;
2168	}
2169
2170	/*
2171	 * Before we acquired the inode's lock, someone may have dirtied more
2172	 * pages in the target range. We need to make sure that writeback for
2173	 * any such pages does not start while we are logging the inode, because
2174	 * if it does, any of the following might happen when we are not doing a
2175	 * full inode sync:
2176	 *
2177	 * 1) We log an extent after its writeback finishes but before its
2178	 *    checksums are added to the csum tree, leading to -EIO errors
2179	 *    when attempting to read the extent after a log replay.
2180	 *
2181	 * 2) We can end up logging an extent before its writeback finishes.
2182	 *    Therefore after the log replay we will have a file extent item
2183	 *    pointing to an unwritten extent (and no data checksums as well).
2184	 *
2185	 * So trigger writeback for any eventual new dirty pages and then we
2186	 * wait for all ordered extents to complete below.
2187	 */
2188	ret = start_ordered_ops(inode, start, end);
2189	if (ret) {
2190		up_write(&BTRFS_I(inode)->dio_sem);
2191		inode_unlock(inode);
2192		goto out;
2193	}
2194
2195	/*
2196	 * We have to do this here to avoid the priority inversion of waiting on
2197	 * IO of a lower priority task while holding a transaction open.
2198	 *
2199	 * Also, the range length can only be represented by a u64, so we have
2200	 * to do the typecasts to avoid signed overflow if it's [0, LLONG_MAX].
2201	 */
2202	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
2203	if (ret) {
2204		up_write(&BTRFS_I(inode)->dio_sem);
2205		inode_unlock(inode);
2206		goto out;
2207	}
2208	atomic_inc(&root->log_batch);
2209
2210	smp_mb();
2211	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2212	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2213		/*
2214		 * We've had everything committed since the last time we were
2215		 * modified so clear this flag in case it was set for whatever
2216		 * reason, it's no longer relevant.
2217		 */
2218		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2219			  &BTRFS_I(inode)->runtime_flags);
2220		/*
2221		 * An ordered extent might have started before and completed
2222		 * already with io errors, in which case the inode was not
2223		 * updated and we end up here. So check the inode's mapping
2224		 * for any errors that might have happened since we last
2225		 * checked for errors, i.e. since the previous fsync.
2226		 */
2227		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2228		up_write(&BTRFS_I(inode)->dio_sem);
2229		inode_unlock(inode);
2230		goto out;
2231	}
2232
2233	/*
2234	 * We use start here because we will need to wait on the IO to complete
2235	 * in btrfs_sync_log, which could require joining a transaction (for
2236	 * example checking cross references in the nocow path).  If we use join
2237	 * here we could get into a situation where we're waiting on IO to
2238	 * happen that is blocked on a transaction trying to commit.  With start
2239	 * we inc the extwriter counter, so we wait for all extwriters to exit
2240	 * before we start blocking joiners.  This comment is to keep somebody
2241	 * from thinking they are super smart and changing this to
2242	 * btrfs_join_transaction *cough*Josef*cough*.
2243	 */
2244	trans = btrfs_start_transaction(root, 0);
2245	if (IS_ERR(trans)) {
2246		ret = PTR_ERR(trans);
2247		up_write(&BTRFS_I(inode)->dio_sem);
2248		inode_unlock(inode);
2249		goto out;
2250	}
2251
2252	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2253	if (ret < 0) {
2254		/* Fallthrough and commit/free transaction. */
2255		ret = 1;
2256	}
2257
2258	/* we've logged all the items and now have a consistent
2259	 * version of the file in the log.  It is possible that
2260	 * someone will come in and modify the file, but that's
2261	 * fine because the log is consistent on disk, and we
2262	 * have references to all of the file's extents
2263	 *
2264	 * It is possible that someone will come in and log the
2265	 * file again, but that will end up using the synchronization
2266	 * inside btrfs_sync_log to keep things safe.
2267	 */
2268	up_write(&BTRFS_I(inode)->dio_sem);
2269	inode_unlock(inode);
2270
2271	if (ret != BTRFS_NO_LOG_SYNC) {
2272		if (!ret) {
2273			ret = btrfs_sync_log(trans, root, &ctx);
2274			if (!ret) {
2275				ret = btrfs_end_transaction(trans);
2276				goto out;
2277			}
2278		}
2279		ret = btrfs_commit_transaction(trans);
2280	} else {
2281		ret = btrfs_end_transaction(trans);
2282	}
2283out:
2284	ASSERT(list_empty(&ctx.list));
2285	err = file_check_and_advance_wb_err(file);
2286	if (!ret)
2287		ret = err;
2288	return ret > 0 ? -EIO : ret;
2289}
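
/*
 * Standalone demonstration (not kernel code) of the typecast discussed
 * before the btrfs_wait_ordered_range() call above: for a full-range sync
 * start is 0 and end is LLONG_MAX, so computing end - start + 1 in signed
 * arithmetic would overflow (undefined behaviour in C), while the u64
 * casts yield the intended length of 2^63 bytes.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	long long start = 0, end = LLONG_MAX;
	unsigned long long len;

	/* Unsigned arithmetic wraps in a well-defined way. */
	len = (unsigned long long)end - (unsigned long long)start + 1;
	printf("range length: %llu\n", len);	/* 9223372036854775808 = 2^63 */
	return 0;
}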
2290
2291static const struct vm_operations_struct btrfs_file_vm_ops = {
2292	.fault		= filemap_fault,
2293	.map_pages	= filemap_map_pages,
2294	.page_mkwrite	= btrfs_page_mkwrite,
2295};
2296
2297static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2298{
2299	struct address_space *mapping = filp->f_mapping;
2300
2301	if (!mapping->a_ops->readpage)
2302		return -ENOEXEC;
2303
2304	file_accessed(filp);
2305	vma->vm_ops = &btrfs_file_vm_ops;
2306
2307	return 0;
2308}
2309
2310static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2311			  int slot, u64 start, u64 end)
2312{
2313	struct btrfs_file_extent_item *fi;
2314	struct btrfs_key key;
2315
2316	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2317		return 0;
2318
2319	btrfs_item_key_to_cpu(leaf, &key, slot);
2320	if (key.objectid != btrfs_ino(inode) ||
2321	    key.type != BTRFS_EXTENT_DATA_KEY)
2322		return 0;
2323
2324	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2325
2326	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2327		return 0;
2328
2329	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2330		return 0;
2331
2332	if (key.offset == end)
2333		return 1;
2334	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2335		return 1;
2336	return 0;
2337}
2338
2339static int fill_holes(struct btrfs_trans_handle *trans,
2340		struct btrfs_inode *inode,
2341		struct btrfs_path *path, u64 offset, u64 end)
2342{
2343	struct btrfs_fs_info *fs_info = trans->fs_info;
2344	struct btrfs_root *root = inode->root;
2345	struct extent_buffer *leaf;
2346	struct btrfs_file_extent_item *fi;
2347	struct extent_map *hole_em;
2348	struct extent_map_tree *em_tree = &inode->extent_tree;
2349	struct btrfs_key key;
2350	int ret;
2351
2352	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2353		goto out;
2354
2355	key.objectid = btrfs_ino(inode);
2356	key.type = BTRFS_EXTENT_DATA_KEY;
2357	key.offset = offset;
2358
2359	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2360	if (ret <= 0) {
2361		/*
2362		 * We should have dropped this offset, so if we find it then
2363		 * something has gone horribly wrong.
2364		 */
2365		if (ret == 0)
2366			ret = -EINVAL;
2367		return ret;
2368	}
2369
2370	leaf = path->nodes[0];
2371	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2372		u64 num_bytes;
2373
2374		path->slots[0]--;
2375		fi = btrfs_item_ptr(leaf, path->slots[0],
2376				    struct btrfs_file_extent_item);
2377		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2378			end - offset;
2379		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2380		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2381		btrfs_set_file_extent_offset(leaf, fi, 0);
2382		btrfs_mark_buffer_dirty(leaf);
2383		goto out;
2384	}
2385
2386	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2387		u64 num_bytes;
2388
2389		key.offset = offset;
2390		btrfs_set_item_key_safe(fs_info, path, &key);
2391		fi = btrfs_item_ptr(leaf, path->slots[0],
2392				    struct btrfs_file_extent_item);
2393		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2394			offset;
2395		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2396		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2397		btrfs_set_file_extent_offset(leaf, fi, 0);
2398		btrfs_mark_buffer_dirty(leaf);
2399		goto out;
2400	}
2401	btrfs_release_path(path);
2402
2403	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2404			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2405	if (ret)
2406		return ret;
2407
2408out:
2409	btrfs_release_path(path);
2410
2411	hole_em = alloc_extent_map();
2412	if (!hole_em) {
2413		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2414		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2415	} else {
2416		hole_em->start = offset;
2417		hole_em->len = end - offset;
2418		hole_em->ram_bytes = hole_em->len;
2419		hole_em->orig_start = offset;
2420
2421		hole_em->block_start = EXTENT_MAP_HOLE;
2422		hole_em->block_len = 0;
2423		hole_em->orig_block_len = 0;
2424		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2425		hole_em->generation = trans->transid;
2426
2427		do {
2428			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2429			write_lock(&em_tree->lock);
2430			ret = add_extent_mapping(em_tree, hole_em, 1);
2431			write_unlock(&em_tree->lock);
2432		} while (ret == -EEXIST);
2433		free_extent_map(hole_em);
2434		if (ret)
2435			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2436					&inode->runtime_flags);
2437	}
2438
2439	return 0;
2440}
2441
2442/*
2443 * Find a hole extent on the given inode and change start/len to the end of
2444 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2445 * em->start + em->len > start).
2446 * When a hole extent is found, return 1 and modify start/len.
2447 */
2448static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2449{
2450	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2451	struct extent_map *em;
2452	int ret = 0;
2453
2454	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2455			      round_down(*start, fs_info->sectorsize),
2456			      round_up(*len, fs_info->sectorsize));
2457	if (IS_ERR(em))
2458		return PTR_ERR(em);
2459
2460	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
2461	if (em->block_start == EXTENT_MAP_HOLE) {
2462		ret = 1;
2463		*len = em->start + em->len > *start + *len ?
2464		       0 : *start + *len - em->start - em->len;
2465		*start = em->start + em->len;
2466	}
2467	free_extent_map(em);
2468	return ret;
2469}
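
/*
 * Standalone model (not kernel code) of the start/len adjustment performed
 * by find_first_non_hole() when the looked-up extent map is a hole. The
 * struct and values below are illustrative only.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct em_model { u64 start, len; };	/* models a hole extent map */

static int skip_hole(const struct em_model *em, u64 *start, u64 *len)
{
	/* Same arithmetic as the EXTENT_MAP_HOLE branch above. */
	if (em->start + em->len > *start + *len)
		*len = 0;			/* range fully inside the hole */
	else
		*len = *start + *len - em->start - em->len;
	*start = em->start + em->len;
	return 1;
}

int main(void)
{
	struct em_model hole = { .start = 0, .len = 8192 };
	u64 start = 4096, len = 8192;

	/* Hole [0, 8192) overlaps [4096, 12288): skip to the hole's end. */
	skip_hole(&hole, &start, &len);
	printf("start=%llu len=%llu\n", start, len);	/* start=8192 len=4096 */
	return 0;
}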
2470
2471static int btrfs_punch_hole_lock_range(struct inode *inode,
2472				       const u64 lockstart,
2473				       const u64 lockend,
2474				       struct extent_state **cached_state)
2475{
2476	while (1) {
2477		struct btrfs_ordered_extent *ordered;
2478		int ret;
2479
2480		truncate_pagecache_range(inode, lockstart, lockend);
2481
2482		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2483				 cached_state);
2484		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2485
2486		/*
2487		 * We need to make sure we have no ordered extents in this range
2488		 * and that nobody raced in and read a page in this range; if
2489		 * either happened we need to try again.
2490		 */
2491		if ((!ordered ||
2492		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
2493		     ordered->file_offset > lockend)) &&
2494		     !filemap_range_has_page(inode->i_mapping,
2495					     lockstart, lockend)) {
2496			if (ordered)
2497				btrfs_put_ordered_extent(ordered);
2498			break;
2499		}
2500		if (ordered)
2501			btrfs_put_ordered_extent(ordered);
2502		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2503				     lockend, cached_state);
2504		ret = btrfs_wait_ordered_range(inode, lockstart,
2505					       lockend - lockstart + 1);
2506		if (ret)
2507			return ret;
2508	}
2509	return 0;
2510}
2511
2512static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
2513				     struct inode *inode,
2514				     struct btrfs_path *path,
2515				     struct btrfs_clone_extent_info *clone_info,
2516				     const u64 clone_len)
2517{
2518	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2519	struct btrfs_root *root = BTRFS_I(inode)->root;
2520	struct btrfs_file_extent_item *extent;
2521	struct extent_buffer *leaf;
2522	struct btrfs_key key;
2523	int slot;
2524	struct btrfs_ref ref = { 0 };
2525	u64 ref_offset;
2526	int ret;
2527
2528	if (clone_len == 0)
2529		return 0;
2530
2531	if (clone_info->disk_offset == 0 &&
2532	    btrfs_fs_incompat(fs_info, NO_HOLES))
2533		return 0;
2534
2535	key.objectid = btrfs_ino(BTRFS_I(inode));
2536	key.type = BTRFS_EXTENT_DATA_KEY;
2537	key.offset = clone_info->file_offset;
2538	ret = btrfs_insert_empty_item(trans, root, path, &key,
2539				      clone_info->item_size);
2540	if (ret)
2541		return ret;
2542	leaf = path->nodes[0];
2543	slot = path->slots[0];
2544	write_extent_buffer(leaf, clone_info->extent_buf,
2545			    btrfs_item_ptr_offset(leaf, slot),
2546			    clone_info->item_size);
2547	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2548	btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
2549	btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
2550	btrfs_mark_buffer_dirty(leaf);
2551	btrfs_release_path(path);
2552
2553	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
2554			clone_info->file_offset, clone_len);
2555	if (ret)
2556		return ret;
2557
2558	/* If it's a hole, nothing more needs to be done. */
2559	if (clone_info->disk_offset == 0)
2560		return 0;
2561
2562	inode_add_bytes(inode, clone_len);
2563	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2564			       clone_info->disk_offset,
2565			       clone_info->disk_len, 0);
2566	ref_offset = clone_info->file_offset - clone_info->data_offset;
2567	btrfs_init_data_ref(&ref, root->root_key.objectid,
2568			    btrfs_ino(BTRFS_I(inode)), ref_offset);
2569	ret = btrfs_inc_extent_ref(trans, &ref);
2570
2571	return ret;
2572}
2573
2574/*
2575 * The respective range must have been previously locked, as well as the inode.
2576 * The end offset is inclusive (last byte of the range).
2577 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
2578 * cloning.
2579 * When cloning, we don't want to end up in a state where we dropped extents
2580 * without inserting a new one, so we must abort the transaction to avoid
2581 * corruption.
2582 */
2583int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
2584			   const u64 start, const u64 end,
2585			   struct btrfs_clone_extent_info *clone_info,
2586			   struct btrfs_trans_handle **trans_out)
2587{
2588	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2589	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2590	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2591	struct btrfs_root *root = BTRFS_I(inode)->root;
2592	struct btrfs_trans_handle *trans = NULL;
2593	struct btrfs_block_rsv *rsv;
2594	unsigned int rsv_count;
2595	u64 cur_offset;
2596	u64 drop_end;
2597	u64 len = end - start;
2598	int ret = 0;
2599
2600	if (end <= start)
2601		return -EINVAL;
2602
2603	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2604	if (!rsv) {
2605		ret = -ENOMEM;
2606		goto out;
2607	}
2608	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2609	rsv->failfast = 1;
2610
2611	/*
2612	 * 1 - updating the inode
2613	 * 1 - removing the extents in the range
2614	 * 1 - adding the hole extent if no_holes isn't set or if we are cloning
2615	 *     an extent
2616	 */
2617	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
2618		rsv_count = 3;
2619	else
2620		rsv_count = 2;
2621
2622	trans = btrfs_start_transaction(root, rsv_count);
2623	if (IS_ERR(trans)) {
2624		ret = PTR_ERR(trans);
2625		trans = NULL;
2626		goto out_free;
2627	}
2628
2629	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2630				      min_size, false);
2631	BUG_ON(ret);
2632	trans->block_rsv = rsv;
2633
2634	cur_offset = start;
2635	while (cur_offset < end) {
2636		ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
2637					   cur_offset, end + 1, &drop_end,
2638					   1, 0, 0, NULL);
2639		if (ret != -ENOSPC) {
2640			/*
2641			 * When cloning we want to avoid transaction aborts when
2642			 * nothing was done and we are attempting to clone parts
2643			 * of inline extents, in such cases -EOPNOTSUPP is
2644			 * returned by __btrfs_drop_extents() without having
2645			 * changed anything in the file.
2646			 */
2647			if (clone_info && ret && ret != -EOPNOTSUPP)
2648				btrfs_abort_transaction(trans, ret);
2649			break;
2650		}
2651
2652		trans->block_rsv = &fs_info->trans_block_rsv;
2653
2654		if (!clone_info && cur_offset < drop_end &&
2655		    cur_offset < ino_size) {
2656			ret = fill_holes(trans, BTRFS_I(inode), path,
2657					cur_offset, drop_end);
2658			if (ret) {
2659				/*
2660				 * If we failed then we didn't insert our hole
2661				 * entries for the area we dropped, so the fs
2662				 * is now corrupted and we must abort the
2663				 * transaction.
2664				 */
2665				btrfs_abort_transaction(trans, ret);
2666				break;
2667			}
2668		} else if (!clone_info && cur_offset < drop_end) {
2669			/*
2670			 * We are past the i_size here, but since we didn't
2671			 * insert holes we need to clear the mapped area so we
2672			 * know to not set disk_i_size in this area until a new
2673			 * file extent is inserted here.
2674			 */
2675			ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
2676					cur_offset, drop_end - cur_offset);
2677			if (ret) {
2678				/*
2679				 * We couldn't clear our area, so we could
2680				 * presumably adjust up and corrupt the fs, so
2681				 * we need to abort.
2682				 */
2683				btrfs_abort_transaction(trans, ret);
2684				break;
2685			}
2686		}
2687
2688		if (clone_info && drop_end > clone_info->file_offset) {
2689			u64 clone_len = drop_end - clone_info->file_offset;
2690
2691			ret = btrfs_insert_clone_extent(trans, inode, path,
2692							clone_info, clone_len);
2693			if (ret) {
2694				btrfs_abort_transaction(trans, ret);
2695				break;
2696			}
2697			clone_info->data_len -= clone_len;
2698			clone_info->data_offset += clone_len;
2699			clone_info->file_offset += clone_len;
2700		}
2701
2702		cur_offset = drop_end;
2703
2704		ret = btrfs_update_inode(trans, root, inode);
2705		if (ret)
2706			break;
2707
2708		btrfs_end_transaction(trans);
2709		btrfs_btree_balance_dirty(fs_info);
2710
2711		trans = btrfs_start_transaction(root, rsv_count);
2712		if (IS_ERR(trans)) {
2713			ret = PTR_ERR(trans);
2714			trans = NULL;
2715			break;
2716		}
2717
2718		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2719					      rsv, min_size, false);
2720		BUG_ON(ret);	/* shouldn't happen */
2721		trans->block_rsv = rsv;
2722
2723		if (!clone_info) {
2724			ret = find_first_non_hole(inode, &cur_offset, &len);
2725			if (unlikely(ret < 0))
2726				break;
2727			if (ret && !len) {
2728				ret = 0;
2729				break;
2730			}
2731		}
2732	}
2733
2734	/*
2735	 * If we were cloning, force the next fsync to be a full one since we
2736	 * replaced (or just dropped in the case of cloning holes when
2737	 * NO_HOLES is enabled) extents and extent maps.
2738	 * This is for the sake of simplicity, and cloning into files larger
2739	 * than 16MiB would force the full fsync anyway (when
2740	 * try_release_extent_mapping() is invoked during page cache truncation).
2741	 */
2742	if (clone_info)
2743		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2744			&BTRFS_I(inode)->runtime_flags);
2745
2746	if (ret)
2747		goto out_trans;
2748
2749	trans->block_rsv = &fs_info->trans_block_rsv;
2750	/*
2751	 * If we are using the NO_HOLES feature we might already have had a
2752	 * hole that overlaps a part of the region [lockstart, lockend] and
2753	 * ends at (or beyond) lockend. Since we have no file extent items to
2754	 * represent holes, drop_end can be less than lockend and so we must
2755	 * make sure we have an extent map representing the existing hole (the
2756	 * call to __btrfs_drop_extents() might have dropped the existing extent
2757	 * map representing the existing hole), otherwise the fast fsync path
2758	 * will not record the existence of the hole region
2759	 * [existing_hole_start, lockend].
2760	 */
2761	if (drop_end <= end)
2762		drop_end = end + 1;
2763	/*
2764	 * Don't insert a file hole extent item if it's for a range beyond eof
2765	 * (because it's useless) or if it represents a zero-byte range (when
2766	 * cur_offset == drop_end).
2767	 */
2768	if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
2769		ret = fill_holes(trans, BTRFS_I(inode), path,
2770				cur_offset, drop_end);
2771		if (ret) {
2772			/* Same comment as above. */
2773			btrfs_abort_transaction(trans, ret);
2774			goto out_trans;
2775		}
2776	} else if (!clone_info && cur_offset < drop_end) {
2777		/* See the comment in the loop above for the reasoning here. */
2778		ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
2779					cur_offset, drop_end - cur_offset);
2780		if (ret) {
2781			btrfs_abort_transaction(trans, ret);
2782			goto out_trans;
2783		}
2784
2785	}
2786	if (clone_info) {
2787		ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
2788						clone_info->data_len);
2789		if (ret) {
2790			btrfs_abort_transaction(trans, ret);
2791			goto out_trans;
2792		}
2793	}
2794
2795out_trans:
2796	if (!trans)
2797		goto out_free;
2798
2799	trans->block_rsv = &fs_info->trans_block_rsv;
2800	if (ret)
2801		btrfs_end_transaction(trans);
2802	else
2803		*trans_out = trans;
2804out_free:
2805	btrfs_free_block_rsv(fs_info, rsv);
2806out:
2807	return ret;
2808}
2809
2810static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2811{
2812	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2813	struct btrfs_root *root = BTRFS_I(inode)->root;
2814	struct extent_state *cached_state = NULL;
2815	struct btrfs_path *path;
2816	struct btrfs_trans_handle *trans = NULL;
2817	u64 lockstart;
2818	u64 lockend;
2819	u64 tail_start;
2820	u64 tail_len;
2821	u64 orig_start = offset;
2822	int ret = 0;
2823	bool same_block;
2824	u64 ino_size;
2825	bool truncated_block = false;
2826	bool updated_inode = false;
2827
2828	ret = btrfs_wait_ordered_range(inode, offset, len);
2829	if (ret)
2830		return ret;
2831
2832	inode_lock(inode);
2833	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2834	ret = find_first_non_hole(inode, &offset, &len);
2835	if (ret < 0)
2836		goto out_only_mutex;
2837	if (ret && !len) {
2838		/* Already in a large hole */
2839		ret = 0;
2840		goto out_only_mutex;
2841	}
2842
2843	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2844	lockend = round_down(offset + len,
2845			     btrfs_inode_sectorsize(inode)) - 1;
2846	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2847		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2848	/*
2849	 * We needn't truncate any block which is beyond the end of the file
2850	 * because we are sure there is no data there.
2851	 */
2852	/*
2853	 * Only do this if we are in the same block and we aren't doing the
2854	 * entire block.
2855	 */
2856	if (same_block && len < fs_info->sectorsize) {
2857		if (offset < ino_size) {
2858			truncated_block = true;
2859			ret = btrfs_truncate_block(inode, offset, len, 0);
2860		} else {
2861			ret = 0;
2862		}
2863		goto out_only_mutex;
2864	}
2865
2866	/* zero back part of the first block */
2867	if (offset < ino_size) {
2868		truncated_block = true;
2869		ret = btrfs_truncate_block(inode, offset, 0, 0);
2870		if (ret) {
2871			inode_unlock(inode);
2872			return ret;
2873		}
2874	}
2875
2876	/* Check the aligned pages after the first unaligned page.
2877	 * If offset != orig_start, the first unaligned page and
2878	 * several following pages are already inside holes, so the
2879	 * extra check can be skipped. */
2880	if (offset == orig_start) {
2881		/* After truncating the page, check for holes again. */
2882		len = offset + len - lockstart;
2883		offset = lockstart;
2884		ret = find_first_non_hole(inode, &offset, &len);
2885		if (ret < 0)
2886			goto out_only_mutex;
2887		if (ret && !len) {
2888			ret = 0;
2889			goto out_only_mutex;
2890		}
2891		lockstart = offset;
2892	}
2893
2894	/* Check whether the unaligned tail part is in a hole */
2895	tail_start = lockend + 1;
2896	tail_len = offset + len - tail_start;
2897	if (tail_len) {
2898		ret = find_first_non_hole(inode, &tail_start, &tail_len);
2899		if (unlikely(ret < 0))
2900			goto out_only_mutex;
2901		if (!ret) {
2902			/* zero the front end of the last page */
2903			if (tail_start + tail_len < ino_size) {
2904				truncated_block = true;
2905				ret = btrfs_truncate_block(inode,
2906							tail_start + tail_len,
2907							0, 1);
2908				if (ret)
2909					goto out_only_mutex;
2910			}
2911		}
2912	}
2913
2914	if (lockend < lockstart) {
2915		ret = 0;
2916		goto out_only_mutex;
2917	}
2918
2919	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2920					  &cached_state);
2921	if (ret)
2922		goto out_only_mutex;
2923
2924	path = btrfs_alloc_path();
2925	if (!path) {
2926		ret = -ENOMEM;
2927		goto out;
2928	}
2929
2930	ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
2931				     &trans);
2932	btrfs_free_path(path);
2933	if (ret)
2934		goto out;
2935
2936	ASSERT(trans != NULL);
2937	inode_inc_iversion(inode);
2938	inode->i_mtime = inode->i_ctime = current_time(inode);
2939	ret = btrfs_update_inode(trans, root, inode);
2940	updated_inode = true;
2941	btrfs_end_transaction(trans);
2942	btrfs_btree_balance_dirty(fs_info);
2943out:
2944	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2945			     &cached_state);
2946out_only_mutex:
2947	if (!updated_inode && truncated_block && !ret) {
2948		/*
2949		 * If we only end up zeroing part of a page, we still need to
2950		 * update the inode item, so that all the time fields are
2951		 * updated as well as the necessary btrfs inode in memory fields
2952		 * for detecting, at fsync time, if the inode isn't yet in the
2953		 * log tree or it's there but not up to date.
2954		 */
2955		struct timespec64 now = current_time(inode);
2956
2957		inode_inc_iversion(inode);
2958		inode->i_mtime = now;
2959		inode->i_ctime = now;
2960		trans = btrfs_start_transaction(root, 1);
2961		if (IS_ERR(trans)) {
2962			ret = PTR_ERR(trans);
2963		} else {
2964			int ret2;
2965
2966			ret = btrfs_update_inode(trans, root, inode);
2967			ret2 = btrfs_end_transaction(trans);
2968			if (!ret)
2969				ret = ret2;
2970		}
2971	}
2972	inode_unlock(inode);
2973	return ret;
2974}
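
/*
 * Userspace sketch (file name is a placeholder) of the operation that
 * btrfs_punch_hole() implements: FALLOC_FL_PUNCH_HOLE must be combined
 * with FALLOC_FL_KEEP_SIZE, partial blocks at the edges are zeroed via
 * btrfs_truncate_block() and fully covered blocks have their extents
 * dropped.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* Deallocate 1MiB starting at offset 4096; i_size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}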
2975
2976/* Helper structure to record which range is already reserved */
2977struct falloc_range {
2978	struct list_head list;
2979	u64 start;
2980	u64 len;
2981};
2982
2983/*
2984 * Helper function to add falloc range
2985 *
2986 * The caller should have locked the larger extent range containing
2987 * [start, len)
2988 */
2989static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2990{
2991	struct falloc_range *prev = NULL;
2992	struct falloc_range *range = NULL;
2993
2994	if (list_empty(head))
2995		goto insert;
2996
2997	/*
2998	 * As fallocate iterates in bytenr order, we only need to check
2999	 * the last range.
3000	 */
3001	prev = list_entry(head->prev, struct falloc_range, list);
3002	if (prev->start + prev->len == start) {
3003		prev->len += len;
3004		return 0;
3005	}
3006insert:
3007	range = kmalloc(sizeof(*range), GFP_KERNEL);
3008	if (!range)
3009		return -ENOMEM;
3010	range->start = start;
3011	range->len = len;
3012	list_add_tail(&range->list, head);
3013	return 0;
3014}
3015
3016static int btrfs_fallocate_update_isize(struct inode *inode,
3017					const u64 end,
3018					const int mode)
3019{
3020	struct btrfs_trans_handle *trans;
3021	struct btrfs_root *root = BTRFS_I(inode)->root;
3022	int ret;
3023	int ret2;
3024
3025	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3026		return 0;
3027
3028	trans = btrfs_start_transaction(root, 1);
3029	if (IS_ERR(trans))
3030		return PTR_ERR(trans);
3031
3032	inode->i_ctime = current_time(inode);
3033	i_size_write(inode, end);
3034	btrfs_inode_safe_disk_i_size_write(inode, 0);
3035	ret = btrfs_update_inode(trans, root, inode);
3036	ret2 = btrfs_end_transaction(trans);
3037
3038	return ret ? ret : ret2;
3039}
3040
3041enum {
3042	RANGE_BOUNDARY_WRITTEN_EXTENT,
3043	RANGE_BOUNDARY_PREALLOC_EXTENT,
3044	RANGE_BOUNDARY_HOLE,
3045};
3046
3047static int btrfs_zero_range_check_range_boundary(struct inode *inode,
3048						 u64 offset)
3049{
3050	const u64 sectorsize = btrfs_inode_sectorsize(inode);
3051	struct extent_map *em;
3052	int ret;
3053
3054	offset = round_down(offset, sectorsize);
3055	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
3056	if (IS_ERR(em))
3057		return PTR_ERR(em);
3058
3059	if (em->block_start == EXTENT_MAP_HOLE)
3060		ret = RANGE_BOUNDARY_HOLE;
3061	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3062		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3063	else
3064		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3065
3066	free_extent_map(em);
3067	return ret;
3068}
3069
3070static int btrfs_zero_range(struct inode *inode,
3071			    loff_t offset,
3072			    loff_t len,
3073			    const int mode)
3074{
3075	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3076	struct extent_map *em;
3077	struct extent_changeset *data_reserved = NULL;
3078	int ret;
3079	u64 alloc_hint = 0;
3080	const u64 sectorsize = btrfs_inode_sectorsize(inode);
3081	u64 alloc_start = round_down(offset, sectorsize);
3082	u64 alloc_end = round_up(offset + len, sectorsize);
3083	u64 bytes_to_reserve = 0;
3084	bool space_reserved = false;
3085
3086	inode_dio_wait(inode);
3087
3088	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3089			      alloc_end - alloc_start);
3090	if (IS_ERR(em)) {
3091		ret = PTR_ERR(em);
3092		goto out;
3093	}
3094
3095	/*
3096	 * Avoid hole punching and extent allocation for some cases. More cases
3097	 * could be considered, but these are unlikely to be common and we keep
3098	 * things as simple as possible for now. Also, intentionally, if the target
3099	 * range contains one or more prealloc extents together with regular
3100	 * extents and holes, we drop all the existing extents and allocate a
3101	 * new prealloc extent, so that we get a larger contiguous disk extent.
3102	 */
3103	if (em->start <= alloc_start &&
3104	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3105		const u64 em_end = em->start + em->len;
3106
3107		if (em_end >= offset + len) {
3108			/*
3109			 * The whole range is already a prealloc extent,
3110			 * do nothing except updating the inode's i_size if
3111			 * needed.
3112			 */
3113			free_extent_map(em);
3114			ret = btrfs_fallocate_update_isize(inode, offset + len,
3115							   mode);
3116			goto out;
3117		}
3118		/*
3119		 * Part of the range is already a prealloc extent, so operate
3120		 * only on the remaining part of the range.
3121		 */
3122		alloc_start = em_end;
3123		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3124		len = offset + len - alloc_start;
3125		offset = alloc_start;
3126		alloc_hint = em->block_start + em->len;
3127	}
3128	free_extent_map(em);
3129
3130	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3131	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3132		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3133				      sectorsize);
3134		if (IS_ERR(em)) {
3135			ret = PTR_ERR(em);
3136			goto out;
3137		}
3138
3139		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3140			free_extent_map(em);
3141			ret = btrfs_fallocate_update_isize(inode, offset + len,
3142							   mode);
3143			goto out;
3144		}
3145		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3146			free_extent_map(em);
3147			ret = btrfs_truncate_block(inode, offset, len, 0);
3148			if (!ret)
3149				ret = btrfs_fallocate_update_isize(inode,
3150								   offset + len,
3151								   mode);
3152			return ret;
3153		}
3154		free_extent_map(em);
3155		alloc_start = round_down(offset, sectorsize);
3156		alloc_end = alloc_start + sectorsize;
3157		goto reserve_space;
3158	}
3159
3160	alloc_start = round_up(offset, sectorsize);
3161	alloc_end = round_down(offset + len, sectorsize);
3162
3163	/*
3164	 * For unaligned ranges, check the pages at the boundaries; they might
3165	 * map to an extent, in which case we need to partially zero them, or
3166	 * they might map to a hole, in which case we need our allocation range
3167	 * to cover them.
3168	 */
3169	if (!IS_ALIGNED(offset, sectorsize)) {
3170		ret = btrfs_zero_range_check_range_boundary(inode, offset);
3171		if (ret < 0)
3172			goto out;
3173		if (ret == RANGE_BOUNDARY_HOLE) {
3174			alloc_start = round_down(offset, sectorsize);
3175			ret = 0;
3176		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3177			ret = btrfs_truncate_block(inode, offset, 0, 0);
3178			if (ret)
3179				goto out;
3180		} else {
3181			ret = 0;
3182		}
3183	}
3184
3185	if (!IS_ALIGNED(offset + len, sectorsize)) {
3186		ret = btrfs_zero_range_check_range_boundary(inode,
3187							    offset + len);
3188		if (ret < 0)
3189			goto out;
3190		if (ret == RANGE_BOUNDARY_HOLE) {
3191			alloc_end = round_up(offset + len, sectorsize);
3192			ret = 0;
3193		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3194			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
3195			if (ret)
3196				goto out;
3197		} else {
3198			ret = 0;
3199		}
3200	}
3201
3202reserve_space:
3203	if (alloc_start < alloc_end) {
3204		struct extent_state *cached_state = NULL;
3205		const u64 lockstart = alloc_start;
3206		const u64 lockend = alloc_end - 1;
3207
3208		bytes_to_reserve = alloc_end - alloc_start;
3209		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3210						      bytes_to_reserve);
3211		if (ret < 0)
3212			goto out;
3213		space_reserved = true;
3214		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3215						  &cached_state);
3216		if (ret)
3217			goto out;
3218		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3219						alloc_start, bytes_to_reserve);
3220		if (ret)
3221			goto out;
3222		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3223						alloc_end - alloc_start,
3224						i_blocksize(inode),
3225						offset + len, &alloc_hint);
3226		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3227				     lockend, &cached_state);
3228		/* btrfs_prealloc_file_range releases reserved space on error */
3229		if (ret) {
3230			space_reserved = false;
3231			goto out;
3232		}
3233	}
3234	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3235 out:
3236	if (ret && space_reserved)
3237		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3238					       alloc_start, bytes_to_reserve);
3239	extent_changeset_free(data_reserved);
3240
3241	return ret;
3242}
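
/*
 * Userspace sketch (file name is a placeholder) of the path handled by
 * btrfs_zero_range(). Unlike hole punching, FALLOC_FL_ZERO_RANGE may be
 * used without FALLOC_FL_KEEP_SIZE, in which case the file can grow and
 * btrfs_fallocate_update_isize() persists the new i_size; aligned parts
 * of the range end up as prealloc extents.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* Zero [8192, 8192 + 65536), growing the file if necessary. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 8192, 65536) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}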
3243
3244static long btrfs_fallocate(struct file *file, int mode,
3245			    loff_t offset, loff_t len)
3246{
3247	struct inode *inode = file_inode(file);
3248	struct extent_state *cached_state = NULL;
3249	struct extent_changeset *data_reserved = NULL;
3250	struct falloc_range *range;
3251	struct falloc_range *tmp;
3252	struct list_head reserve_list;
3253	u64 cur_offset;
3254	u64 last_byte;
3255	u64 alloc_start;
3256	u64 alloc_end;
3257	u64 alloc_hint = 0;
3258	u64 locked_end;
3259	u64 actual_end = 0;
3260	struct extent_map *em;
3261	int blocksize = btrfs_inode_sectorsize(inode);
3262	int ret;
3263
3264	alloc_start = round_down(offset, blocksize);
3265	alloc_end = round_up(offset + len, blocksize);
3266	cur_offset = alloc_start;
3267
3268	/* Make sure we aren't being given some crap mode */
3269	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3270		     FALLOC_FL_ZERO_RANGE))
3271		return -EOPNOTSUPP;
3272
3273	if (mode & FALLOC_FL_PUNCH_HOLE)
3274		return btrfs_punch_hole(inode, offset, len);
3275
3276	/*
3277	 * Only trigger disk allocation, don't trigger qgroup reserve
3278	 *
3279	 * For qgroup space, it will be checked later.
3280	 */
3281	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3282		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3283						      alloc_end - alloc_start);
3284		if (ret < 0)
3285			return ret;
3286	}
3287
3288	inode_lock(inode);
3289
3290	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3291		ret = inode_newsize_ok(inode, offset + len);
3292		if (ret)
3293			goto out;
3294	}
3295
3296	/*
3297	 * TODO: Move these two operations after we have checked
3298	 * accurate reserved space, or fallocate can still fail but
3299	 * with the page truncated or the size expanded.
3300	 *
3301	 * But that's a minor problem and won't do much harm BTW.
3302	 */
3303	if (alloc_start > inode->i_size) {
3304		ret = btrfs_cont_expand(inode, i_size_read(inode),
3305					alloc_start);
3306		if (ret)
3307			goto out;
3308	} else if (offset + len > inode->i_size) {
3309		/*
3310		 * If we are fallocating from the end of the file onward we
3311		 * need to zero out the end of the block if i_size lands in the
3312		 * middle of a block.
3313		 */
3314		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3315		if (ret)
3316			goto out;
3317	}
3318
3319	/*
3320	 * wait for ordered IO before we have any locks.  We'll loop again
3321	 * below with the locks held.
3322	 */
3323	ret = btrfs_wait_ordered_range(inode, alloc_start,
3324				       alloc_end - alloc_start);
3325	if (ret)
3326		goto out;
3327
3328	if (mode & FALLOC_FL_ZERO_RANGE) {
3329		ret = btrfs_zero_range(inode, offset, len, mode);
3330		inode_unlock(inode);
3331		return ret;
3332	}
3333
3334	locked_end = alloc_end - 1;
3335	while (1) {
3336		struct btrfs_ordered_extent *ordered;
3337
3338		/* the extent lock is ordered inside the running
3339		 * transaction
3340		 */
3341		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3342				 locked_end, &cached_state);
3343		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3344
3345		if (ordered &&
3346		    ordered->file_offset + ordered->num_bytes > alloc_start &&
3347		    ordered->file_offset < alloc_end) {
3348			btrfs_put_ordered_extent(ordered);
3349			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3350					     alloc_start, locked_end,
3351					     &cached_state);
3352			/*
3353			 * we can't wait on the range with the transaction
3354			 * running or with the extent lock held
3355			 */
3356			ret = btrfs_wait_ordered_range(inode, alloc_start,
3357						       alloc_end - alloc_start);
3358			if (ret)
3359				goto out;
3360		} else {
3361			if (ordered)
3362				btrfs_put_ordered_extent(ordered);
3363			break;
3364		}
3365	}
3366
3367	/* First, check if we exceed the qgroup limit */
3368	INIT_LIST_HEAD(&reserve_list);
3369	while (cur_offset < alloc_end) {
3370		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3371				      alloc_end - cur_offset);
3372		if (IS_ERR(em)) {
3373			ret = PTR_ERR(em);
3374			break;
3375		}
3376		last_byte = min(extent_map_end(em), alloc_end);
3377		actual_end = min_t(u64, extent_map_end(em), offset + len);
3378		last_byte = ALIGN(last_byte, blocksize);
3379		if (em->block_start == EXTENT_MAP_HOLE ||
3380		    (cur_offset >= inode->i_size &&
3381		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3382			ret = add_falloc_range(&reserve_list, cur_offset,
3383					       last_byte - cur_offset);
3384			if (ret < 0) {
3385				free_extent_map(em);
3386				break;
3387			}
3388			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3389					&data_reserved, cur_offset,
3390					last_byte - cur_offset);
3391			if (ret < 0) {
3392				cur_offset = last_byte;
3393				free_extent_map(em);
3394				break;
3395			}
3396		} else {
3397			/*
3398			 * We don't need to reserve an unwritten extent for this
3399			 * range, so free the reserved data space first, otherwise
3400			 * it'll result in a false ENOSPC error.
3401			 */
3402			btrfs_free_reserved_data_space(BTRFS_I(inode),
3403				data_reserved, cur_offset,
3404				last_byte - cur_offset);
3405		}
3406		free_extent_map(em);
3407		cur_offset = last_byte;
3408	}
3409
3410	/*
3411	 * If ret is still 0, we're OK to fallocate.
3412	 * Otherwise just clean up the list and exit.
3413	 */
3414	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3415		if (!ret)
3416			ret = btrfs_prealloc_file_range(inode, mode,
3417					range->start,
3418					range->len, i_blocksize(inode),
3419					offset + len, &alloc_hint);
3420		else
3421			btrfs_free_reserved_data_space(BTRFS_I(inode),
3422					data_reserved, range->start,
3423					range->len);
3424		list_del(&range->list);
3425		kfree(range);
3426	}
3427	if (ret < 0)
3428		goto out_unlock;
3429
3430	/*
3431	 * We didn't need to allocate any more space, but we still extended the
3432	 * size of the file so we need to update i_size and the inode item.
3433	 */
3434	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3435out_unlock:
3436	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3437			     &cached_state);
3438out:
3439	inode_unlock(inode);
3440	/* Let go of our reservation. */
3441	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3442		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3443				cur_offset, alloc_end - cur_offset);
3444	extent_changeset_free(data_reserved);
3445	return ret;
3446}
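
/*
 * Userspace sketch (file name is a placeholder) of btrfs_fallocate()'s
 * default mode: preallocate extents so that later writes into the range
 * cannot fail with ENOSPC. Without FALLOC_FL_KEEP_SIZE the file grows to
 * offset + len and the new i_size is persisted via
 * btrfs_fallocate_update_isize().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Preallocate 16MiB from the start of the file, extending i_size. */
	if (fallocate(fd, 0, 0, 16 * 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}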
3447
3448static loff_t find_desired_extent(struct inode *inode, loff_t offset,
3449				  int whence)
3450{
3451	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3452	struct extent_map *em = NULL;
3453	struct extent_state *cached_state = NULL;
3454	loff_t i_size = inode->i_size;
3455	u64 lockstart;
3456	u64 lockend;
3457	u64 start;
3458	u64 len;
3459	int ret = 0;
3460
3461	if (i_size == 0 || offset >= i_size)
3462		return -ENXIO;
3463
3464	/*
3465	 * offset can be negative; in this case we start finding DATA/HOLE from
3466	 * the very start of the file.
3467	 */
3468	start = max_t(loff_t, 0, offset);
3469
3470	lockstart = round_down(start, fs_info->sectorsize);
3471	lockend = round_up(i_size, fs_info->sectorsize);
3472	if (lockend <= lockstart)
3473		lockend = lockstart + fs_info->sectorsize;
3474	lockend--;
3475	len = lockend - lockstart + 1;
3476
3477	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3478			 &cached_state);
3479
3480	while (start < i_size) {
3481		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
3482		if (IS_ERR(em)) {
3483			ret = PTR_ERR(em);
3484			em = NULL;
3485			break;
3486		}
3487
3488		if (whence == SEEK_HOLE &&
3489		    (em->block_start == EXTENT_MAP_HOLE ||
3490		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3491			break;
3492		else if (whence == SEEK_DATA &&
3493			   (em->block_start != EXTENT_MAP_HOLE &&
3494			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3495			break;
3496
3497		start = em->start + em->len;
3498		free_extent_map(em);
3499		em = NULL;
3500		cond_resched();
3501	}
3502	free_extent_map(em);
3503	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3504			     &cached_state);
3505	if (ret) {
3506		offset = ret;
3507	} else {
3508		if (whence == SEEK_DATA && start >= i_size)
3509			offset = -ENXIO;
3510		else
3511			offset = min_t(loff_t, start, i_size);
3512	}
3513
3514	return offset;
3515}
3516
3517static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3518{
3519	struct inode *inode = file->f_mapping->host;
3520
3521	switch (whence) {
3522	default:
3523		return generic_file_llseek(file, offset, whence);
3524	case SEEK_DATA:
3525	case SEEK_HOLE:
3526		inode_lock_shared(inode);
3527		offset = find_desired_extent(inode, offset, whence);
3528		inode_unlock_shared(inode);
3529		break;
3530	}
3531
3532	if (offset < 0)
3533		return offset;
3534
3535	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3536}
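
/*
 * Userspace sketch (file name is a placeholder) of the SEEK_DATA/SEEK_HOLE
 * support provided by find_desired_extent(): walk a sparse file and print
 * its data regions. lseek() returns -1 with errno ENXIO once past i_size,
 * which terminates the loop.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t data = 0, hole;
	int fd = open("testfile", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		/* There is always a virtual hole at EOF, so this succeeds. */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}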
3537
3538static int btrfs_file_open(struct inode *inode, struct file *filp)
3539{
3540	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
3541	return generic_file_open(inode, filp);
3542}
3543
3544const struct file_operations btrfs_file_operations = {
3545	.llseek		= btrfs_file_llseek,
3546	.read_iter      = generic_file_read_iter,
3547	.splice_read	= generic_file_splice_read,
3548	.write_iter	= btrfs_file_write_iter,
3549	.splice_write	= iter_file_splice_write,
3550	.mmap		= btrfs_file_mmap,
3551	.open		= btrfs_file_open,
3552	.release	= btrfs_release_file,
3553	.fsync		= btrfs_sync_file,
3554	.fallocate	= btrfs_fallocate,
3555	.unlocked_ioctl	= btrfs_ioctl,
3556#ifdef CONFIG_COMPAT
3557	.compat_ioctl	= btrfs_compat_ioctl,
3558#endif
3559	.remap_file_range = btrfs_remap_file_range,
3560};
3561
3562void __cold btrfs_auto_defrag_exit(void)
3563{
3564	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3565}
3566
3567int __init btrfs_auto_defrag_init(void)
3568{
3569	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3570					sizeof(struct inode_defrag), 0,
3571					SLAB_MEM_SPREAD,
3572					NULL);
3573	if (!btrfs_inode_defrag_cachep)
3574		return -ENOMEM;
3575
3576	return 0;
3577}
3578
3579int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3580{
3581	int ret;
3582
3583	/*
3584	 * So with compression we will find and lock a dirty page and clear the
3585	 * first one as dirty, set up an async extent, and immediately return
3586	 * with the entire range locked but with nobody actually marked with
3587	 * writeback.  So we can't just filemap_write_and_wait_range() and
3588	 * expect it to work since it will just kick off a thread to do the
3589	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3590	 * since it will wait on the page lock, which won't be unlocked until
3591	 * after the pages have been marked as writeback and so we're good to go
3592	 * from there.  We have to do this otherwise we'll miss the ordered
3593	 * extents and that results in badness.  Please Josef, do not think you
3594	 * know better and pull this out at some point in the future, it is
3595	 * right and you are wrong.
3596	 */
3597	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3598	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3599			     &BTRFS_I(inode)->runtime_flags))
3600		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3601
3602	return ret;
3603}