   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/time.h>
   9#include <linux/init.h>
  10#include <linux/string.h>
  11#include <linux/backing-dev.h>
  12#include <linux/falloc.h>
  13#include <linux/writeback.h>
  14#include <linux/compat.h>
  15#include <linux/slab.h>
  16#include <linux/btrfs.h>
  17#include <linux/uio.h>
  18#include <linux/iversion.h>
  19#include "ctree.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "btrfs_inode.h"
  23#include "print-tree.h"
  24#include "tree-log.h"
  25#include "locking.h"
  26#include "volumes.h"
  27#include "qgroup.h"
  28#include "compression.h"
  29#include "delalloc-space.h"
  30
  31static struct kmem_cache *btrfs_inode_defrag_cachep;
  32/*
  33 * when auto defrag is enabled we
  34 * queue up these defrag structs to remember which
  35 * inodes need defragging passes
  36 */
  37struct inode_defrag {
  38	struct rb_node rb_node;
  39	/* objectid */
  40	u64 ino;
  41	/*
  42	 * transid where the defrag was added, we search for
  43	 * extents newer than this
  44	 */
  45	u64 transid;
  46
  47	/* root objectid */
  48	u64 root;
  49
  50	/* last offset we were able to defrag */
  51	u64 last_offset;
  52
  53	/* if we've wrapped around back to zero once already */
  54	int cycled;
  55};
  56
  57static int __compare_inode_defrag(struct inode_defrag *defrag1,
  58				  struct inode_defrag *defrag2)
  59{
  60	if (defrag1->root > defrag2->root)
  61		return 1;
  62	else if (defrag1->root < defrag2->root)
  63		return -1;
  64	else if (defrag1->ino > defrag2->ino)
  65		return 1;
  66	else if (defrag1->ino < defrag2->ino)
  67		return -1;
  68	else
  69		return 0;
  70}
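/*
 * Editor's note (illustrative, not part of the original source): the
 * defrag tree is keyed on the (root, ino) pair, compared
 * lexicographically by the helper above.  For example,
 * (root=5, ino=260) < (root=5, ino=261) < (root=7, ino=2).  This
 * stable order is what lets btrfs_pick_defrag_inode() resume a scan
 * from a (root_objectid, first_ino) cursor further down in this file.
 */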
  71
  72/* insert a record for an inode into the defrag tree.  The lock
  73 * must be held already.
  74 *
  75 * If you're inserting a record for an older transid than an
  76 * existing record, the transid already in the tree is lowered.
  77 *
  78 * If an existing record is found, the defrag item you
  79 * pass in is freed.
  80 */
  81static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  82				    struct inode_defrag *defrag)
  83{
  84	struct btrfs_fs_info *fs_info = inode->root->fs_info;
  85	struct inode_defrag *entry;
  86	struct rb_node **p;
  87	struct rb_node *parent = NULL;
  88	int ret;
  89
  90	p = &fs_info->defrag_inodes.rb_node;
  91	while (*p) {
  92		parent = *p;
  93		entry = rb_entry(parent, struct inode_defrag, rb_node);
  94
  95		ret = __compare_inode_defrag(defrag, entry);
  96		if (ret < 0)
  97			p = &parent->rb_left;
  98		else if (ret > 0)
  99			p = &parent->rb_right;
 100		else {
 101			/* if we're reinserting an entry for
 102			 * an old defrag run, make sure to
 103			 * lower the transid of our existing record
 104			 */
 105			if (defrag->transid < entry->transid)
 106				entry->transid = defrag->transid;
 107			if (defrag->last_offset > entry->last_offset)
 108				entry->last_offset = defrag->last_offset;
 109			return -EEXIST;
 110		}
 111	}
 112	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 113	rb_link_node(&defrag->rb_node, parent, p);
 114	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 115	return 0;
 116}
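/*
 * Editor's example of the merge semantics above: if a record with
 * (transid=100, last_offset=4096) is already queued and the same inode
 * is re-added with (transid=90, last_offset=8192), the existing record
 * becomes (transid=90, last_offset=8192) -- the older transid wins so
 * newer extents are not missed, and the larger last_offset is kept so
 * a requeued pass resumes where the previous one stopped.
 */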
 117
 118static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 119{
 120	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 121		return 0;
 122
 123	if (btrfs_fs_closing(fs_info))
 124		return 0;
 125
 126	return 1;
 127}
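/*
 * Usage note (editor's addition): auto defrag is controlled by the
 * "autodefrag" mount option, e.g.:
 *
 *   mount -o autodefrag /dev/sdb /mnt
 *
 * btrfs_test_opt(fs_info, AUTO_DEFRAG) reflects that option, and the
 * btrfs_fs_closing() check stops queueing new work during unmount.
 */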
 128
 129/*
 130 * insert a defrag record for this inode if auto defrag is
 131 * enabled
 132 */
 133int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 134			   struct btrfs_inode *inode)
 135{
 136	struct btrfs_root *root = inode->root;
 137	struct btrfs_fs_info *fs_info = root->fs_info;
 138	struct inode_defrag *defrag;
 139	u64 transid;
 140	int ret;
 141
 142	if (!__need_auto_defrag(fs_info))
 143		return 0;
 144
 145	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 146		return 0;
 147
 148	if (trans)
 149		transid = trans->transid;
 150	else
 151		transid = inode->root->last_trans;
 152
 153	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 154	if (!defrag)
 155		return -ENOMEM;
 156
 157	defrag->ino = btrfs_ino(inode);
 158	defrag->transid = transid;
 159	defrag->root = root->root_key.objectid;
 160
 161	spin_lock(&fs_info->defrag_inodes_lock);
 162	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
 163		/*
  164		 * If we set the IN_DEFRAG flag and evict the inode from memory,
  165		 * and then re-read the inode, the new in-memory inode doesn't have
  166		 * the IN_DEFRAG flag set. In that case, we may find an existing record.
 167		 */
 168		ret = __btrfs_add_inode_defrag(inode, defrag);
 169		if (ret)
 170			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 171	} else {
 172		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 173	}
 174	spin_unlock(&fs_info->defrag_inodes_lock);
 175	return 0;
 176}
 177
 178/*
 179 * Requeue the defrag object. If there is a defrag object that points to
 180 * the same inode in the tree, we will merge them together (by
 181 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 182 */
 183static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 184				       struct inode_defrag *defrag)
 185{
 186	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 187	int ret;
 188
 189	if (!__need_auto_defrag(fs_info))
 190		goto out;
 191
 192	/*
  193	 * Here we don't check the IN_DEFRAG flag, because we need to merge
  194	 * the two records together.
 195	 */
 196	spin_lock(&fs_info->defrag_inodes_lock);
 197	ret = __btrfs_add_inode_defrag(inode, defrag);
 198	spin_unlock(&fs_info->defrag_inodes_lock);
 199	if (ret)
 200		goto out;
 201	return;
 202out:
 203	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 204}
 205
 206/*
  207 * pick the defrag record for the inode we want; if it doesn't exist, we
  208 * will get the next one in tree order.
 209 */
 210static struct inode_defrag *
 211btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 212{
 213	struct inode_defrag *entry = NULL;
 214	struct inode_defrag tmp;
 215	struct rb_node *p;
 216	struct rb_node *parent = NULL;
 217	int ret;
 218
 219	tmp.ino = ino;
 220	tmp.root = root;
 221
 222	spin_lock(&fs_info->defrag_inodes_lock);
 223	p = fs_info->defrag_inodes.rb_node;
 224	while (p) {
 225		parent = p;
 226		entry = rb_entry(parent, struct inode_defrag, rb_node);
 227
 228		ret = __compare_inode_defrag(&tmp, entry);
 229		if (ret < 0)
 230			p = parent->rb_left;
 231		else if (ret > 0)
 232			p = parent->rb_right;
 233		else
 234			goto out;
 235	}
 236
 237	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 238		parent = rb_next(parent);
 239		if (parent)
 240			entry = rb_entry(parent, struct inode_defrag, rb_node);
 241		else
 242			entry = NULL;
 243	}
 244out:
 245	if (entry)
 246		rb_erase(parent, &fs_info->defrag_inodes);
 247	spin_unlock(&fs_info->defrag_inodes_lock);
 248	return entry;
 249}
 250
 251void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 252{
 253	struct inode_defrag *defrag;
 254	struct rb_node *node;
 255
 256	spin_lock(&fs_info->defrag_inodes_lock);
 257	node = rb_first(&fs_info->defrag_inodes);
 258	while (node) {
 259		rb_erase(node, &fs_info->defrag_inodes);
 260		defrag = rb_entry(node, struct inode_defrag, rb_node);
 261		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 262
 263		cond_resched_lock(&fs_info->defrag_inodes_lock);
 264
 265		node = rb_first(&fs_info->defrag_inodes);
 266	}
 267	spin_unlock(&fs_info->defrag_inodes_lock);
 268}
 269
 270#define BTRFS_DEFRAG_BATCH	1024
 271
 272static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 273				    struct inode_defrag *defrag)
 274{
 275	struct btrfs_root *inode_root;
 276	struct inode *inode;
 277	struct btrfs_key key;
 278	struct btrfs_ioctl_defrag_range_args range;
 279	int num_defrag;
 280	int index;
 281	int ret;
 282
 283	/* get the inode */
 284	key.objectid = defrag->root;
 285	key.type = BTRFS_ROOT_ITEM_KEY;
 286	key.offset = (u64)-1;
 287
 288	index = srcu_read_lock(&fs_info->subvol_srcu);
 289
 290	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
 291	if (IS_ERR(inode_root)) {
 292		ret = PTR_ERR(inode_root);
 293		goto cleanup;
 294	}
 295
 296	key.objectid = defrag->ino;
 297	key.type = BTRFS_INODE_ITEM_KEY;
 298	key.offset = 0;
 299	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
 300	if (IS_ERR(inode)) {
 301		ret = PTR_ERR(inode);
 302		goto cleanup;
 303	}
 304	srcu_read_unlock(&fs_info->subvol_srcu, index);
 305
 306	/* do a chunk of defrag */
 307	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 308	memset(&range, 0, sizeof(range));
 309	range.len = (u64)-1;
 310	range.start = defrag->last_offset;
 311
 312	sb_start_write(fs_info->sb);
 313	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 314				       BTRFS_DEFRAG_BATCH);
 315	sb_end_write(fs_info->sb);
 316	/*
 317	 * if we filled the whole defrag batch, there
 318	 * must be more work to do.  Queue this defrag
 319	 * again
 320	 */
 321	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 322		defrag->last_offset = range.start;
 323		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 324	} else if (defrag->last_offset && !defrag->cycled) {
 325		/*
 326		 * we didn't fill our defrag batch, but
 327		 * we didn't start at zero.  Make sure we loop
 328		 * around to the start of the file.
 329		 */
 330		defrag->last_offset = 0;
 331		defrag->cycled = 1;
 332		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 333	} else {
 334		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 335	}
 336
 337	iput(inode);
 338	return 0;
 339cleanup:
 340	srcu_read_unlock(&fs_info->subvol_srcu, index);
 341	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 342	return ret;
 343}
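/*
 * Editor's note: BTRFS_DEFRAG_BATCH bounds how much of one file is
 * defragged per pass so a single huge file cannot starve the others.
 * A full batch from btrfs_defrag_file() implies there is probably more
 * to do, so the record is requeued with last_offset advanced; a short
 * batch that did not start at offset zero triggers one wrap-around
 * pass (cycled) back to the start of the file before the record is
 * freed.
 */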
 344
 345/*
 346 * run through the list of inodes in the FS that need
 347 * defragging
 348 */
 349int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 350{
 351	struct inode_defrag *defrag;
 352	u64 first_ino = 0;
 353	u64 root_objectid = 0;
 354
 355	atomic_inc(&fs_info->defrag_running);
 356	while (1) {
 357		/* Pause the auto defragger. */
 358		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 359			     &fs_info->fs_state))
 360			break;
 361
 362		if (!__need_auto_defrag(fs_info))
 363			break;
 364
 365		/* find an inode to defrag */
 366		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 367						 first_ino);
 368		if (!defrag) {
 369			if (root_objectid || first_ino) {
 370				root_objectid = 0;
 371				first_ino = 0;
 372				continue;
 373			} else {
 374				break;
 375			}
 376		}
 377
 378		first_ino = defrag->ino + 1;
 379		root_objectid = defrag->root;
 380
 381		__btrfs_run_defrag_inode(fs_info, defrag);
 382	}
 383	atomic_dec(&fs_info->defrag_running);
 384
 385	/*
 386	 * during unmount, we use the transaction_wait queue to
 387	 * wait for the defragger to stop
 388	 */
 389	wake_up(&fs_info->transaction_wait);
 390	return 0;
 391}
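/*
 * Editor's note on the loop above: (root_objectid, first_ino) acts as
 * a resume cursor.  After handling a record for (root R, inode I) the
 * next lookup starts at (R, I + 1); when the cursor runs off the end
 * of the tree it is reset to (0, 0) for one more sweep before the
 * loop gives up.
 */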
 392
 393/* simple helper to fault in pages and copy.  This should go away
 394 * and be replaced with calls into generic code.
 395 */
 396static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 397					 struct page **prepared_pages,
 398					 struct iov_iter *i)
 399{
 400	size_t copied = 0;
 401	size_t total_copied = 0;
 402	int pg = 0;
 403	int offset = offset_in_page(pos);
 404
 405	while (write_bytes > 0) {
 406		size_t count = min_t(size_t,
 407				     PAGE_SIZE - offset, write_bytes);
 408		struct page *page = prepared_pages[pg];
 409		/*
 410		 * Copy data from userspace to the current page
 411		 */
 412		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 413
 414		/* Flush processor's dcache for this page */
 415		flush_dcache_page(page);
 416
 417		/*
 418		 * if we get a partial write, we can end up with
  419		 * partially up-to-date pages.  These add
 420		 * a lot of complexity, so make sure they don't
 421		 * happen by forcing this copy to be retried.
 422		 *
 423		 * The rest of the btrfs_file_write code will fall
 424		 * back to page at a time copies after we return 0.
 425		 */
 426		if (!PageUptodate(page) && copied < count)
  427			copied = 0;
  428
 429		iov_iter_advance(i, copied);
 430		write_bytes -= copied;
 431		total_copied += copied;
 432
 433		/* Return to btrfs_file_write_iter to fault page */
 434		if (unlikely(copied == 0))
 435			break;
 436
 437		if (copied < PAGE_SIZE - offset) {
 438			offset += copied;
 439		} else {
 440			pg++;
 441			offset = 0;
 442		}
 443	}
 444	return total_copied;
 445}
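/*
 * Worked example for the copy loop above (editor's sketch, assuming
 * PAGE_SIZE == 4096): for pos = 4000 and write_bytes = 1000, offset
 * starts at offset_in_page(4000) = 4000, so the first iteration copies
 * min(4096 - 4000, 1000) = 96 bytes into page 0; then pg becomes 1,
 * offset becomes 0, and the second iteration copies the remaining 904
 * bytes into page 1.
 */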
 446
 447/*
 448 * unlocks pages after btrfs_file_write is done with them
 449 */
 450static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 451{
 452	size_t i;
 453	for (i = 0; i < num_pages; i++) {
  454		/* page checked is some magic around finding pages that
  455		 * have been modified without going through btrfs_set_page_dirty;
  456		 * clear it here. There should be no need to mark the pages
  457		 * accessed, as prepare_pages should have marked them accessed
  458		 * via find_or_create_page().
 459		 */
 460		ClearPageChecked(pages[i]);
 461		unlock_page(pages[i]);
 462		put_page(pages[i]);
 463	}
 464}
 465
 466static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
 467					 const u64 start,
 468					 const u64 len,
 469					 struct extent_state **cached_state)
 470{
 471	u64 search_start = start;
 472	const u64 end = start + len - 1;
 473
 474	while (search_start < end) {
 475		const u64 search_len = end - search_start + 1;
 476		struct extent_map *em;
 477		u64 em_len;
 478		int ret = 0;
 479
 480		em = btrfs_get_extent(inode, NULL, 0, search_start,
 481				      search_len, 0);
 482		if (IS_ERR(em))
 483			return PTR_ERR(em);
 484
 485		if (em->block_start != EXTENT_MAP_HOLE)
 486			goto next;
 487
 488		em_len = em->len;
 489		if (em->start < search_start)
 490			em_len -= search_start - em->start;
 491		if (em_len > search_len)
 492			em_len = search_len;
 493
 494		ret = set_extent_bit(&inode->io_tree, search_start,
 495				     search_start + em_len - 1,
 496				     EXTENT_DELALLOC_NEW,
 497				     NULL, cached_state, GFP_NOFS);
 498next:
 499		search_start = extent_map_end(em);
 500		free_extent_map(em);
 501		if (ret)
 502			return ret;
 503	}
 504	return 0;
 505}
 506
 507/*
 508 * after copy_from_user, pages need to be dirtied and we need to make
 509 * sure holes are created between the current EOF and the start of
 510 * any next extents (if required).
 511 *
 512 * this also makes the decision about creating an inline extent vs
 513 * doing real data extents, marking pages dirty and delalloc as required.
 514 */
 515int btrfs_dirty_pages(struct inode *inode, struct page **pages,
 516		      size_t num_pages, loff_t pos, size_t write_bytes,
 517		      struct extent_state **cached)
 518{
 519	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 520	int err = 0;
 521	int i;
 522	u64 num_bytes;
 523	u64 start_pos;
 524	u64 end_of_last_block;
 525	u64 end_pos = pos + write_bytes;
 526	loff_t isize = i_size_read(inode);
 527	unsigned int extra_bits = 0;
 528
  529	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
  530	num_bytes = round_up(write_bytes + pos - start_pos,
  531			     fs_info->sectorsize);
  532
 533	end_of_last_block = start_pos + num_bytes - 1;
 534
 535	/*
 536	 * The pages may have already been dirty, clear out old accounting so
 537	 * we can set things up properly
 538	 */
 539	clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
 540			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 541			 0, 0, cached);
 542
 543	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
 544		if (start_pos >= isize &&
 545		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
 546			/*
 547			 * There can't be any extents following eof in this case
 548			 * so just set the delalloc new bit for the range
 549			 * directly.
 550			 */
 551			extra_bits |= EXTENT_DELALLOC_NEW;
 552		} else {
 553			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
 554							    start_pos,
 555							    num_bytes, cached);
 556			if (err)
 557				return err;
 558		}
 559	}
 560
 561	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 562					extra_bits, cached);
 563	if (err)
 564		return err;
 565
 566	for (i = 0; i < num_pages; i++) {
 567		struct page *p = pages[i];
  568		SetPageUptodate(p);
  569		ClearPageChecked(p);
 570		set_page_dirty(p);
 571	}
 572
 573	/*
 574	 * we've only changed i_size in ram, and we haven't updated
 575	 * the disk i_size.  There is no need to log the inode
 576	 * at this time.
 577	 */
 578	if (end_pos > isize)
 579		i_size_write(inode, end_pos);
 580	return 0;
 581}
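/*
 * Worked example for the rounding in btrfs_dirty_pages() (editor's
 * sketch, assuming sectorsize == 4096): for pos = 5000 and
 * write_bytes = 100, start_pos = 5000 & ~4095 = 4096 and
 * num_bytes = round_up(100 + 5000 - 4096, 4096) = 4096, giving
 * end_of_last_block = 8191.  Delalloc state is always tracked on
 * whole-sector boundaries, even for sub-sector writes.
 */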
 582
 583/*
 584 * this drops all the extents in the cache that intersect the range
 585 * [start, end].  Existing extents are split as required.
 586 */
 587void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 588			     int skip_pinned)
 589{
 590	struct extent_map *em;
 591	struct extent_map *split = NULL;
 592	struct extent_map *split2 = NULL;
 593	struct extent_map_tree *em_tree = &inode->extent_tree;
 594	u64 len = end - start + 1;
 595	u64 gen;
 596	int ret;
 597	int testend = 1;
 598	unsigned long flags;
 599	int compressed = 0;
 600	bool modified;
 601
 602	WARN_ON(end < start);
 603	if (end == (u64)-1) {
 604		len = (u64)-1;
 605		testend = 0;
 606	}
 607	while (1) {
 608		int no_splits = 0;
 609
 610		modified = false;
 611		if (!split)
 612			split = alloc_extent_map();
 613		if (!split2)
 614			split2 = alloc_extent_map();
 615		if (!split || !split2)
 616			no_splits = 1;
 617
 618		write_lock(&em_tree->lock);
 619		em = lookup_extent_mapping(em_tree, start, len);
 620		if (!em) {
 621			write_unlock(&em_tree->lock);
 622			break;
 623		}
 624		flags = em->flags;
 625		gen = em->generation;
 626		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 627			if (testend && em->start + em->len >= start + len) {
 628				free_extent_map(em);
 629				write_unlock(&em_tree->lock);
 630				break;
 631			}
 632			start = em->start + em->len;
 633			if (testend)
 634				len = start + len - (em->start + em->len);
 635			free_extent_map(em);
 636			write_unlock(&em_tree->lock);
 637			continue;
 638		}
 639		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 640		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 641		clear_bit(EXTENT_FLAG_LOGGING, &flags);
 642		modified = !list_empty(&em->list);
 643		if (no_splits)
 644			goto next;
 645
 646		if (em->start < start) {
 647			split->start = em->start;
 648			split->len = start - em->start;
 649
 650			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 651				split->orig_start = em->orig_start;
 652				split->block_start = em->block_start;
 653
 654				if (compressed)
 655					split->block_len = em->block_len;
 656				else
 657					split->block_len = split->len;
 658				split->orig_block_len = max(split->block_len,
 659						em->orig_block_len);
 660				split->ram_bytes = em->ram_bytes;
 661			} else {
 662				split->orig_start = split->start;
 663				split->block_len = 0;
 664				split->block_start = em->block_start;
 665				split->orig_block_len = 0;
 666				split->ram_bytes = split->len;
 667			}
 668
 669			split->generation = gen;
 670			split->bdev = em->bdev;
 671			split->flags = flags;
 672			split->compress_type = em->compress_type;
 673			replace_extent_mapping(em_tree, em, split, modified);
 674			free_extent_map(split);
 675			split = split2;
 676			split2 = NULL;
 677		}
 678		if (testend && em->start + em->len > start + len) {
 679			u64 diff = start + len - em->start;
 680
 681			split->start = start + len;
 682			split->len = em->start + em->len - (start + len);
 683			split->bdev = em->bdev;
 684			split->flags = flags;
 685			split->compress_type = em->compress_type;
 686			split->generation = gen;
 687
 688			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 689				split->orig_block_len = max(em->block_len,
 690						    em->orig_block_len);
 691
 692				split->ram_bytes = em->ram_bytes;
 693				if (compressed) {
 694					split->block_len = em->block_len;
 695					split->block_start = em->block_start;
 696					split->orig_start = em->orig_start;
 697				} else {
 698					split->block_len = split->len;
 699					split->block_start = em->block_start
 700						+ diff;
 701					split->orig_start = em->orig_start;
 702				}
 703			} else {
 704				split->ram_bytes = split->len;
 705				split->orig_start = split->start;
 706				split->block_len = 0;
 707				split->block_start = em->block_start;
 708				split->orig_block_len = 0;
 709			}
 710
 711			if (extent_map_in_tree(em)) {
 712				replace_extent_mapping(em_tree, em, split,
 713						       modified);
 714			} else {
 715				ret = add_extent_mapping(em_tree, split,
 716							 modified);
 717				ASSERT(ret == 0); /* Logic error */
 718			}
 719			free_extent_map(split);
 720			split = NULL;
 721		}
 722next:
 723		if (extent_map_in_tree(em))
 724			remove_extent_mapping(em_tree, em);
 725		write_unlock(&em_tree->lock);
 726
 727		/* once for us */
 728		free_extent_map(em);
 729		/* once for the tree*/
 730		free_extent_map(em);
 731	}
 732	if (split)
 733		free_extent_map(split);
 734	if (split2)
 735		free_extent_map(split2);
 736}
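/*
 * Editor's example for the splitting above: given a 16K extent map at
 * file offset 0, dropping the range [4K, 8K) leaves a front split
 * covering [0, 4K) and a back split covering [8K, 16K); for an
 * uncompressed mapping the back split's block_start is advanced by
 * diff == 8K so it still points at the right disk bytes, while a
 * compressed mapping keeps the full block range of the original.
 */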
 737
 738/*
 739 * this is very complex, but the basic idea is to drop all extents
 740 * in the range start - end.  If drop_cache is set, matching extent
 741 * maps are dropped from the extent map cache as well.
 742 *
 743 * If an extent intersects the range but is not entirely inside the range
 744 * it is either truncated or split.  Anything entirely inside the range
 745 * is deleted from the tree.
 746 */
 747int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 748			 struct btrfs_root *root, struct inode *inode,
 749			 struct btrfs_path *path, u64 start, u64 end,
 750			 u64 *drop_end, int drop_cache,
 751			 int replace_extent,
 752			 u32 extent_item_size,
 753			 int *key_inserted)
 754{
 755	struct btrfs_fs_info *fs_info = root->fs_info;
 756	struct extent_buffer *leaf;
 757	struct btrfs_file_extent_item *fi;
 758	struct btrfs_ref ref = { 0 };
 759	struct btrfs_key key;
 760	struct btrfs_key new_key;
 761	u64 ino = btrfs_ino(BTRFS_I(inode));
 762	u64 search_start = start;
 763	u64 disk_bytenr = 0;
 764	u64 num_bytes = 0;
 765	u64 extent_offset = 0;
 766	u64 extent_end = 0;
 767	u64 last_end = start;
 768	int del_nr = 0;
 769	int del_slot = 0;
 770	int extent_type;
 771	int recow;
 772	int ret;
 773	int modify_tree = -1;
 774	int update_refs;
 775	int found = 0;
 776	int leafs_visited = 0;
 777
 778	if (drop_cache)
 779		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 780
 781	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 782		modify_tree = 0;
 783
 784	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 785		       root == fs_info->tree_root);
 786	while (1) {
 787		recow = 0;
 788		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 789					       search_start, modify_tree);
 790		if (ret < 0)
 791			break;
 792		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 793			leaf = path->nodes[0];
 794			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 795			if (key.objectid == ino &&
 796			    key.type == BTRFS_EXTENT_DATA_KEY)
 797				path->slots[0]--;
 798		}
 799		ret = 0;
 800		leafs_visited++;
 801next_slot:
 802		leaf = path->nodes[0];
 803		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 804			BUG_ON(del_nr > 0);
 805			ret = btrfs_next_leaf(root, path);
 806			if (ret < 0)
 807				break;
 808			if (ret > 0) {
 809				ret = 0;
 810				break;
 811			}
 812			leafs_visited++;
 813			leaf = path->nodes[0];
 814			recow = 1;
 815		}
 816
 817		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 818
 819		if (key.objectid > ino)
 820			break;
 821		if (WARN_ON_ONCE(key.objectid < ino) ||
 822		    key.type < BTRFS_EXTENT_DATA_KEY) {
 823			ASSERT(del_nr == 0);
 824			path->slots[0]++;
 825			goto next_slot;
 826		}
 827		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 828			break;
 829
 830		fi = btrfs_item_ptr(leaf, path->slots[0],
 831				    struct btrfs_file_extent_item);
 832		extent_type = btrfs_file_extent_type(leaf, fi);
 833
 834		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 835		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 836			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 837			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 838			extent_offset = btrfs_file_extent_offset(leaf, fi);
 839			extent_end = key.offset +
 840				btrfs_file_extent_num_bytes(leaf, fi);
 841		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 842			extent_end = key.offset +
 843				btrfs_file_extent_ram_bytes(leaf, fi);
 844		} else {
 845			/* can't happen */
 846			BUG();
 847		}
 848
 849		/*
 850		 * Don't skip extent items representing 0 byte lengths. They
 851		 * used to be created (due to a bug) if we hit an -ENOSPC
 852		 * condition while punching holes. So if we find one here, just
 853		 * ensure we delete it; otherwise we would insert a new file
 854		 * extent item with the same key (offset) as that zero-length
 855		 * file extent item in the call to setup_items_for_insert()
 856		 * later in this function.
 857		 */
 858		if (extent_end == key.offset && extent_end >= search_start) {
 859			last_end = extent_end;
 860			goto delete_extent_item;
 861		}
 862
 863		if (extent_end <= search_start) {
 864			path->slots[0]++;
 865			goto next_slot;
 866		}
 867
 868		found = 1;
 869		search_start = max(key.offset, start);
 870		if (recow || !modify_tree) {
 871			modify_tree = -1;
 872			btrfs_release_path(path);
 873			continue;
 874		}
 875
 876		/*
 877		 *     | - range to drop - |
 878		 *  | -------- extent -------- |
 879		 */
 880		if (start > key.offset && end < extent_end) {
 881			BUG_ON(del_nr > 0);
 882			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 883				ret = -EOPNOTSUPP;
 884				break;
 885			}
 886
 887			memcpy(&new_key, &key, sizeof(new_key));
 888			new_key.offset = start;
 889			ret = btrfs_duplicate_item(trans, root, path,
 890						   &new_key);
 891			if (ret == -EAGAIN) {
 892				btrfs_release_path(path);
 893				continue;
 894			}
 895			if (ret < 0)
 896				break;
 897
 898			leaf = path->nodes[0];
 899			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 900					    struct btrfs_file_extent_item);
 901			btrfs_set_file_extent_num_bytes(leaf, fi,
 902							start - key.offset);
 903
 904			fi = btrfs_item_ptr(leaf, path->slots[0],
 905					    struct btrfs_file_extent_item);
 906
 907			extent_offset += start - key.offset;
 908			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 909			btrfs_set_file_extent_num_bytes(leaf, fi,
 910							extent_end - start);
 911			btrfs_mark_buffer_dirty(leaf);
 912
 913			if (update_refs && disk_bytenr > 0) {
 914				btrfs_init_generic_ref(&ref,
 915						BTRFS_ADD_DELAYED_REF,
 916						disk_bytenr, num_bytes, 0);
 917				btrfs_init_data_ref(&ref,
 918						root->root_key.objectid,
 919						new_key.objectid,
 920						start - extent_offset);
 921				ret = btrfs_inc_extent_ref(trans, &ref);
 922				BUG_ON(ret); /* -ENOMEM */
 923			}
 924			key.offset = start;
 925		}
 926		/*
 927		 * From here on out we will have actually dropped something, so
 928		 * last_end can be updated.
 929		 */
 930		last_end = extent_end;
 931
 932		/*
 933		 *  | ---- range to drop ----- |
 934		 *      | -------- extent -------- |
 935		 */
 936		if (start <= key.offset && end < extent_end) {
 937			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 938				ret = -EOPNOTSUPP;
 939				break;
 940			}
 941
 942			memcpy(&new_key, &key, sizeof(new_key));
 943			new_key.offset = end;
 944			btrfs_set_item_key_safe(fs_info, path, &new_key);
 945
 946			extent_offset += end - key.offset;
 947			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 948			btrfs_set_file_extent_num_bytes(leaf, fi,
 949							extent_end - end);
 950			btrfs_mark_buffer_dirty(leaf);
 951			if (update_refs && disk_bytenr > 0)
 952				inode_sub_bytes(inode, end - key.offset);
 953			break;
 954		}
 955
 956		search_start = extent_end;
 957		/*
 958		 *       | ---- range to drop ----- |
 959		 *  | -------- extent -------- |
 960		 */
 961		if (start > key.offset && end >= extent_end) {
 962			BUG_ON(del_nr > 0);
 963			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 964				ret = -EOPNOTSUPP;
 965				break;
 966			}
 967
 968			btrfs_set_file_extent_num_bytes(leaf, fi,
 969							start - key.offset);
 970			btrfs_mark_buffer_dirty(leaf);
 971			if (update_refs && disk_bytenr > 0)
 972				inode_sub_bytes(inode, extent_end - start);
 973			if (end == extent_end)
 974				break;
 975
 976			path->slots[0]++;
 977			goto next_slot;
 978		}
 979
 980		/*
 981		 *  | ---- range to drop ----- |
 982		 *    | ------ extent ------ |
 983		 */
 984		if (start <= key.offset && end >= extent_end) {
 985delete_extent_item:
 986			if (del_nr == 0) {
 987				del_slot = path->slots[0];
 988				del_nr = 1;
 989			} else {
 990				BUG_ON(del_slot + del_nr != path->slots[0]);
 991				del_nr++;
 992			}
 993
 994			if (update_refs &&
 995			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
 996				inode_sub_bytes(inode,
 997						extent_end - key.offset);
 998				extent_end = ALIGN(extent_end,
 999						   fs_info->sectorsize);
1000			} else if (update_refs && disk_bytenr > 0) {
1001				btrfs_init_generic_ref(&ref,
1002						BTRFS_DROP_DELAYED_REF,
1003						disk_bytenr, num_bytes, 0);
1004				btrfs_init_data_ref(&ref,
1005						root->root_key.objectid,
1006						key.objectid,
1007						key.offset - extent_offset);
1008				ret = btrfs_free_extent(trans, &ref);
1009				BUG_ON(ret); /* -ENOMEM */
1010				inode_sub_bytes(inode,
1011						extent_end - key.offset);
1012			}
1013
1014			if (end == extent_end)
1015				break;
1016
1017			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1018				path->slots[0]++;
1019				goto next_slot;
1020			}
1021
1022			ret = btrfs_del_items(trans, root, path, del_slot,
1023					      del_nr);
1024			if (ret) {
1025				btrfs_abort_transaction(trans, ret);
1026				break;
1027			}
1028
1029			del_nr = 0;
1030			del_slot = 0;
1031
1032			btrfs_release_path(path);
1033			continue;
1034		}
1035
1036		BUG();
1037	}
1038
1039	if (!ret && del_nr > 0) {
1040		/*
1041		 * Set path->slots[0] to the first slot, so that after the delete,
1042		 * if items are moved off from our leaf to its immediate left or
1043		 * right neighbor leaves, we end up with a correct and adjusted
1044		 * path->slots[0] for our insertion (if replace_extent != 0).
1045		 */
1046		path->slots[0] = del_slot;
1047		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1048		if (ret)
1049			btrfs_abort_transaction(trans, ret);
1050	}
1051
1052	leaf = path->nodes[0];
1053	/*
1054	 * If btrfs_del_items() was called, it might have deleted a leaf, in
1055	 * which case it unlocked our path, so check path->locks[0] matches a
1056	 * write lock.
1057	 */
1058	if (!ret && replace_extent && leafs_visited == 1 &&
1059	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1060	     path->locks[0] == BTRFS_WRITE_LOCK) &&
1061	    btrfs_leaf_free_space(leaf) >=
1062	    sizeof(struct btrfs_item) + extent_item_size) {
1063
1064		key.objectid = ino;
1065		key.type = BTRFS_EXTENT_DATA_KEY;
1066		key.offset = start;
1067		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1068			struct btrfs_key slot_key;
1069
1070			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1071			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1072				path->slots[0]++;
1073		}
1074		setup_items_for_insert(root, path, &key,
1075				       &extent_item_size,
1076				       extent_item_size,
1077				       sizeof(struct btrfs_item) +
1078				       extent_item_size, 1);
1079		*key_inserted = 1;
1080	}
1081
1082	if (!replace_extent || !(*key_inserted))
1083		btrfs_release_path(path);
1084	if (drop_end)
1085		*drop_end = found ? min(end, last_end) : end;
1086	return ret;
1087}
1088
1089int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1090		       struct btrfs_root *root, struct inode *inode, u64 start,
1091		       u64 end, int drop_cache)
1092{
1093	struct btrfs_path *path;
1094	int ret;
1095
1096	path = btrfs_alloc_path();
1097	if (!path)
1098		return -ENOMEM;
1099	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1100				   drop_cache, 0, 0, NULL);
1101	btrfs_free_path(path);
1102	return ret;
1103}
1104
1105static int extent_mergeable(struct extent_buffer *leaf, int slot,
1106			    u64 objectid, u64 bytenr, u64 orig_offset,
1107			    u64 *start, u64 *end)
1108{
1109	struct btrfs_file_extent_item *fi;
1110	struct btrfs_key key;
1111	u64 extent_end;
1112
1113	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1114		return 0;
1115
1116	btrfs_item_key_to_cpu(leaf, &key, slot);
1117	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1118		return 0;
1119
1120	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1121	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1122	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1123	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1124	    btrfs_file_extent_compression(leaf, fi) ||
1125	    btrfs_file_extent_encryption(leaf, fi) ||
1126	    btrfs_file_extent_other_encoding(leaf, fi))
1127		return 0;
1128
1129	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1130	if ((*start && *start != key.offset) || (*end && *end != extent_end))
1131		return 0;
1132
1133	*start = key.offset;
1134	*end = extent_end;
1135	return 1;
1136}
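/*
 * Editor's note: extent_mergeable() treats *start and *end as optional
 * in/out constraints.  Callers pass 0 to mean "don't care"; a non-zero
 * value must match the neighbor's boundary exactly, and on success
 * both are filled in with the neighbor's [key.offset, extent_end)
 * range.
 */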
1137
1138/*
1139 * Mark the extent in the range start - end as written.
1140 *
1141 * This changes the extent type from 'pre-allocated' to 'regular'. If only
1142 * part of the extent is marked as written, the extent will be split into
1143 * two or three.
1144 */
1145int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1146			      struct btrfs_inode *inode, u64 start, u64 end)
1147{
1148	struct btrfs_fs_info *fs_info = trans->fs_info;
1149	struct btrfs_root *root = inode->root;
1150	struct extent_buffer *leaf;
1151	struct btrfs_path *path;
1152	struct btrfs_file_extent_item *fi;
1153	struct btrfs_ref ref = { 0 };
1154	struct btrfs_key key;
1155	struct btrfs_key new_key;
1156	u64 bytenr;
1157	u64 num_bytes;
1158	u64 extent_end;
1159	u64 orig_offset;
1160	u64 other_start;
1161	u64 other_end;
1162	u64 split;
1163	int del_nr = 0;
1164	int del_slot = 0;
1165	int recow;
1166	int ret;
1167	u64 ino = btrfs_ino(inode);
1168
1169	path = btrfs_alloc_path();
1170	if (!path)
1171		return -ENOMEM;
1172again:
1173	recow = 0;
1174	split = start;
1175	key.objectid = ino;
1176	key.type = BTRFS_EXTENT_DATA_KEY;
1177	key.offset = split;
1178
1179	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1180	if (ret < 0)
1181		goto out;
1182	if (ret > 0 && path->slots[0] > 0)
1183		path->slots[0]--;
1184
1185	leaf = path->nodes[0];
1186	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1187	if (key.objectid != ino ||
1188	    key.type != BTRFS_EXTENT_DATA_KEY) {
1189		ret = -EINVAL;
1190		btrfs_abort_transaction(trans, ret);
1191		goto out;
1192	}
1193	fi = btrfs_item_ptr(leaf, path->slots[0],
1194			    struct btrfs_file_extent_item);
1195	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1196		ret = -EINVAL;
1197		btrfs_abort_transaction(trans, ret);
1198		goto out;
1199	}
1200	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1201	if (key.offset > start || extent_end < end) {
1202		ret = -EINVAL;
1203		btrfs_abort_transaction(trans, ret);
1204		goto out;
1205	}
1206
1207	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1208	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1209	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1210	memcpy(&new_key, &key, sizeof(new_key));
1211
1212	if (start == key.offset && end < extent_end) {
1213		other_start = 0;
1214		other_end = start;
1215		if (extent_mergeable(leaf, path->slots[0] - 1,
1216				     ino, bytenr, orig_offset,
1217				     &other_start, &other_end)) {
1218			new_key.offset = end;
1219			btrfs_set_item_key_safe(fs_info, path, &new_key);
1220			fi = btrfs_item_ptr(leaf, path->slots[0],
1221					    struct btrfs_file_extent_item);
1222			btrfs_set_file_extent_generation(leaf, fi,
1223							 trans->transid);
1224			btrfs_set_file_extent_num_bytes(leaf, fi,
1225							extent_end - end);
1226			btrfs_set_file_extent_offset(leaf, fi,
1227						     end - orig_offset);
1228			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1229					    struct btrfs_file_extent_item);
1230			btrfs_set_file_extent_generation(leaf, fi,
1231							 trans->transid);
1232			btrfs_set_file_extent_num_bytes(leaf, fi,
1233							end - other_start);
1234			btrfs_mark_buffer_dirty(leaf);
1235			goto out;
1236		}
1237	}
1238
1239	if (start > key.offset && end == extent_end) {
1240		other_start = end;
1241		other_end = 0;
1242		if (extent_mergeable(leaf, path->slots[0] + 1,
1243				     ino, bytenr, orig_offset,
1244				     &other_start, &other_end)) {
1245			fi = btrfs_item_ptr(leaf, path->slots[0],
1246					    struct btrfs_file_extent_item);
1247			btrfs_set_file_extent_num_bytes(leaf, fi,
1248							start - key.offset);
1249			btrfs_set_file_extent_generation(leaf, fi,
1250							 trans->transid);
1251			path->slots[0]++;
1252			new_key.offset = start;
1253			btrfs_set_item_key_safe(fs_info, path, &new_key);
1254
1255			fi = btrfs_item_ptr(leaf, path->slots[0],
1256					    struct btrfs_file_extent_item);
1257			btrfs_set_file_extent_generation(leaf, fi,
1258							 trans->transid);
1259			btrfs_set_file_extent_num_bytes(leaf, fi,
1260							other_end - start);
1261			btrfs_set_file_extent_offset(leaf, fi,
1262						     start - orig_offset);
1263			btrfs_mark_buffer_dirty(leaf);
1264			goto out;
1265		}
1266	}
1267
1268	while (start > key.offset || end < extent_end) {
1269		if (key.offset == start)
1270			split = end;
1271
1272		new_key.offset = split;
1273		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1274		if (ret == -EAGAIN) {
1275			btrfs_release_path(path);
1276			goto again;
1277		}
1278		if (ret < 0) {
1279			btrfs_abort_transaction(trans, ret);
1280			goto out;
1281		}
1282
1283		leaf = path->nodes[0];
1284		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1285				    struct btrfs_file_extent_item);
1286		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1287		btrfs_set_file_extent_num_bytes(leaf, fi,
1288						split - key.offset);
1289
1290		fi = btrfs_item_ptr(leaf, path->slots[0],
1291				    struct btrfs_file_extent_item);
1292
1293		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1294		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1295		btrfs_set_file_extent_num_bytes(leaf, fi,
1296						extent_end - split);
1297		btrfs_mark_buffer_dirty(leaf);
1298
1299		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1300				       num_bytes, 0);
1301		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1302				    orig_offset);
1303		ret = btrfs_inc_extent_ref(trans, &ref);
1304		if (ret) {
1305			btrfs_abort_transaction(trans, ret);
1306			goto out;
1307		}
1308
1309		if (split == start) {
1310			key.offset = start;
1311		} else {
1312			if (start != key.offset) {
1313				ret = -EINVAL;
1314				btrfs_abort_transaction(trans, ret);
1315				goto out;
1316			}
1317			path->slots[0]--;
1318			extent_end = end;
1319		}
1320		recow = 1;
1321	}
1322
1323	other_start = end;
1324	other_end = 0;
1325	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1326			       num_bytes, 0);
1327	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
1328	if (extent_mergeable(leaf, path->slots[0] + 1,
1329			     ino, bytenr, orig_offset,
1330			     &other_start, &other_end)) {
1331		if (recow) {
1332			btrfs_release_path(path);
1333			goto again;
1334		}
1335		extent_end = other_end;
1336		del_slot = path->slots[0] + 1;
1337		del_nr++;
1338		ret = btrfs_free_extent(trans, &ref);
1339		if (ret) {
1340			btrfs_abort_transaction(trans, ret);
1341			goto out;
1342		}
1343	}
1344	other_start = 0;
1345	other_end = start;
1346	if (extent_mergeable(leaf, path->slots[0] - 1,
1347			     ino, bytenr, orig_offset,
1348			     &other_start, &other_end)) {
1349		if (recow) {
1350			btrfs_release_path(path);
1351			goto again;
1352		}
1353		key.offset = other_start;
1354		del_slot = path->slots[0];
1355		del_nr++;
1356		ret = btrfs_free_extent(trans, &ref);
1357		if (ret) {
1358			btrfs_abort_transaction(trans, ret);
1359			goto out;
1360		}
1361	}
1362	if (del_nr == 0) {
1363		fi = btrfs_item_ptr(leaf, path->slots[0],
1364			   struct btrfs_file_extent_item);
1365		btrfs_set_file_extent_type(leaf, fi,
1366					   BTRFS_FILE_EXTENT_REG);
1367		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1368		btrfs_mark_buffer_dirty(leaf);
1369	} else {
1370		fi = btrfs_item_ptr(leaf, del_slot - 1,
1371			   struct btrfs_file_extent_item);
1372		btrfs_set_file_extent_type(leaf, fi,
1373					   BTRFS_FILE_EXTENT_REG);
1374		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1375		btrfs_set_file_extent_num_bytes(leaf, fi,
1376						extent_end - key.offset);
1377		btrfs_mark_buffer_dirty(leaf);
1378
1379		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1380		if (ret < 0) {
1381			btrfs_abort_transaction(trans, ret);
1382			goto out;
1383		}
1384	}
1385out:
1386	btrfs_free_path(path);
1387	return 0;
1388}
1389
1390/*
1391 * on error we return an unlocked page and the error value
1392 * on success we return a locked page and 0
1393 */
1394static int prepare_uptodate_page(struct inode *inode,
1395				 struct page *page, u64 pos,
1396				 bool force_uptodate)
1397{
1398	int ret = 0;
1399
1400	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1401	    !PageUptodate(page)) {
1402		ret = btrfs_readpage(NULL, page);
1403		if (ret)
1404			return ret;
1405		lock_page(page);
1406		if (!PageUptodate(page)) {
1407			unlock_page(page);
1408			return -EIO;
1409		}
1410		if (page->mapping != inode->i_mapping) {
1411			unlock_page(page);
1412			return -EAGAIN;
1413		}
1414	}
1415	return 0;
1416}
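/*
 * Editor's note: the read above is only needed when the write does not
 * start or end on a page boundary (pos & (PAGE_SIZE - 1)) or when the
 * caller forces it; a write covering a whole page can skip reading the
 * old contents, since every byte of the page will be overwritten.
 */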
1417
1418/*
1419 * this just gets pages into the page cache and locks them down.
1420 */
1421static noinline int prepare_pages(struct inode *inode, struct page **pages,
1422				  size_t num_pages, loff_t pos,
1423				  size_t write_bytes, bool force_uptodate)
1424{
1425	int i;
1426	unsigned long index = pos >> PAGE_SHIFT;
1427	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1428	int err = 0;
1429	int faili;
1430
1431	for (i = 0; i < num_pages; i++) {
1432again:
1433		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1434					       mask | __GFP_WRITE);
1435		if (!pages[i]) {
1436			faili = i - 1;
1437			err = -ENOMEM;
1438			goto fail;
1439		}
1440
1441		if (i == 0)
1442			err = prepare_uptodate_page(inode, pages[i], pos,
1443						    force_uptodate);
1444		if (!err && i == num_pages - 1)
1445			err = prepare_uptodate_page(inode, pages[i],
1446						    pos + write_bytes, false);
1447		if (err) {
1448			put_page(pages[i]);
1449			if (err == -EAGAIN) {
1450				err = 0;
1451				goto again;
1452			}
1453			faili = i - 1;
1454			goto fail;
1455		}
1456		wait_on_page_writeback(pages[i]);
1457	}
1458
1459	return 0;
1460fail:
1461	while (faili >= 0) {
1462		unlock_page(pages[faili]);
1463		put_page(pages[faili]);
1464		faili--;
1465	}
1466	return err;
1467
1468}
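/*
 * Editor's note: only the first and last pages can need their old
 * contents read in, which is why prepare_uptodate_page() is called
 * just for i == 0 and i == num_pages - 1; interior pages of a
 * multi-page write are always fully overwritten.
 */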
1469
1470/*
1471 * This function locks the extent and properly waits for data=ordered extents
1472 * to finish before allowing the pages to be modified if needed.
1473 *
1474 * The return value:
1475 * 1 - the extent is locked
1476 * 0 - the extent is not locked, and everything is OK
1477 * -EAGAIN - the pages need to be re-prepared
1478 * any other value < 0 - something went wrong
1479 */
1480static noinline int
1481lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1482				size_t num_pages, loff_t pos,
1483				size_t write_bytes,
1484				u64 *lockstart, u64 *lockend,
1485				struct extent_state **cached_state)
1486{
1487	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1488	u64 start_pos;
1489	u64 last_pos;
1490	int i;
1491	int ret = 0;
1492
1493	start_pos = round_down(pos, fs_info->sectorsize);
1494	last_pos = start_pos
1495		+ round_up(pos + write_bytes - start_pos,
1496			   fs_info->sectorsize) - 1;
1497
1498	if (start_pos < inode->vfs_inode.i_size) {
1499		struct btrfs_ordered_extent *ordered;
1500
1501		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1502				cached_state);
1503		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1504						     last_pos - start_pos + 1);
1505		if (ordered &&
1506		    ordered->file_offset + ordered->len > start_pos &&
1507		    ordered->file_offset <= last_pos) {
1508			unlock_extent_cached(&inode->io_tree, start_pos,
1509					last_pos, cached_state);
1510			for (i = 0; i < num_pages; i++) {
1511				unlock_page(pages[i]);
1512				put_page(pages[i]);
1513			}
1514			btrfs_start_ordered_extent(&inode->vfs_inode,
1515					ordered, 1);
1516			btrfs_put_ordered_extent(ordered);
1517			return -EAGAIN;
1518		}
1519		if (ordered)
1520			btrfs_put_ordered_extent(ordered);
1521
1522		*lockstart = start_pos;
1523		*lockend = last_pos;
1524		ret = 1;
1525	}
1526
1527	/*
1528	 * It's possible the pages are dirty right now, but we don't want
1529	 * to clean them yet because copy_from_user may catch a page fault
1530	 * and we might have to fall back to one page at a time.  If that
1531	 * happens, we'll unlock these pages and we'd have a window where
1532	 * reclaim could sneak in and drop the once-dirty page on the floor
1533	 * without writing it.
1534	 *
1535	 * We have the pages locked and the extent range locked, so there's
1536	 * no way someone can start IO on any dirty pages in this range.
1537	 *
1538	 * We'll call btrfs_dirty_pages() later on, and that will flip around
1539	 * delalloc bits and dirty the pages as required.
1540	 */
1541	for (i = 0; i < num_pages; i++) {
1542		set_page_extent_mapped(pages[i]);
1543		WARN_ON(!PageLocked(pages[i]));
1544	}
1545
1546	return ret;
1547}
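/*
 * Editor's note: a return of -EAGAIN above means an ordered extent
 * overlapped the range; the pages were unlocked and released and the
 * ordered extent waited on, so the caller must redo prepare_pages()
 * before retrying.
 */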
1548
1549static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1550				    size_t *write_bytes)
1551{
1552	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1553	struct btrfs_root *root = inode->root;
1554	u64 lockstart, lockend;
1555	u64 num_bytes;
1556	int ret;
1557
1558	ret = btrfs_start_write_no_snapshotting(root);
1559	if (!ret)
1560		return -EAGAIN;
1561
1562	lockstart = round_down(pos, fs_info->sectorsize);
1563	lockend = round_up(pos + *write_bytes,
1564			   fs_info->sectorsize) - 1;
1565
1566	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
1567					   lockend, NULL);
1568
1569	num_bytes = lockend - lockstart + 1;
1570	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1571			NULL, NULL, NULL);
1572	if (ret <= 0) {
1573		ret = 0;
1574		btrfs_end_write_no_snapshotting(root);
1575	} else {
1576		*write_bytes = min_t(size_t, *write_bytes ,
1577				     num_bytes - pos + lockstart);
1578	}
1579
1580	unlock_extent(&inode->io_tree, lockstart, lockend);
1581
1582	return ret;
1583}
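/*
 * Worked example for the clamp above (editor's sketch, assuming a 4K
 * sectorsize): for pos = 6000 and *write_bytes = 20000, lockstart is
 * round_down(6000, 4096) = 4096; if can_nocow_extent() reports only
 * num_bytes = 8192 of NOCOW-able space from lockstart, *write_bytes is
 * reduced to min(20000, 8192 - 6000 + 4096) = 6288 so the write stays
 * within the region that can be written in place.
 */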
1584
1585static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1586					       struct iov_iter *i)
1587{
1588	struct file *file = iocb->ki_filp;
1589	loff_t pos = iocb->ki_pos;
1590	struct inode *inode = file_inode(file);
1591	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1592	struct btrfs_root *root = BTRFS_I(inode)->root;
1593	struct page **pages = NULL;
1594	struct extent_changeset *data_reserved = NULL;
1595	u64 release_bytes = 0;
1596	u64 lockstart;
1597	u64 lockend;
1598	size_t num_written = 0;
1599	int nrptrs;
1600	int ret = 0;
1601	bool only_release_metadata = false;
1602	bool force_page_uptodate = false;
1603
1604	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1605			PAGE_SIZE / (sizeof(struct page *)));
1606	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1607	nrptrs = max(nrptrs, 8);
1608	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1609	if (!pages)
1610		return -ENOMEM;
1611
1612	while (iov_iter_count(i) > 0) {
1613		struct extent_state *cached_state = NULL;
1614		size_t offset = offset_in_page(pos);
1615		size_t sector_offset;
1616		size_t write_bytes = min(iov_iter_count(i),
1617					 nrptrs * (size_t)PAGE_SIZE -
1618					 offset);
1619		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1620						PAGE_SIZE);
1621		size_t reserve_bytes;
1622		size_t dirty_pages;
1623		size_t copied;
1624		size_t dirty_sectors;
1625		size_t num_sectors;
1626		int extents_locked;
1627
1628		WARN_ON(num_pages > nrptrs);
1629
1630		/*
1631		 * Fault pages before locking them in prepare_pages
1632		 * to avoid a recursive lock
1633		 */
1634		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1635			ret = -EFAULT;
1636			break;
1637		}
1638
1639		sector_offset = pos & (fs_info->sectorsize - 1);
1640		reserve_bytes = round_up(write_bytes + sector_offset,
1641				fs_info->sectorsize);
1642
1643		extent_changeset_release(data_reserved);
1644		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
1645						  write_bytes);
1646		if (ret < 0) {
1647			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1648						      BTRFS_INODE_PREALLOC)) &&
1649			    check_can_nocow(BTRFS_I(inode), pos,
1650					&write_bytes) > 0) {
1651				/*
1652				 * For the nodatacow case there is no need
1653				 * to reserve data space.
1654				 */
1655				only_release_metadata = true;
1656				/*
1657				 * our prealloc extent may be smaller than
1658				 * write_bytes, so scale down.
1659				 */
1660				num_pages = DIV_ROUND_UP(write_bytes + offset,
1661							 PAGE_SIZE);
1662				reserve_bytes = round_up(write_bytes +
1663							 sector_offset,
1664							 fs_info->sectorsize);
1665			} else {
1666				break;
1667			}
1668		}
1669
1670		WARN_ON(reserve_bytes == 0);
1671		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1672				reserve_bytes);
1673		if (ret) {
1674			if (!only_release_metadata)
1675				btrfs_free_reserved_data_space(inode,
1676						data_reserved, pos,
1677						write_bytes);
1678			else
1679				btrfs_end_write_no_snapshotting(root);
1680			break;
1681		}
1682
1683		release_bytes = reserve_bytes;
1684again:
1685		/*
1686		 * This is going to set up the pages array with the number of
1687		 * pages we want, so we don't really need to worry about the
1688		 * contents of pages from loop to loop
1689		 */
1690		ret = prepare_pages(inode, pages, num_pages,
1691				    pos, write_bytes,
1692				    force_page_uptodate);
1693		if (ret) {
1694			btrfs_delalloc_release_extents(BTRFS_I(inode),
1695						       reserve_bytes);
1696			break;
1697		}
1698
1699		extents_locked = lock_and_cleanup_extent_if_need(
1700				BTRFS_I(inode), pages,
1701				num_pages, pos, write_bytes, &lockstart,
1702				&lockend, &cached_state);
1703		if (extents_locked < 0) {
1704			if (extents_locked == -EAGAIN)
1705				goto again;
1706			btrfs_delalloc_release_extents(BTRFS_I(inode),
1707						       reserve_bytes);
1708			ret = extents_locked;
1709			break;
1710		}
1711
1712		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1713
1714		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1715		dirty_sectors = round_up(copied + sector_offset,
1716					fs_info->sectorsize);
1717		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1718
1719		/*
1720		 * if we have trouble faulting in the pages, fall
1721		 * back to one page at a time
1722		 */
1723		if (copied < write_bytes)
1724			nrptrs = 1;
1725
1726		if (copied == 0) {
1727			force_page_uptodate = true;
1728			dirty_sectors = 0;
1729			dirty_pages = 0;
1730		} else {
1731			force_page_uptodate = false;
1732			dirty_pages = DIV_ROUND_UP(copied + offset,
1733						   PAGE_SIZE);
1734		}
1735
1736		if (num_sectors > dirty_sectors) {
1737			/* release everything except the sectors we dirtied */
1738			release_bytes -= dirty_sectors <<
1739						fs_info->sb->s_blocksize_bits;
1740			if (only_release_metadata) {
1741				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1742							release_bytes, true);
1743			} else {
1744				u64 __pos;
1745
1746				__pos = round_down(pos,
1747						   fs_info->sectorsize) +
1748					(dirty_pages << PAGE_SHIFT);
1749				btrfs_delalloc_release_space(inode,
1750						data_reserved, __pos,
1751						release_bytes, true);
1752			}
1753		}
1754
1755		release_bytes = round_up(copied + sector_offset,
1756					fs_info->sectorsize);
1757
1758		if (copied > 0)
1759			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
1760						pos, copied, &cached_state);
1761
1762		/*
1763		 * If we have not locked the extent range, because the range's
1764		 * start offset is >= i_size, we might still have a non-NULL
1765		 * cached extent state, acquired while marking the extent range
1766		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1767		 * possible cached extent state to avoid a memory leak.
1768		 */
1769		if (extents_locked)
1770			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1771					     lockstart, lockend, &cached_state);
1772		else
1773			free_extent_state(cached_state);
1774
1775		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1776		if (ret) {
1777			btrfs_drop_pages(pages, num_pages);
1778			break;
1779		}
1780
1781		release_bytes = 0;
1782		if (only_release_metadata)
1783			btrfs_end_write_no_snapshotting(root);
1784
1785		if (only_release_metadata && copied > 0) {
1786			lockstart = round_down(pos,
1787					       fs_info->sectorsize);
1788			lockend = round_up(pos + copied,
1789					   fs_info->sectorsize) - 1;
1790
1791			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1792				       lockend, EXTENT_NORESERVE, NULL,
1793				       NULL, GFP_NOFS);
1794			only_release_metadata = false;
1795		}
1796
1797		btrfs_drop_pages(pages, num_pages);
1798
1799		cond_resched();
1800
1801		balance_dirty_pages_ratelimited(inode->i_mapping);
1802		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
1803			btrfs_btree_balance_dirty(fs_info);
1804
1805		pos += copied;
1806		num_written += copied;
1807	}
1808
1809	kfree(pages);
1810
1811	if (release_bytes) {
1812		if (only_release_metadata) {
1813			btrfs_end_write_no_snapshotting(root);
1814			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1815					release_bytes, true);
1816		} else {
1817			btrfs_delalloc_release_space(inode, data_reserved,
1818					round_down(pos, fs_info->sectorsize),
1819					release_bytes, true);
1820		}
1821	}
1822
1823	extent_changeset_free(data_reserved);
1824	return num_written ? num_written : ret;
1825}
1826
1827static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1828{
1829	struct file *file = iocb->ki_filp;
1830	struct inode *inode = file_inode(file);
1831	loff_t pos;
1832	ssize_t written;
1833	ssize_t written_buffered;
1834	loff_t endbyte;
1835	int err;
1836
1837	written = generic_file_direct_write(iocb, from);
1838
1839	if (written < 0 || !iov_iter_count(from))
1840		return written;
1841
1842	pos = iocb->ki_pos;
1843	written_buffered = btrfs_buffered_write(iocb, from);
1844	if (written_buffered < 0) {
1845		err = written_buffered;
1846		goto out;
1847	}
1848	/*
1849	 * Ensure all data is persisted. We want the next direct IO read to be
1850	 * able to read what was just written.
1851	 */
1852	endbyte = pos + written_buffered - 1;
1853	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1854	if (err)
1855		goto out;
1856	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1857	if (err)
1858		goto out;
1859	written += written_buffered;
1860	iocb->ki_pos = pos + written_buffered;
1861	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1862				 endbyte >> PAGE_SHIFT);
1863out:
1864	return written ? written : err;
1865}
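/*
 * Editor's note on the fallback above: when generic_file_direct_write()
 * stops short, the rest of the iov_iter is pushed through the buffered
 * path, and the explicit fdatawrite/fdatawait plus
 * invalidate_mapping_pages() keep a later direct read coherent with
 * the buffered tail that was just written.
 */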
1866
1867static void update_time_for_write(struct inode *inode)
1868{
1869	struct timespec64 now;
1870
1871	if (IS_NOCMTIME(inode))
1872		return;
1873
1874	now = current_time(inode);
1875	if (!timespec64_equal(&inode->i_mtime, &now))
1876		inode->i_mtime = now;
1877
1878	if (!timespec64_equal(&inode->i_ctime, &now))
1879		inode->i_ctime = now;
1880
1881	if (IS_I_VERSION(inode))
1882		inode_inc_iversion(inode);
1883}
1884
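/*
 * Annotation, not part of the kernel source: a tiny userspace probe of the
 * timestamp behaviour implemented by update_time_for_write() above: a
 * successful write moves st_mtim (and st_ctim) forward, unless the inode is
 * NOCMTIME.  Timestamps have finite granularity, so back-to-back calls may
 * legitimately report no change.  Illustrative only.
 */
#include <sys/stat.h>
#include <unistd.h>

static int write_updates_mtime(int fd)
{
	struct stat before, after;
	char byte = 0;

	if (fstat(fd, &before) || write(fd, &byte, 1) != 1 || fstat(fd, &after))
		return -1;
	return after.st_mtim.tv_sec != before.st_mtim.tv_sec ||
	       after.st_mtim.tv_nsec != before.st_mtim.tv_nsec;
}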
1885static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1886				    struct iov_iter *from)
1887{
1888	struct file *file = iocb->ki_filp;
1889	struct inode *inode = file_inode(file);
1890	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1891	struct btrfs_root *root = BTRFS_I(inode)->root;
1892	u64 start_pos;
1893	u64 end_pos;
1894	ssize_t num_written = 0;
1895	const bool sync = iocb->ki_flags & IOCB_DSYNC;
1896	ssize_t err;
1897	loff_t pos;
1898	size_t count;
1899	loff_t oldsize;
1900	int clean_page = 0;
1901
1902	if (!(iocb->ki_flags & IOCB_DIRECT) &&
1903	    (iocb->ki_flags & IOCB_NOWAIT))
1904		return -EOPNOTSUPP;
1905
1906	if (!inode_trylock(inode)) {
1907		if (iocb->ki_flags & IOCB_NOWAIT)
1908			return -EAGAIN;
1909		inode_lock(inode);
1910	}
1911
1912	err = generic_write_checks(iocb, from);
1913	if (err <= 0) {
1914		inode_unlock(inode);
1915		return err;
1916	}
1917
1918	pos = iocb->ki_pos;
1919	count = iov_iter_count(from);
1920	if (iocb->ki_flags & IOCB_NOWAIT) {
1921		/*
1922		 * If nodatacow is not set we will have to allocate
1923		 * space, so bail out
1924		 */
1925		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1926					      BTRFS_INODE_PREALLOC)) ||
1927		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1928			inode_unlock(inode);
1929			return -EAGAIN;
1930		}
1931	}
1932
1933	current->backing_dev_info = inode_to_bdi(inode);
1934	err = file_remove_privs(file);
1935	if (err) {
1936		inode_unlock(inode);
1937		goto out;
1938	}
1939
1940	/*
1941	 * If BTRFS flips readonly due to some impossible error
1942	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1943	 * although we have opened a file as writable, we have
1944	 * to stop this write operation to ensure FS consistency.
1945	 */
1946	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1947		inode_unlock(inode);
1948		err = -EROFS;
1949		goto out;
1950	}
1951
1952	/*
1953	 * We reserve space for updating the inode when we reserve space for the
1954	 * extent we are going to write, so we will enospc out there.  We don't
1955	 * need to start yet another transaction to update the inode as we will
1956	 * update the inode when we finish writing whatever data we write.
1957	 */
1958	update_time_for_write(inode);
1959
1960	start_pos = round_down(pos, fs_info->sectorsize);
1961	oldsize = i_size_read(inode);
1962	if (start_pos > oldsize) {
1963		/* Expand hole size to cover write data, preventing empty gap */
1964		end_pos = round_up(pos + count,
1965				   fs_info->sectorsize);
1966		err = btrfs_cont_expand(inode, oldsize, end_pos);
1967		if (err) {
1968			inode_unlock(inode);
1969			goto out;
1970		}
1971		if (start_pos > round_up(oldsize, fs_info->sectorsize))
1972			clean_page = 1;
1973	}
1974
1975	if (sync)
1976		atomic_inc(&BTRFS_I(inode)->sync_writers);
1977
1978	if (iocb->ki_flags & IOCB_DIRECT) {
1979		num_written = __btrfs_direct_write(iocb, from);
1980	} else {
1981		num_written = btrfs_buffered_write(iocb, from);
1982		if (num_written > 0)
1983			iocb->ki_pos = pos + num_written;
1984		if (clean_page)
1985			pagecache_isize_extended(inode, oldsize,
1986						i_size_read(inode));
1987	}
1988
1989	inode_unlock(inode);
1990
1991	/*
1992	 * We also have to set last_sub_trans to the current log transid,
1993	 * otherwise subsequent syncs to a file that's been synced in this
1994	 * transaction will appear to have already occurred.
1995	 */
1996	spin_lock(&BTRFS_I(inode)->lock);
1997	BTRFS_I(inode)->last_sub_trans = root->log_transid;
1998	spin_unlock(&BTRFS_I(inode)->lock);
1999	if (num_written > 0)
2000		num_written = generic_write_sync(iocb, num_written);
2001
2002	if (sync)
2003		atomic_dec(&BTRFS_I(inode)->sync_writers);
2004out:
2005	current->backing_dev_info = NULL;
2006	return num_written ? num_written : err;
2007}
2008
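/*
 * Annotation, not part of the kernel source: a userspace sketch of the
 * IOCB_NOWAIT handling above.  RWF_NOWAIT is only honoured together with
 * O_DIRECT on this kernel (buffered NOWAIT returns -EOPNOTSUPP), and
 * -EAGAIN means the write would block, e.g. because space would have to be
 * allocated on a non-nodatacow file.  O_DIRECT buffer alignment is assumed
 * and omitted for brevity.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sys/uio.h>

static ssize_t nowait_write_sketch(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t ret;

	ret = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);
	if (ret < 0 && (errno == EAGAIN || errno == EOPNOTSUPP))
		/* Retry from a context that is allowed to block. */
		ret = pwritev2(fd, &iov, 1, off, 0);
	return ret;
}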
2009int btrfs_release_file(struct inode *inode, struct file *filp)
2010{
2011	struct btrfs_file_private *private = filp->private_data;
2012
2013	if (private && private->filldir_buf)
2014		kfree(private->filldir_buf);
2015	kfree(private);
2016	filp->private_data = NULL;
2017
2018	/*
2019	 * ordered_data_close is set by setattr when we are about to truncate
2020	 * a file from a non-zero size to a zero size.  This tries to
2021	 * flush down new bytes that may have been written if the
2022	 * application were using truncate to replace a file in place.
2023	 */
2024	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2025			       &BTRFS_I(inode)->runtime_flags))
2026		filemap_flush(inode->i_mapping);
2027	return 0;
2028}
2029
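/*
 * Annotation, not part of the kernel source: the "replace a file in place"
 * pattern the comment above refers to.  Opening with O_TRUNC drops the size
 * to zero, which lets setattr set BTRFS_INODE_ORDERED_DATA_CLOSE, so the
 * close() below can trigger the filemap_flush() above and push out the new
 * bytes.  Illustrative sketch with minimal error handling.
 */
#include <fcntl.h>
#include <unistd.h>

static int rewrite_in_place_sketch(const char *path, const void *buf,
				   size_t len)
{
	int fd = open(path, O_WRONLY | O_TRUNC);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, buf, len);
	close(fd);	/* may flush the just-written data, see above */
	return n == (ssize_t)len ? 0 : -1;
}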
2030static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2031{
2032	int ret;
2033	struct blk_plug plug;
2034
2035	/*
2036	 * This is only called in fsync, which does synchronous writes, so a
2037	 * plug can merge adjacent IOs as much as possible.  Especially in the
2038	 * case of multiple disks using a raid profile, a large IO can be split
2039	 * into several segments of stripe length (currently 64K).
2040	 */
2041	blk_start_plug(&plug);
2042	atomic_inc(&BTRFS_I(inode)->sync_writers);
2043	ret = btrfs_fdatawrite_range(inode, start, end);
2044	atomic_dec(&BTRFS_I(inode)->sync_writers);
2045	blk_finish_plug(&plug);
2046
2047	return ret;
2048}
2049
2050/*
2051 * fsync call for both files and directories.  This logs the inode into
2052 * the tree log instead of forcing full commits whenever possible.
2053 *
2054 * It needs to call filemap_fdatawait so that all ordered extent updates
2055 * in the metadata btree are up to date for copying to the log.
2056 *
2057 * It drops the inode mutex before doing the tree log commit.  This is an
2058 * important optimization for directories because holding the mutex prevents
2059 * new operations on the dir while we write to disk.
2060 */
2061int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2062{
2063	struct dentry *dentry = file_dentry(file);
2064	struct inode *inode = d_inode(dentry);
2065	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2066	struct btrfs_root *root = BTRFS_I(inode)->root;
2067	struct btrfs_trans_handle *trans;
2068	struct btrfs_log_ctx ctx;
2069	int ret = 0, err;
2070
2071	trace_btrfs_sync_file(file, datasync);
2072
2073	btrfs_init_log_ctx(&ctx, inode);
2074
2075	/*
2076	 * We write the dirty pages in the range and wait until they complete
2077	 * outside of the ->i_mutex. That way multiple tasks can flush dirty
2078	 * pages concurrently, which improves performance.  See
2079	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2080	 */
2081	ret = start_ordered_ops(inode, start, end);
2082	if (ret)
2083		goto out;
2084
2085	inode_lock(inode);
2086
2087	/*
2088	 * We take the dio_sem here because the tree log stuff can race with
2089	 * lockless dio writes and get an extent map logged for an extent we
2090	 * never waited on.  We need it this high up for lockdep reasons.
2091	 */
2092	down_write(&BTRFS_I(inode)->dio_sem);
2093
2094	atomic_inc(&root->log_batch);
2095
2096	/*
2097	 * If the inode needs a full sync, make sure we use a full range to
2098	 * avoid log tree corruption, due to hole detection racing with ordered
2099	 * extent completion for adjacent ranges, and assertion failures during
2100	 * hole detection. Do this while holding the inode lock, to avoid races
2101	 * with other tasks.
2102	 */
2103	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2104		     &BTRFS_I(inode)->runtime_flags)) {
2105		start = 0;
2106		end = LLONG_MAX;
2107	}
2108
2109	/*
2110	 * Before we acquired the inode's lock, someone may have dirtied more
2111	 * pages in the target range. We need to make sure that writeback for
2112	 * any such pages does not start while we are logging the inode, because
2113	 * if it does, any of the following might happen when we are not doing a
2114	 * full inode sync:
2115	 *
2116	 * 1) We log an extent after its writeback finishes but before its
2117	 *    checksums are added to the csum tree, leading to -EIO errors
2118	 *    when attempting to read the extent after a log replay.
2119	 *
2120	 * 2) We can end up logging an extent before its writeback finishes.
2121	 *    Therefore after the log replay we will have a file extent item
2122	 *    pointing to an unwritten extent (and no data checksums as well).
2123	 *
2124	 * So trigger writeback for any eventual new dirty pages and then we
2125	 * wait for all ordered extents to complete below.
2126	 */
2127	ret = start_ordered_ops(inode, start, end);
2128	if (ret) {
2129		inode_unlock(inode);
2130		goto out;
2131	}
2132
2133	/*
2134	 * We have to do this here to avoid the priority inversion of waiting on
2135	 * IO of a lower priority task while holding a transaction open.
2136	 *
2137	 * Also, the range length can only be represented by a u64, so we have
2138	 * to do the typecasts to avoid signed overflow if it's [0, LLONG_MAX].
2139	 */
2140	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
2141	if (ret) {
2142		up_write(&BTRFS_I(inode)->dio_sem);
2143		inode_unlock(inode);
2144		goto out;
2145	}
2146	atomic_inc(&root->log_batch);
2147
2148	smp_mb();
2149	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2150	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2151		/*
2152		 * We've had everything committed since the last time we were
2153		 * modified so clear this flag in case it was set for whatever
2154		 * reason, it's no longer relevant.
2155		 */
2156		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2157			  &BTRFS_I(inode)->runtime_flags);
2158		/*
2159		 * An ordered extent might have started before and completed
2160		 * already with io errors, in which case the inode was not
2161		 * updated and we end up here. So check the inode's mapping
2162		 * for any errors that might have happened since we last
2163		 * called fsync.
2164		 */
2165		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2166		up_write(&BTRFS_I(inode)->dio_sem);
2167		inode_unlock(inode);
2168		goto out;
2169	}
2170
2171	/*
2172	 * We use start here because we will need to wait on the IO to complete
2173	 * in btrfs_sync_log, which could require joining a transaction (for
2174	 * example checking cross references in the nocow path).  If we use join
2175	 * here we could get into a situation where we're waiting on IO to
2176	 * happen that is blocked on a transaction trying to commit.  With start
2177	 * we inc the extwriter counter, so we wait for all extwriters to exit
2178	 * before we start blocking joiners.  This comment is to keep somebody
2179	 * from thinking they are super smart and changing this to
2180	 * btrfs_join_transaction *cough*Josef*cough*.
2181	 */
2182	trans = btrfs_start_transaction(root, 0);
2183	if (IS_ERR(trans)) {
2184		ret = PTR_ERR(trans);
2185		up_write(&BTRFS_I(inode)->dio_sem);
2186		inode_unlock(inode);
2187		goto out;
2188	}
2189
2190	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2191	if (ret < 0) {
2192		/* Fallthrough and commit/free transaction. */
2193		ret = 1;
2194	}
2195
2196	/* we've logged all the items and now have a consistent
2197	 * version of the file in the log.  It is possible that
2198	 * someone will come in and modify the file, but that's
2199	 * fine because the log is consistent on disk, and we
2200	 * have references to all of the file's extents
2201	 *
2202	 * It is possible that someone will come in and log the
2203	 * file again, but that will end up using the synchronization
2204	 * inside btrfs_sync_log to keep things safe.
2205	 */
2206	up_write(&BTRFS_I(inode)->dio_sem);
2207	inode_unlock(inode);
2208
2209	if (ret != BTRFS_NO_LOG_SYNC) {
2210		if (!ret) {
2211			ret = btrfs_sync_log(trans, root, &ctx);
2212			if (!ret) {
2213				ret = btrfs_end_transaction(trans);
2214				goto out;
2215			}
2216		}
2217		ret = btrfs_commit_transaction(trans);
2218	} else {
2219		ret = btrfs_end_transaction(trans);
2220	}
2221out:
2222	ASSERT(list_empty(&ctx.list));
2223	err = file_check_and_advance_wb_err(file);
2224	if (!ret)
2225		ret = err;
2226	return ret > 0 ? -EIO : ret;
2227}
2228
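/*
 * Annotation, not part of the kernel source: the write-then-fsync pattern
 * this function services.  fdatasync() reaches the same code with
 * datasync=1.  Checking the fsync() return value matters: earlier writeback
 * errors are reported here via file_check_and_advance_wb_err().
 */
#include <unistd.h>

static int write_durably_sketch(int fd, const void *buf, size_t len, off_t off)
{
	if (pwrite(fd, buf, len, off) != (ssize_t)len)
		return -1;
	/* Persists the data and logs the inode (or commits the transaction). */
	return fsync(fd);
}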
2229static const struct vm_operations_struct btrfs_file_vm_ops = {
2230	.fault		= filemap_fault,
2231	.map_pages	= filemap_map_pages,
2232	.page_mkwrite	= btrfs_page_mkwrite,
2233};
2234
2235static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2236{
2237	struct address_space *mapping = filp->f_mapping;
2238
2239	if (!mapping->a_ops->readpage)
2240		return -ENOEXEC;
2241
2242	file_accessed(filp);
2243	vma->vm_ops = &btrfs_file_vm_ops;
2244
2245	return 0;
2246}
2247
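/*
 * Annotation, not part of the kernel source: a shared writable mapping
 * whose first store into a clean page goes through the page_mkwrite hook
 * installed above (btrfs_page_mkwrite).  The file is assumed to have been
 * sized to at least len bytes beforehand.
 */
#include <string.h>
#include <sys/mman.h>

static int mmap_write_sketch(int fd, size_t len)
{
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	memset(p, 'x', len);		/* faults pages in and dirties them */
	msync(p, len, MS_SYNC);		/* write the dirty pages back now */
	return munmap(p, len);
}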
2248static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2249			  int slot, u64 start, u64 end)
2250{
2251	struct btrfs_file_extent_item *fi;
2252	struct btrfs_key key;
2253
2254	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2255		return 0;
2256
2257	btrfs_item_key_to_cpu(leaf, &key, slot);
2258	if (key.objectid != btrfs_ino(inode) ||
2259	    key.type != BTRFS_EXTENT_DATA_KEY)
2260		return 0;
2261
2262	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2263
2264	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2265		return 0;
2266
2267	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2268		return 0;
2269
2270	if (key.offset == end)
2271		return 1;
2272	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2273		return 1;
2274	return 0;
2275}
2276
2277static int fill_holes(struct btrfs_trans_handle *trans,
2278		struct btrfs_inode *inode,
2279		struct btrfs_path *path, u64 offset, u64 end)
2280{
2281	struct btrfs_fs_info *fs_info = trans->fs_info;
2282	struct btrfs_root *root = inode->root;
2283	struct extent_buffer *leaf;
2284	struct btrfs_file_extent_item *fi;
2285	struct extent_map *hole_em;
2286	struct extent_map_tree *em_tree = &inode->extent_tree;
2287	struct btrfs_key key;
2288	int ret;
2289
2290	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2291		goto out;
2292
2293	key.objectid = btrfs_ino(inode);
2294	key.type = BTRFS_EXTENT_DATA_KEY;
2295	key.offset = offset;
2296
2297	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2298	if (ret <= 0) {
2299		/*
2300		 * We should have dropped this offset, so if we find it then
2301		 * something has gone horribly wrong.
2302		 */
2303		if (ret == 0)
2304			ret = -EINVAL;
2305		return ret;
2306	}
2307
2308	leaf = path->nodes[0];
2309	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2310		u64 num_bytes;
2311
2312		path->slots[0]--;
2313		fi = btrfs_item_ptr(leaf, path->slots[0],
2314				    struct btrfs_file_extent_item);
2315		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2316			end - offset;
2317		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2318		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2319		btrfs_set_file_extent_offset(leaf, fi, 0);
2320		btrfs_mark_buffer_dirty(leaf);
2321		goto out;
2322	}
2323
2324	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2325		u64 num_bytes;
2326
2327		key.offset = offset;
2328		btrfs_set_item_key_safe(fs_info, path, &key);
2329		fi = btrfs_item_ptr(leaf, path->slots[0],
2330				    struct btrfs_file_extent_item);
2331		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2332			offset;
2333		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2334		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2335		btrfs_set_file_extent_offset(leaf, fi, 0);
2336		btrfs_mark_buffer_dirty(leaf);
2337		goto out;
2338	}
2339	btrfs_release_path(path);
2340
2341	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2342			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2343	if (ret)
2344		return ret;
2345
2346out:
2347	btrfs_release_path(path);
2348
2349	hole_em = alloc_extent_map();
2350	if (!hole_em) {
2351		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2352		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2353	} else {
2354		hole_em->start = offset;
2355		hole_em->len = end - offset;
2356		hole_em->ram_bytes = hole_em->len;
2357		hole_em->orig_start = offset;
2358
2359		hole_em->block_start = EXTENT_MAP_HOLE;
2360		hole_em->block_len = 0;
2361		hole_em->orig_block_len = 0;
2362		hole_em->bdev = fs_info->fs_devices->latest_bdev;
2363		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2364		hole_em->generation = trans->transid;
2365
2366		do {
2367			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2368			write_lock(&em_tree->lock);
2369			ret = add_extent_mapping(em_tree, hole_em, 1);
2370			write_unlock(&em_tree->lock);
2371		} while (ret == -EEXIST);
2372		free_extent_map(hole_em);
2373		if (ret)
2374			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2375					&inode->runtime_flags);
2376	}
2377
2378	return 0;
2379}
2380
2381/*
2382 * Find a hole extent on the given inode and change start/len to the end of
2383 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2384 *	   em->start + em->len > start).
2385 * When a hole extent is found, return 1 and modify start/len.
2386 */
2387static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2388{
2389	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2390	struct extent_map *em;
2391	int ret = 0;
2392
2393	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2394			      round_down(*start, fs_info->sectorsize),
2395			      round_up(*len, fs_info->sectorsize), 0);
2396	if (IS_ERR(em))
2397		return PTR_ERR(em);
2398
2399	/* Hole or vacuum extent (only exists in no-hole mode) */
2400	if (em->block_start == EXTENT_MAP_HOLE) {
2401		ret = 1;
2402		*len = em->start + em->len > *start + *len ?
2403		       0 : *start + *len - em->start - em->len;
2404		*start = em->start + em->len;
2405	}
2406	free_extent_map(em);
2407	return ret;
2408}
2409
2410static int btrfs_punch_hole_lock_range(struct inode *inode,
2411				       const u64 lockstart,
2412				       const u64 lockend,
2413				       struct extent_state **cached_state)
2414{
2415	while (1) {
2416		struct btrfs_ordered_extent *ordered;
2417		int ret;
2418
2419		truncate_pagecache_range(inode, lockstart, lockend);
2420
2421		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2422				 cached_state);
2423		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2424
2425		/*
2426		 * We need to make sure we have no ordered extents in this range
2427		 * and nobody raced in and read a page in this range; if either
2428		 * happened we need to try again.
2429		 */
2430		if ((!ordered ||
2431		    (ordered->file_offset + ordered->len <= lockstart ||
2432		     ordered->file_offset > lockend)) &&
2433		     !filemap_range_has_page(inode->i_mapping,
2434					     lockstart, lockend)) {
2435			if (ordered)
2436				btrfs_put_ordered_extent(ordered);
2437			break;
2438		}
2439		if (ordered)
2440			btrfs_put_ordered_extent(ordered);
2441		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2442				     lockend, cached_state);
2443		ret = btrfs_wait_ordered_range(inode, lockstart,
2444					       lockend - lockstart + 1);
2445		if (ret)
2446			return ret;
2447	}
2448	return 0;
2449}
2450
2451static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
2452				     struct inode *inode,
2453				     struct btrfs_path *path,
2454				     struct btrfs_clone_extent_info *clone_info,
2455				     const u64 clone_len)
2456{
2457	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2458	struct btrfs_root *root = BTRFS_I(inode)->root;
2459	struct btrfs_file_extent_item *extent;
2460	struct extent_buffer *leaf;
2461	struct btrfs_key key;
2462	int slot;
2463	struct btrfs_ref ref = { 0 };
2464	u64 ref_offset;
2465	int ret;
2466
2467	if (clone_len == 0)
2468		return 0;
2469
2470	if (clone_info->disk_offset == 0 &&
2471	    btrfs_fs_incompat(fs_info, NO_HOLES))
2472		return 0;
2473
2474	key.objectid = btrfs_ino(BTRFS_I(inode));
2475	key.type = BTRFS_EXTENT_DATA_KEY;
2476	key.offset = clone_info->file_offset;
2477	ret = btrfs_insert_empty_item(trans, root, path, &key,
2478				      clone_info->item_size);
2479	if (ret)
2480		return ret;
2481	leaf = path->nodes[0];
2482	slot = path->slots[0];
2483	write_extent_buffer(leaf, clone_info->extent_buf,
2484			    btrfs_item_ptr_offset(leaf, slot),
2485			    clone_info->item_size);
2486	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2487	btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
2488	btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
2489	btrfs_mark_buffer_dirty(leaf);
2490	btrfs_release_path(path);
2491
2492	/* If it's a hole, nothing more needs to be done. */
2493	if (clone_info->disk_offset == 0)
2494		return 0;
2495
2496	inode_add_bytes(inode, clone_len);
2497	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2498			       clone_info->disk_offset,
2499			       clone_info->disk_len, 0);
2500	ref_offset = clone_info->file_offset - clone_info->data_offset;
2501	btrfs_init_data_ref(&ref, root->root_key.objectid,
2502			    btrfs_ino(BTRFS_I(inode)), ref_offset);
2503	ret = btrfs_inc_extent_ref(trans, &ref);
2504
2505	return ret;
2506}
2507
2508/*
2509 * The respective range must have been previously locked, as well as the inode.
2510 * The end offset is inclusive (last byte of the range).
2511 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
2512 * cloning.
2513 * When cloning, we don't want to end up in a state where we dropped extents
2514 * without inserting a new one, so we must abort the transaction to avoid a
2515 * corruption.
2516 */
2517int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
2518			   const u64 start, const u64 end,
2519			   struct btrfs_clone_extent_info *clone_info,
2520			   struct btrfs_trans_handle **trans_out)
2521{
2522	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2523	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2524	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2525	struct btrfs_root *root = BTRFS_I(inode)->root;
2526	struct btrfs_trans_handle *trans = NULL;
2527	struct btrfs_block_rsv *rsv;
2528	unsigned int rsv_count;
2529	u64 cur_offset;
2530	u64 drop_end;
2531	u64 len = end - start;
2532	int ret = 0;
2533
2534	if (end <= start)
2535		return -EINVAL;
2536
2537	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2538	if (!rsv) {
2539		ret = -ENOMEM;
2540		goto out;
2541	}
2542	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2543	rsv->failfast = 1;
2544
2545	/*
2546	 * 1 - update the inode
2547	 * 1 - removing the extents in the range
2548	 * 1 - adding the hole extent if no_holes isn't set or if we are cloning
2549	 *     an extent
2550	 */
2551	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
2552		rsv_count = 3;
2553	else
2554		rsv_count = 2;
2555
2556	trans = btrfs_start_transaction(root, rsv_count);
2557	if (IS_ERR(trans)) {
2558		ret = PTR_ERR(trans);
2559		trans = NULL;
2560		goto out_free;
2561	}
2562
2563	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2564				      min_size, false);
2565	BUG_ON(ret);
2566	trans->block_rsv = rsv;
2567
2568	cur_offset = start;
2569	while (cur_offset < end) {
2570		ret = __btrfs_drop_extents(trans, root, inode, path,
2571					   cur_offset, end + 1, &drop_end,
2572					   1, 0, 0, NULL);
2573		if (ret != -ENOSPC) {
2574			/*
2575			 * When cloning we want to avoid transaction aborts when
2576			 * nothing was done and we are attempting to clone parts
2577			 * of inline extents, in such cases -EOPNOTSUPP is
2578			 * returned by __btrfs_drop_extents() without having
2579			 * changed anything in the file.
2580			 */
2581			if (clone_info && ret && ret != -EOPNOTSUPP)
2582				btrfs_abort_transaction(trans, ret);
2583			break;
2584		}
2585
2586		trans->block_rsv = &fs_info->trans_block_rsv;
2587
2588		if (!clone_info && cur_offset < drop_end &&
2589		    cur_offset < ino_size) {
2590			ret = fill_holes(trans, BTRFS_I(inode), path,
2591					cur_offset, drop_end);
2592			if (ret) {
2593				/*
2594				 * If we failed then we didn't insert our hole
2595				 * entries for the area we dropped, so now the
2596				 * fs is corrupted, so we must abort the
2597				 * transaction.
2598				 */
2599				btrfs_abort_transaction(trans, ret);
2600				break;
2601			}
2602		}
2603
2604		if (clone_info) {
2605			u64 clone_len = drop_end - cur_offset;
2606
2607			ret = btrfs_insert_clone_extent(trans, inode, path,
2608							clone_info, clone_len);
2609			if (ret) {
2610				btrfs_abort_transaction(trans, ret);
2611				break;
2612			}
2613			clone_info->data_len -= clone_len;
2614			clone_info->data_offset += clone_len;
2615			clone_info->file_offset += clone_len;
2616		}
2617
2618		cur_offset = drop_end;
2619
2620		ret = btrfs_update_inode(trans, root, inode);
2621		if (ret)
2622			break;
2623
2624		btrfs_end_transaction(trans);
2625		btrfs_btree_balance_dirty(fs_info);
2626
2627		trans = btrfs_start_transaction(root, rsv_count);
2628		if (IS_ERR(trans)) {
2629			ret = PTR_ERR(trans);
2630			trans = NULL;
2631			break;
2632		}
2633
2634		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2635					      rsv, min_size, false);
2636		BUG_ON(ret);	/* shouldn't happen */
2637		trans->block_rsv = rsv;
2638
2639		if (!clone_info) {
2640			ret = find_first_non_hole(inode, &cur_offset, &len);
2641			if (unlikely(ret < 0))
2642				break;
2643			if (ret && !len) {
2644				ret = 0;
2645				break;
2646			}
2647		}
2648	}
2649
2650	/*
2651	 * If we were cloning, force the next fsync to be a full one since we
2652	 * replaced (or just dropped in the case of cloning holes when
2653	 * NO_HOLES is enabled) extents and extent maps.
2654	 * This is for the sake of simplicity, and cloning into files larger
2655	 * than 16Mb would force the full fsync anyway (when
2656	 * try_release_extent_mapping() is invoked during page cache truncation).
2657	 */
2658	if (clone_info)
2659		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2660			&BTRFS_I(inode)->runtime_flags);
2661
2662	if (ret)
2663		goto out_trans;
2664
2665	trans->block_rsv = &fs_info->trans_block_rsv;
2666	/*
2667	 * If we are using the NO_HOLES feature we might already have had a
2668	 * hole that overlaps a part of the region [lockstart, lockend] and
2669	 * ends at (or beyond) lockend. Since we have no file extent items to
2670	 * represent holes, drop_end can be less than lockend and so we must
2671	 * make sure we have an extent map representing the existing hole (the
2672	 * call to __btrfs_drop_extents() might have dropped the existing extent
2673	 * map representing the existing hole), otherwise the fast fsync path
2674	 * will not record the existence of the hole region
2675	 * [existing_hole_start, lockend].
2676	 */
2677	if (drop_end <= end)
2678		drop_end = end + 1;
2679	/*
2680	 * Don't insert file hole extent item if it's for a range beyond eof
2681	 * (because it's useless) or if it represents a 0 bytes range (when
2682	 * cur_offset == drop_end).
2683	 */
2684	if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
2685		ret = fill_holes(trans, BTRFS_I(inode), path,
2686				cur_offset, drop_end);
2687		if (ret) {
2688			/* Same comment as above. */
2689			btrfs_abort_transaction(trans, ret);
2690			goto out_trans;
2691		}
2692	}
2693	if (clone_info) {
2694		ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
2695						clone_info->data_len);
2696		if (ret) {
2697			btrfs_abort_transaction(trans, ret);
2698			goto out_trans;
2699		}
2700	}
2701
2702out_trans:
2703	if (!trans)
2704		goto out_free;
2705
2706	trans->block_rsv = &fs_info->trans_block_rsv;
2707	if (ret)
2708		btrfs_end_transaction(trans);
2709	else
2710		*trans_out = trans;
2711out_free:
2712	btrfs_free_block_rsv(fs_info, rsv);
2713out:
2714	return ret;
2715}
2716
2717static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2718{
2719	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2720	struct btrfs_root *root = BTRFS_I(inode)->root;
2721	struct extent_state *cached_state = NULL;
2722	struct btrfs_path *path;
2723	struct btrfs_trans_handle *trans = NULL;
2724	u64 lockstart;
2725	u64 lockend;
2726	u64 tail_start;
2727	u64 tail_len;
2728	u64 orig_start = offset;
2729	int ret = 0;
2730	bool same_block;
2731	u64 ino_size;
2732	bool truncated_block = false;
2733	bool updated_inode = false;
2734
2735	ret = btrfs_wait_ordered_range(inode, offset, len);
2736	if (ret)
2737		return ret;
2738
2739	inode_lock(inode);
2740	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2741	ret = find_first_non_hole(inode, &offset, &len);
2742	if (ret < 0)
2743		goto out_only_mutex;
2744	if (ret && !len) {
2745		/* Already in a large hole */
2746		ret = 0;
2747		goto out_only_mutex;
2748	}
2749
2750	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2751	lockend = round_down(offset + len,
2752			     btrfs_inode_sectorsize(inode)) - 1;
2753	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2754		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2755	/*
2756	 * We needn't truncate any block which is beyond the end of the file
2757	 * because we are sure there is no data there.
2758	 */
2759	/*
2760	 * Only do this if we are in the same block and we aren't doing the
2761	 * entire block.
2762	 */
2763	if (same_block && len < fs_info->sectorsize) {
2764		if (offset < ino_size) {
2765			truncated_block = true;
2766			ret = btrfs_truncate_block(inode, offset, len, 0);
2767		} else {
2768			ret = 0;
2769		}
2770		goto out_only_mutex;
2771	}
2772
2773	/* zero back part of the first block */
2774	if (offset < ino_size) {
2775		truncated_block = true;
2776		ret = btrfs_truncate_block(inode, offset, 0, 0);
2777		if (ret) {
2778			inode_unlock(inode);
2779			return ret;
2780		}
2781	}
2782
2783	/* Check the aligned pages after the first unaligned page.
2784	 * If offset != orig_start, the first unaligned page and
2785	 * several following pages are already holes, so the extra
2786	 * check can be skipped. */
2787	if (offset == orig_start) {
2788		/* after truncate page, check hole again */
2789		len = offset + len - lockstart;
2790		offset = lockstart;
2791		ret = find_first_non_hole(inode, &offset, &len);
2792		if (ret < 0)
2793			goto out_only_mutex;
2794		if (ret && !len) {
2795			ret = 0;
2796			goto out_only_mutex;
2797		}
2798		lockstart = offset;
2799	}
2800
2801	/* Check the tail unaligned part is in a hole */
2802	tail_start = lockend + 1;
2803	tail_len = offset + len - tail_start;
2804	if (tail_len) {
2805		ret = find_first_non_hole(inode, &tail_start, &tail_len);
2806		if (unlikely(ret < 0))
2807			goto out_only_mutex;
2808		if (!ret) {
2809			/* zero the front end of the last page */
2810			if (tail_start + tail_len < ino_size) {
2811				truncated_block = true;
2812				ret = btrfs_truncate_block(inode,
2813							tail_start + tail_len,
2814							0, 1);
2815				if (ret)
2816					goto out_only_mutex;
2817			}
2818		}
2819	}
2820
2821	if (lockend < lockstart) {
2822		ret = 0;
2823		goto out_only_mutex;
2824	}
2825
2826	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2827					  &cached_state);
2828	if (ret)
2829		goto out_only_mutex;
2830
2831	path = btrfs_alloc_path();
2832	if (!path) {
2833		ret = -ENOMEM;
2834		goto out;
2835	}
2836
2837	ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
2838				     &trans);
2839	btrfs_free_path(path);
2840	if (ret)
2841		goto out;
2842
2843	ASSERT(trans != NULL);
2844	inode_inc_iversion(inode);
2845	inode->i_mtime = inode->i_ctime = current_time(inode);
2846	ret = btrfs_update_inode(trans, root, inode);
2847	updated_inode = true;
2848	btrfs_end_transaction(trans);
2849	btrfs_btree_balance_dirty(fs_info);
2850out:
2851	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2852			     &cached_state);
2853out_only_mutex:
2854	if (!updated_inode && truncated_block && !ret) {
2855		/*
2856		 * If we only end up zeroing part of a page, we still need to
2857		 * update the inode item, so that all the time fields are
2858		 * updated as well as the necessary btrfs inode in memory fields
2859		 * for detecting, at fsync time, if the inode isn't yet in the
2860		 * log tree or it's there but not up to date.
2861		 */
2862		struct timespec64 now = current_time(inode);
2863
2864		inode_inc_iversion(inode);
2865		inode->i_mtime = now;
2866		inode->i_ctime = now;
2867		trans = btrfs_start_transaction(root, 1);
2868		if (IS_ERR(trans)) {
2869			ret = PTR_ERR(trans);
2870		} else {
2871			int ret2;
2872
2873			ret = btrfs_update_inode(trans, root, inode);
2874			ret2 = btrfs_end_transaction(trans);
2875			if (!ret)
2876				ret = ret2;
2877		}
2878	}
2879	inode_unlock(inode);
2880	return ret;
2881}
2882
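/*
 * Annotation, not part of the kernel source: how btrfs_punch_hole() is
 * reached from userspace.  FALLOC_FL_PUNCH_HOLE must be combined with
 * FALLOC_FL_KEEP_SIZE; offset and len need not be block aligned, since the
 * partial blocks are zeroed above with btrfs_truncate_block().
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int punch_hole_sketch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}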
2883/* Helper structure to record which range is already reserved */
2884struct falloc_range {
2885	struct list_head list;
2886	u64 start;
2887	u64 len;
2888};
2889
2890/*
2891 * Helper function to add falloc range
2892 *
2893 * Caller should have locked the larger range of extent containing
2894 * [start, len)
2895 */
2896static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2897{
2898	struct falloc_range *prev = NULL;
2899	struct falloc_range *range = NULL;
2900
2901	if (list_empty(head))
2902		goto insert;
2903
2904	/*
2905	 * As fallocate iterates in bytenr order, we only need to check
2906	 * the last range.
2907	 */
2908	prev = list_entry(head->prev, struct falloc_range, list);
2909	if (prev->start + prev->len == start) {
2910		prev->len += len;
2911		return 0;
2912	}
2913insert:
2914	range = kmalloc(sizeof(*range), GFP_KERNEL);
2915	if (!range)
2916		return -ENOMEM;
2917	range->start = start;
2918	range->len = len;
2919	list_add_tail(&range->list, head);
2920	return 0;
2921}
2922
2923static int btrfs_fallocate_update_isize(struct inode *inode,
2924					const u64 end,
2925					const int mode)
2926{
2927	struct btrfs_trans_handle *trans;
2928	struct btrfs_root *root = BTRFS_I(inode)->root;
2929	int ret;
2930	int ret2;
2931
2932	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2933		return 0;
2934
2935	trans = btrfs_start_transaction(root, 1);
2936	if (IS_ERR(trans))
2937		return PTR_ERR(trans);
2938
2939	inode->i_ctime = current_time(inode);
2940	i_size_write(inode, end);
2941	btrfs_ordered_update_i_size(inode, end, NULL);
2942	ret = btrfs_update_inode(trans, root, inode);
2943	ret2 = btrfs_end_transaction(trans);
2944
2945	return ret ? ret : ret2;
2946}
2947
2948enum {
2949	RANGE_BOUNDARY_WRITTEN_EXTENT,
2950	RANGE_BOUNDARY_PREALLOC_EXTENT,
2951	RANGE_BOUNDARY_HOLE,
2952};
2953
2954static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2955						 u64 offset)
2956{
2957	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2958	struct extent_map *em;
2959	int ret;
2960
2961	offset = round_down(offset, sectorsize);
2962	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2963	if (IS_ERR(em))
2964		return PTR_ERR(em);
2965
2966	if (em->block_start == EXTENT_MAP_HOLE)
2967		ret = RANGE_BOUNDARY_HOLE;
2968	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2969		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2970	else
2971		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2972
2973	free_extent_map(em);
2974	return ret;
2975}
2976
2977static int btrfs_zero_range(struct inode *inode,
2978			    loff_t offset,
2979			    loff_t len,
2980			    const int mode)
2981{
2982	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2983	struct extent_map *em;
2984	struct extent_changeset *data_reserved = NULL;
2985	int ret;
2986	u64 alloc_hint = 0;
2987	const u64 sectorsize = btrfs_inode_sectorsize(inode);
2988	u64 alloc_start = round_down(offset, sectorsize);
2989	u64 alloc_end = round_up(offset + len, sectorsize);
2990	u64 bytes_to_reserve = 0;
2991	bool space_reserved = false;
2992
2993	inode_dio_wait(inode);
2994
2995	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2996			      alloc_start, alloc_end - alloc_start, 0);
2997	if (IS_ERR(em)) {
2998		ret = PTR_ERR(em);
2999		goto out;
3000	}
3001
3002	/*
3003	 * Avoid hole punching and extent allocation for some cases. More cases
3004	 * could be considered, but these are unlikely common and we keep things
3005	 * as simple as possible for now. Also, intentionally, if the target
3006	 * range contains one or more prealloc extents together with regular
3007	 * extents and holes, we drop all the existing extents and allocate a
3008	 * new prealloc extent, so that we get a larger contiguous disk extent.
3009	 */
3010	if (em->start <= alloc_start &&
3011	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3012		const u64 em_end = em->start + em->len;
3013
3014		if (em_end >= offset + len) {
3015			/*
3016			 * The whole range is already a prealloc extent,
3017			 * do nothing except updating the inode's i_size if
3018			 * needed.
3019			 */
3020			free_extent_map(em);
3021			ret = btrfs_fallocate_update_isize(inode, offset + len,
3022							   mode);
3023			goto out;
3024		}
3025		/*
3026		 * Part of the range is already a prealloc extent, so operate
3027		 * only on the remaining part of the range.
3028		 */
3029		alloc_start = em_end;
3030		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3031		len = offset + len - alloc_start;
3032		offset = alloc_start;
3033		alloc_hint = em->block_start + em->len;
3034	}
3035	free_extent_map(em);
3036
3037	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3038	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3039		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
3040				      alloc_start, sectorsize, 0);
3041		if (IS_ERR(em)) {
3042			ret = PTR_ERR(em);
3043			goto out;
3044		}
3045
3046		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3047			free_extent_map(em);
3048			ret = btrfs_fallocate_update_isize(inode, offset + len,
3049							   mode);
3050			goto out;
3051		}
3052		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3053			free_extent_map(em);
3054			ret = btrfs_truncate_block(inode, offset, len, 0);
3055			if (!ret)
3056				ret = btrfs_fallocate_update_isize(inode,
3057								   offset + len,
3058								   mode);
3059			return ret;
3060		}
3061		free_extent_map(em);
3062		alloc_start = round_down(offset, sectorsize);
3063		alloc_end = alloc_start + sectorsize;
3064		goto reserve_space;
3065	}
3066
3067	alloc_start = round_up(offset, sectorsize);
3068	alloc_end = round_down(offset + len, sectorsize);
3069
3070	/*
3071	 * For unaligned ranges, check the pages at the boundaries, they might
3072	 * map to an extent, in which case we need to partially zero them, or
3073	 * they might map to a hole, in which case we need our allocation range
3074	 * to cover them.
3075	 */
3076	if (!IS_ALIGNED(offset, sectorsize)) {
3077		ret = btrfs_zero_range_check_range_boundary(inode, offset);
3078		if (ret < 0)
3079			goto out;
3080		if (ret == RANGE_BOUNDARY_HOLE) {
3081			alloc_start = round_down(offset, sectorsize);
3082			ret = 0;
3083		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3084			ret = btrfs_truncate_block(inode, offset, 0, 0);
3085			if (ret)
3086				goto out;
3087		} else {
3088			ret = 0;
3089		}
3090	}
3091
3092	if (!IS_ALIGNED(offset + len, sectorsize)) {
3093		ret = btrfs_zero_range_check_range_boundary(inode,
3094							    offset + len);
3095		if (ret < 0)
3096			goto out;
3097		if (ret == RANGE_BOUNDARY_HOLE) {
3098			alloc_end = round_up(offset + len, sectorsize);
3099			ret = 0;
3100		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3101			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
3102			if (ret)
3103				goto out;
3104		} else {
3105			ret = 0;
3106		}
3107	}
3108
3109reserve_space:
3110	if (alloc_start < alloc_end) {
3111		struct extent_state *cached_state = NULL;
3112		const u64 lockstart = alloc_start;
3113		const u64 lockend = alloc_end - 1;
3114
3115		bytes_to_reserve = alloc_end - alloc_start;
3116		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3117						      bytes_to_reserve);
3118		if (ret < 0)
3119			goto out;
3120		space_reserved = true;
3121		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3122						alloc_start, bytes_to_reserve);
3123		if (ret)
3124			goto out;
3125		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3126						  &cached_state);
3127		if (ret)
3128			goto out;
3129		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3130						alloc_end - alloc_start,
3131						i_blocksize(inode),
3132						offset + len, &alloc_hint);
3133		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3134				     lockend, &cached_state);
3135		/* btrfs_prealloc_file_range releases reserved space on error */
3136		if (ret) {
3137			space_reserved = false;
3138			goto out;
3139		}
3140	}
3141	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3142 out:
3143	if (ret && space_reserved)
3144		btrfs_free_reserved_data_space(inode, data_reserved,
3145					       alloc_start, bytes_to_reserve);
3146	extent_changeset_free(data_reserved);
3147
3148	return ret;
3149}
3150
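/*
 * Annotation, not part of the kernel source: the userspace entry point for
 * btrfs_zero_range() above.  FALLOC_FL_ZERO_RANGE keeps blocks allocated
 * (as prealloc/unwritten extents) but makes them read back as zeroes;
 * adding FALLOC_FL_KEEP_SIZE leaves i_size untouched.
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int zero_range_sketch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}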
3151static long btrfs_fallocate(struct file *file, int mode,
3152			    loff_t offset, loff_t len)
3153{
3154	struct inode *inode = file_inode(file);
3155	struct extent_state *cached_state = NULL;
3156	struct extent_changeset *data_reserved = NULL;
3157	struct falloc_range *range;
3158	struct falloc_range *tmp;
3159	struct list_head reserve_list;
3160	u64 cur_offset;
3161	u64 last_byte;
3162	u64 alloc_start;
3163	u64 alloc_end;
3164	u64 alloc_hint = 0;
3165	u64 locked_end;
3166	u64 actual_end = 0;
3167	struct extent_map *em;
3168	int blocksize = btrfs_inode_sectorsize(inode);
3169	int ret;
3170
3171	alloc_start = round_down(offset, blocksize);
3172	alloc_end = round_up(offset + len, blocksize);
3173	cur_offset = alloc_start;
3174
3175	/* Make sure we aren't being given some crap mode */
3176	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3177		     FALLOC_FL_ZERO_RANGE))
3178		return -EOPNOTSUPP;
3179
3180	if (mode & FALLOC_FL_PUNCH_HOLE)
3181		return btrfs_punch_hole(inode, offset, len);
3182
3183	/*
3184	 * Only trigger disk allocation here, don't trigger the qgroup reservation.
3185	 *
3186	 * For qgroup space, it will be checked later.
3187	 */
3188	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3189		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3190						      alloc_end - alloc_start);
3191		if (ret < 0)
3192			return ret;
3193	}
3194
3195	inode_lock(inode);
3196
3197	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3198		ret = inode_newsize_ok(inode, offset + len);
3199		if (ret)
3200			goto out;
3201	}
3202
3203	/*
3204	 * TODO: Move these two operations after we have checked
3205	 * accurate reserved space, or fallocate can still fail but
3206	 * with the page truncated or the size expanded.
3207	 *
3208	 * But that's a minor problem and won't do much harm BTW.
3209	 */
3210	if (alloc_start > inode->i_size) {
3211		ret = btrfs_cont_expand(inode, i_size_read(inode),
3212					alloc_start);
3213		if (ret)
3214			goto out;
3215	} else if (offset + len > inode->i_size) {
3216		/*
3217		 * If we are fallocating from the end of the file onward we
3218		 * need to zero out the end of the block if i_size lands in the
3219		 * middle of a block.
3220		 */
3221		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3222		if (ret)
3223			goto out;
3224	}
3225
3226	/*
3227	 * wait for ordered IO before we have any locks.  We'll loop again
3228	 * below with the locks held.
3229	 */
3230	ret = btrfs_wait_ordered_range(inode, alloc_start,
3231				       alloc_end - alloc_start);
3232	if (ret)
3233		goto out;
3234
3235	if (mode & FALLOC_FL_ZERO_RANGE) {
3236		ret = btrfs_zero_range(inode, offset, len, mode);
3237		inode_unlock(inode);
3238		return ret;
3239	}
3240
3241	locked_end = alloc_end - 1;
3242	while (1) {
3243		struct btrfs_ordered_extent *ordered;
3244
3245		/* the extent lock is ordered inside the running
3246		 * transaction
3247		 */
3248		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3249				 locked_end, &cached_state);
3250		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3251
3252		if (ordered &&
3253		    ordered->file_offset + ordered->len > alloc_start &&
3254		    ordered->file_offset < alloc_end) {
3255			btrfs_put_ordered_extent(ordered);
3256			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3257					     alloc_start, locked_end,
3258					     &cached_state);
3259			/*
3260			 * we can't wait on the range with the transaction
3261			 * running or with the extent lock held
3262			 */
3263			ret = btrfs_wait_ordered_range(inode, alloc_start,
3264						       alloc_end - alloc_start);
3265			if (ret)
3266				goto out;
3267		} else {
3268			if (ordered)
3269				btrfs_put_ordered_extent(ordered);
3270			break;
3271		}
3272	}
3273
3274	/* First, check if we exceed the qgroup limit */
3275	INIT_LIST_HEAD(&reserve_list);
3276	while (cur_offset < alloc_end) {
3277		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3278				      alloc_end - cur_offset, 0);
3279		if (IS_ERR(em)) {
3280			ret = PTR_ERR(em);
3281			break;
3282		}
3283		last_byte = min(extent_map_end(em), alloc_end);
3284		actual_end = min_t(u64, extent_map_end(em), offset + len);
3285		last_byte = ALIGN(last_byte, blocksize);
3286		if (em->block_start == EXTENT_MAP_HOLE ||
3287		    (cur_offset >= inode->i_size &&
3288		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3289			ret = add_falloc_range(&reserve_list, cur_offset,
3290					       last_byte - cur_offset);
3291			if (ret < 0) {
3292				free_extent_map(em);
3293				break;
3294			}
3295			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3296					cur_offset, last_byte - cur_offset);
3297			if (ret < 0) {
3298				cur_offset = last_byte;
3299				free_extent_map(em);
3300				break;
3301			}
3302		} else {
3303			/*
3304			 * No need to reserve an unwritten extent for this
3305			 * range; free the reserved data space first, otherwise
3306			 * it'll result in a false ENOSPC error.
3307			 */
3308			btrfs_free_reserved_data_space(inode, data_reserved,
3309					cur_offset, last_byte - cur_offset);
3310		}
3311		free_extent_map(em);
3312		cur_offset = last_byte;
3313	}
3314
3315	/*
3316	 * If ret is still 0, we're OK to fallocate.
3317	 * Otherwise just clean up the list and exit.
3318	 */
3319	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3320		if (!ret)
3321			ret = btrfs_prealloc_file_range(inode, mode,
3322					range->start,
3323					range->len, i_blocksize(inode),
3324					offset + len, &alloc_hint);
3325		else
3326			btrfs_free_reserved_data_space(inode,
3327					data_reserved, range->start,
3328					range->len);
3329		list_del(&range->list);
3330		kfree(range);
3331	}
3332	if (ret < 0)
3333		goto out_unlock;
3334
3335	/*
3336	 * We didn't need to allocate any more space, but we still extended the
3337	 * size of the file so we need to update i_size and the inode item.
3338	 */
3339	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3340out_unlock:
3341	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3342			     &cached_state);
3343out:
3344	inode_unlock(inode);
3345	/* Let go of our reservation. */
3346	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3347		btrfs_free_reserved_data_space(inode, data_reserved,
3348				cur_offset, alloc_end - cur_offset);
3349	extent_changeset_free(data_reserved);
3350	return ret;
3351}
3352
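/*
 * Annotation, not part of the kernel source: plain preallocation
 * (mode == 0), the default path through btrfs_fallocate() above.  It
 * creates prealloc extents so that later writes into the range cannot fail
 * with ENOSPC for data, and it extends i_size when the range ends past EOF.
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int preallocate_sketch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, 0, offset, len);
}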
3353static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3354{
3355	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3356	struct extent_map *em = NULL;
3357	struct extent_state *cached_state = NULL;
3358	u64 lockstart;
3359	u64 lockend;
3360	u64 start;
3361	u64 len;
3362	int ret = 0;
3363
3364	if (inode->i_size == 0)
3365		return -ENXIO;
3366
3367	/*
3368	 * *offset can be negative, in which case we start finding DATA/HOLE from
3369	 * the very start of the file.
3370	 */
3371	start = max_t(loff_t, 0, *offset);
3372
3373	lockstart = round_down(start, fs_info->sectorsize);
3374	lockend = round_up(i_size_read(inode),
3375			   fs_info->sectorsize);
3376	if (lockend <= lockstart)
3377		lockend = lockstart + fs_info->sectorsize;
3378	lockend--;
3379	len = lockend - lockstart + 1;
3380
3381	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3382			 &cached_state);
3383
3384	while (start < inode->i_size) {
3385		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
3386		if (IS_ERR(em)) {
3387			ret = PTR_ERR(em);
3388			em = NULL;
3389			break;
3390		}
3391
3392		if (whence == SEEK_HOLE &&
3393		    (em->block_start == EXTENT_MAP_HOLE ||
3394		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3395			break;
3396		else if (whence == SEEK_DATA &&
3397			   (em->block_start != EXTENT_MAP_HOLE &&
3398			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3399			break;
3400
3401		start = em->start + em->len;
3402		free_extent_map(em);
3403		em = NULL;
3404		cond_resched();
3405	}
3406	free_extent_map(em);
3407	if (!ret) {
3408		if (whence == SEEK_DATA && start >= inode->i_size)
3409			ret = -ENXIO;
3410		else
3411			*offset = min_t(loff_t, start, inode->i_size);
3412	}
3413	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3414			     &cached_state);
3415	return ret;
3416}
3417
3418static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3419{
3420	struct inode *inode = file->f_mapping->host;
3421	int ret;
3422
3423	inode_lock(inode);
3424	switch (whence) {
3425	case SEEK_END:
3426	case SEEK_CUR:
3427		offset = generic_file_llseek(file, offset, whence);
3428		goto out;
3429	case SEEK_DATA:
3430	case SEEK_HOLE:
3431		if (offset >= i_size_read(inode)) {
3432			inode_unlock(inode);
3433			return -ENXIO;
3434		}
3435
3436		ret = find_desired_extent(inode, &offset, whence);
3437		if (ret) {
3438			inode_unlock(inode);
3439			return ret;
3440		}
3441	}
3442
3443	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3444out:
3445	inode_unlock(inode);
3446	return offset;
3447}
3448
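/*
 * Annotation, not part of the kernel source: enumerating a file's data/hole
 * layout with the SEEK_DATA/SEEK_HOLE support implemented by
 * find_desired_extent() above.  lseek() fails with ENXIO once SEEK_DATA is
 * asked for an offset at or past the last data, which ends the loop.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void dump_layout_sketch(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
}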
3449static int btrfs_file_open(struct inode *inode, struct file *filp)
3450{
3451	filp->f_mode |= FMODE_NOWAIT;
3452	return generic_file_open(inode, filp);
3453}
3454
3455const struct file_operations btrfs_file_operations = {
3456	.llseek		= btrfs_file_llseek,
3457	.read_iter      = generic_file_read_iter,
3458	.splice_read	= generic_file_splice_read,
3459	.write_iter	= btrfs_file_write_iter,
3460	.mmap		= btrfs_file_mmap,
3461	.open		= btrfs_file_open,
3462	.release	= btrfs_release_file,
3463	.fsync		= btrfs_sync_file,
3464	.fallocate	= btrfs_fallocate,
3465	.unlocked_ioctl	= btrfs_ioctl,
3466#ifdef CONFIG_COMPAT
3467	.compat_ioctl	= btrfs_compat_ioctl,
3468#endif
3469	.remap_file_range = btrfs_remap_file_range,
3470};
3471
3472void __cold btrfs_auto_defrag_exit(void)
3473{
3474	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3475}
3476
3477int __init btrfs_auto_defrag_init(void)
3478{
3479	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3480					sizeof(struct inode_defrag), 0,
3481					SLAB_MEM_SPREAD,
3482					NULL);
3483	if (!btrfs_inode_defrag_cachep)
3484		return -ENOMEM;
3485
3486	return 0;
3487}
3488
3489int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3490{
3491	int ret;
3492
3493	/*
3494	 * So with compression we will find and lock a dirty page and clear the
3495	 * first one as dirty, setup an async extent, and immediately return
3496	 * with the entire range locked but with nobody actually marked with
3497	 * writeback.  So we can't just filemap_write_and_wait_range() and
3498	 * expect it to work since it will just kick off a thread to do the
3499	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3500	 * since it will wait on the page lock, which won't be unlocked until
3501	 * after the pages have been marked as writeback and so we're good to go
3502	 * from there.  We have to do this otherwise we'll miss the ordered
3503	 * extents and that results in badness.  Please Josef, do not think you
3504	 * know better and pull this out at some point in the future, it is
3505	 * right and you are wrong.
3506	 */
3507	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3508	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3509			     &BTRFS_I(inode)->runtime_flags))
3510		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3511
3512	return ret;
3513}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/time.h>
   9#include <linux/init.h>
  10#include <linux/string.h>
  11#include <linux/backing-dev.h>
  12#include <linux/falloc.h>
  13#include <linux/writeback.h>
  14#include <linux/compat.h>
  15#include <linux/slab.h>
  16#include <linux/btrfs.h>
  17#include <linux/uio.h>
  18#include <linux/iversion.h>
  19#include "ctree.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "btrfs_inode.h"
  23#include "print-tree.h"
  24#include "tree-log.h"
  25#include "locking.h"
  26#include "volumes.h"
  27#include "qgroup.h"
  28#include "compression.h"
  29#include "delalloc-space.h"
  30#include "reflink.h"
  31#include "subpage.h"
  32
  33static struct kmem_cache *btrfs_inode_defrag_cachep;
  34/*
  35 * when auto defrag is enabled we
  36 * queue up these defrag structs to remember which
  37 * inodes need defragging passes
  38 */
  39struct inode_defrag {
  40	struct rb_node rb_node;
  41	/* objectid */
  42	u64 ino;
  43	/*
  44	 * transid where the defrag was added, we search for
  45	 * extents newer than this
  46	 */
  47	u64 transid;
  48
  49	/* root objectid */
  50	u64 root;
  51
  52	/* last offset we were able to defrag */
  53	u64 last_offset;
  54
  55	/* if we've wrapped around back to zero once already */
  56	int cycled;
  57};
  58
  59static int __compare_inode_defrag(struct inode_defrag *defrag1,
  60				  struct inode_defrag *defrag2)
  61{
  62	if (defrag1->root > defrag2->root)
  63		return 1;
  64	else if (defrag1->root < defrag2->root)
  65		return -1;
  66	else if (defrag1->ino > defrag2->ino)
  67		return 1;
  68	else if (defrag1->ino < defrag2->ino)
  69		return -1;
  70	else
  71		return 0;
  72}
  73
  74/* pop a record for an inode into the defrag tree.  The lock
  75 * must be held already
  76 *
  77 * If you're inserting a record for an older transid than an
  78 * existing record, the transid already in the tree is lowered
  79 *
  80 * If an existing record is found the defrag item you
  81 * pass in is freed
  82 */
  83static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  84				    struct inode_defrag *defrag)
  85{
  86	struct btrfs_fs_info *fs_info = inode->root->fs_info;
  87	struct inode_defrag *entry;
  88	struct rb_node **p;
  89	struct rb_node *parent = NULL;
  90	int ret;
  91
  92	p = &fs_info->defrag_inodes.rb_node;
  93	while (*p) {
  94		parent = *p;
  95		entry = rb_entry(parent, struct inode_defrag, rb_node);
  96
  97		ret = __compare_inode_defrag(defrag, entry);
  98		if (ret < 0)
  99			p = &parent->rb_left;
 100		else if (ret > 0)
 101			p = &parent->rb_right;
 102		else {
 103			/* if we're reinserting an entry for
 104			 * an old defrag run, make sure to
 105			 * lower the transid of our existing record
 106			 */
 107			if (defrag->transid < entry->transid)
 108				entry->transid = defrag->transid;
 109			if (defrag->last_offset > entry->last_offset)
 110				entry->last_offset = defrag->last_offset;
 111			return -EEXIST;
 112		}
 113	}
 114	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 115	rb_link_node(&defrag->rb_node, parent, p);
 116	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 117	return 0;
 118}
 119
 120static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 121{
 122	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 123		return 0;
 124
 125	if (btrfs_fs_closing(fs_info))
 126		return 0;
 127
 128	return 1;
 129}
 130
 131/*
 132 * insert a defrag record for this inode if auto defrag is
 133 * enabled
 134 */
 135int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 136			   struct btrfs_inode *inode)
 137{
 138	struct btrfs_root *root = inode->root;
 139	struct btrfs_fs_info *fs_info = root->fs_info;
 140	struct inode_defrag *defrag;
 141	u64 transid;
 142	int ret;
 143
 144	if (!__need_auto_defrag(fs_info))
 145		return 0;
 146
 147	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 148		return 0;
 149
 150	if (trans)
 151		transid = trans->transid;
 152	else
 153		transid = inode->root->last_trans;
 154
 155	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 156	if (!defrag)
 157		return -ENOMEM;
 158
 159	defrag->ino = btrfs_ino(inode);
 160	defrag->transid = transid;
 161	defrag->root = root->root_key.objectid;
 162
 163	spin_lock(&fs_info->defrag_inodes_lock);
 164	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
  165		/*
  166		 * If we set the IN_DEFRAG flag and then evict the inode from
  167		 * memory, the re-read inode won't have the IN_DEFRAG flag set.
  168		 * In that case we may find an existing defrag record here.
  169		 */
 170		ret = __btrfs_add_inode_defrag(inode, defrag);
 171		if (ret)
 172			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 173	} else {
 174		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 175	}
 176	spin_unlock(&fs_info->defrag_inodes_lock);
 177	return 0;
 178}
 179
 180/*
 181 * Requeue the defrag object. If there is a defrag object that points to
 182 * the same inode in the tree, we will merge them together (by
 183 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 184 */
 185static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 186				       struct inode_defrag *defrag)
 187{
 188	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 189	int ret;
 190
 191	if (!__need_auto_defrag(fs_info))
 192		goto out;
 193
  194	/*
  195	 * Here we don't check the IN_DEFRAG flag, because we need to merge
  196	 * them together.
  197	 */
 198	spin_lock(&fs_info->defrag_inodes_lock);
 199	ret = __btrfs_add_inode_defrag(inode, defrag);
 200	spin_unlock(&fs_info->defrag_inodes_lock);
 201	if (ret)
 202		goto out;
 203	return;
 204out:
 205	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 206}
 207
  208/*
  209 * Pick the defraggable inode that we want; if it doesn't exist, we will
  210 * get the next one.
  211 */
 212static struct inode_defrag *
 213btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 214{
 215	struct inode_defrag *entry = NULL;
 216	struct inode_defrag tmp;
 217	struct rb_node *p;
 218	struct rb_node *parent = NULL;
 219	int ret;
 220
 221	tmp.ino = ino;
 222	tmp.root = root;
 223
 224	spin_lock(&fs_info->defrag_inodes_lock);
 225	p = fs_info->defrag_inodes.rb_node;
 226	while (p) {
 227		parent = p;
 228		entry = rb_entry(parent, struct inode_defrag, rb_node);
 229
 230		ret = __compare_inode_defrag(&tmp, entry);
 231		if (ret < 0)
 232			p = parent->rb_left;
 233		else if (ret > 0)
 234			p = parent->rb_right;
 235		else
 236			goto out;
 237	}
 238
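     	/*
      	 * No exact match was found: if the record we stopped at sorts
      	 * before the requested (root, ino), pick the next record instead.
      	 */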
 239	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 240		parent = rb_next(parent);
 241		if (parent)
 242			entry = rb_entry(parent, struct inode_defrag, rb_node);
 243		else
 244			entry = NULL;
 245	}
 246out:
 247	if (entry)
 248		rb_erase(parent, &fs_info->defrag_inodes);
 249	spin_unlock(&fs_info->defrag_inodes_lock);
 250	return entry;
 251}
 252
 253void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 254{
 255	struct inode_defrag *defrag;
 256	struct rb_node *node;
 257
 258	spin_lock(&fs_info->defrag_inodes_lock);
 259	node = rb_first(&fs_info->defrag_inodes);
 260	while (node) {
 261		rb_erase(node, &fs_info->defrag_inodes);
 262		defrag = rb_entry(node, struct inode_defrag, rb_node);
 263		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 264
 265		cond_resched_lock(&fs_info->defrag_inodes_lock);
 266
 267		node = rb_first(&fs_info->defrag_inodes);
 268	}
 269	spin_unlock(&fs_info->defrag_inodes_lock);
 270}
 271
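     /* Max number of pages to defrag in one pass before requeueing the inode */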
 272#define BTRFS_DEFRAG_BATCH	1024
 273
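     /*
      * Defrag one batch of the picked inode, requeueing it while more work
      * remains past the current offset or, once, wrapped back to offset zero.
      */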
 274static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 275				    struct inode_defrag *defrag)
 276{
 277	struct btrfs_root *inode_root;
 278	struct inode *inode;
 279	struct btrfs_ioctl_defrag_range_args range;
 280	int num_defrag;
 281	int ret;
 282
 283	/* get the inode */
 284	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
 285	if (IS_ERR(inode_root)) {
 286		ret = PTR_ERR(inode_root);
 287		goto cleanup;
 288	}
 289
 290	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
 291	btrfs_put_root(inode_root);
 292	if (IS_ERR(inode)) {
 293		ret = PTR_ERR(inode);
 294		goto cleanup;
 295	}
 296
 297	/* do a chunk of defrag */
 298	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 299	memset(&range, 0, sizeof(range));
 300	range.len = (u64)-1;
 301	range.start = defrag->last_offset;
 302
 303	sb_start_write(fs_info->sb);
 304	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 305				       BTRFS_DEFRAG_BATCH);
 306	sb_end_write(fs_info->sb);
 307	/*
 308	 * if we filled the whole defrag batch, there
 309	 * must be more work to do.  Queue this defrag
 310	 * again
 311	 */
 312	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 313		defrag->last_offset = range.start;
 314		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 315	} else if (defrag->last_offset && !defrag->cycled) {
 316		/*
 317		 * we didn't fill our defrag batch, but
 318		 * we didn't start at zero.  Make sure we loop
 319		 * around to the start of the file.
 320		 */
 321		defrag->last_offset = 0;
 322		defrag->cycled = 1;
 323		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 324	} else {
 325		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 326	}
 327
 328	iput(inode);
 329	return 0;
 330cleanup:
 331	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 332	return ret;
 333}
 334
 335/*
 336 * run through the list of inodes in the FS that need
 337 * defragging
 338 */
 339int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 340{
 341	struct inode_defrag *defrag;
 342	u64 first_ino = 0;
 343	u64 root_objectid = 0;
 344
 345	atomic_inc(&fs_info->defrag_running);
 346	while (1) {
 347		/* Pause the auto defragger. */
 348		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 349			     &fs_info->fs_state))
 350			break;
 351
 352		if (!__need_auto_defrag(fs_info))
 353			break;
 354
 355		/* find an inode to defrag */
 356		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 357						 first_ino);
 358		if (!defrag) {
 359			if (root_objectid || first_ino) {
 360				root_objectid = 0;
 361				first_ino = 0;
 362				continue;
 363			} else {
 364				break;
 365			}
 366		}
 367
 368		first_ino = defrag->ino + 1;
 369		root_objectid = defrag->root;
 370
 371		__btrfs_run_defrag_inode(fs_info, defrag);
 372	}
 373	atomic_dec(&fs_info->defrag_running);
 374
 375	/*
 376	 * during unmount, we use the transaction_wait queue to
 377	 * wait for the defragger to stop
 378	 */
 379	wake_up(&fs_info->transaction_wait);
 380	return 0;
 381}
 382
 383/* simple helper to fault in pages and copy.  This should go away
 384 * and be replaced with calls into generic code.
 385 */
 386static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 387					 struct page **prepared_pages,
 388					 struct iov_iter *i)
 389{
 390	size_t copied = 0;
 391	size_t total_copied = 0;
 392	int pg = 0;
 393	int offset = offset_in_page(pos);
 394
 395	while (write_bytes > 0) {
 396		size_t count = min_t(size_t,
 397				     PAGE_SIZE - offset, write_bytes);
 398		struct page *page = prepared_pages[pg];
 399		/*
 400		 * Copy data from userspace to the current page
 401		 */
 402		copied = copy_page_from_iter_atomic(page, offset, count, i);
 403
 404		/* Flush processor's dcache for this page */
 405		flush_dcache_page(page);
 406
  407		/*
  408		 * if we get a partial write, we can end up with
  409		 * partially up-to-date pages.  These add
  410		 * a lot of complexity, so make sure they don't
  411		 * happen by forcing this copy to be retried.
  412		 *
  413		 * The rest of the btrfs_file_write code will fall
  414		 * back to page-at-a-time copies after we return 0.
  415		 */
 416		if (unlikely(copied < count)) {
 417			if (!PageUptodate(page)) {
 418				iov_iter_revert(i, copied);
 419				copied = 0;
 420			}
 421			if (!copied)
 422				break;
 423		}
 424
 
 425		write_bytes -= copied;
 426		total_copied += copied;
 427		offset += copied;
 428		if (offset == PAGE_SIZE) {
 429			pg++;
 430			offset = 0;
 431		}
 432	}
 433	return total_copied;
 434}
 435
 436/*
 437 * unlocks pages after btrfs_file_write is done with them
 438 */
 439static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 440{
 441	size_t i;
 442	for (i = 0; i < num_pages; i++) {
  443		/* PageChecked is some magic around finding pages that
  444		 * have been modified without going through btrfs_set_page_dirty;
  445		 * clear it here. There should be no need to mark the pages
  446		 * accessed, as prepare_pages() already marked them accessed
  447		 * via find_or_create_page().
  448		 */
 449		ClearPageChecked(pages[i]);
 450		unlock_page(pages[i]);
 451		put_page(pages[i]);
 452	}
 453}
 454
 455/*
 456 * After btrfs_copy_from_user(), update the following things for delalloc:
 457 * - Mark newly dirtied pages as DELALLOC in the io tree.
 458 *   Used to advise which range is to be written back.
 459 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 460 * - Update inode size for past EOF write
 461 */
 462int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 463		      size_t num_pages, loff_t pos, size_t write_bytes,
 464		      struct extent_state **cached, bool noreserve)
 465{
 466	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 467	int err = 0;
 468	int i;
 469	u64 num_bytes;
 470	u64 start_pos;
 471	u64 end_of_last_block;
 472	u64 end_pos = pos + write_bytes;
 473	loff_t isize = i_size_read(&inode->vfs_inode);
 474	unsigned int extra_bits = 0;
 475
 476	if (write_bytes == 0)
 477		return 0;
 478
 479	if (noreserve)
 480		extra_bits |= EXTENT_NORESERVE;
 481
 482	start_pos = round_down(pos, fs_info->sectorsize);
 483	num_bytes = round_up(write_bytes + pos - start_pos,
 484			     fs_info->sectorsize);
 485	ASSERT(num_bytes <= U32_MAX);
 486
 487	end_of_last_block = start_pos + num_bytes - 1;
 488
 489	/*
 490	 * The pages may have already been dirty, clear out old accounting so
 491	 * we can set things up properly
 492	 */
 493	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
 494			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 495			 0, 0, cached);
 496
 497	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 498					extra_bits, cached);
 499	if (err)
 500		return err;
 501
 502	for (i = 0; i < num_pages; i++) {
 503		struct page *p = pages[i];
 504
 505		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
 506		ClearPageChecked(p);
 507		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
 508	}
 509
  510	/*
  511	 * We've only changed i_size in RAM, and we haven't updated
  512	 * the disk i_size.  There is no need to log the inode
  513	 * at this time.
  514	 */
 515	if (end_pos > isize)
 516		i_size_write(&inode->vfs_inode, end_pos);
 517	return 0;
 518}
 519
 520/*
 521 * this drops all the extents in the cache that intersect the range
 522 * [start, end].  Existing extents are split as required.
 523 */
 524void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 525			     int skip_pinned)
 526{
 527	struct extent_map *em;
 528	struct extent_map *split = NULL;
 529	struct extent_map *split2 = NULL;
 530	struct extent_map_tree *em_tree = &inode->extent_tree;
 531	u64 len = end - start + 1;
 532	u64 gen;
 533	int ret;
 534	int testend = 1;
 535	unsigned long flags;
 536	int compressed = 0;
 537	bool modified;
 538
 539	WARN_ON(end < start);
 540	if (end == (u64)-1) {
 541		len = (u64)-1;
 542		testend = 0;
 543	}
 544	while (1) {
 545		int no_splits = 0;
 546
 547		modified = false;
 548		if (!split)
 549			split = alloc_extent_map();
 550		if (!split2)
 551			split2 = alloc_extent_map();
 552		if (!split || !split2)
 553			no_splits = 1;
 554
 555		write_lock(&em_tree->lock);
 556		em = lookup_extent_mapping(em_tree, start, len);
 557		if (!em) {
 558			write_unlock(&em_tree->lock);
 559			break;
 560		}
 561		flags = em->flags;
 562		gen = em->generation;
 563		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 564			if (testend && em->start + em->len >= start + len) {
 565				free_extent_map(em);
 566				write_unlock(&em_tree->lock);
 567				break;
 568			}
 569			start = em->start + em->len;
 570			if (testend)
 571				len = start + len - (em->start + em->len);
 572			free_extent_map(em);
 573			write_unlock(&em_tree->lock);
 574			continue;
 575		}
 576		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 577		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 578		clear_bit(EXTENT_FLAG_LOGGING, &flags);
 579		modified = !list_empty(&em->list);
 580		if (no_splits)
 581			goto next;
 582
 583		if (em->start < start) {
 584			split->start = em->start;
 585			split->len = start - em->start;
 586
 587			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 588				split->orig_start = em->orig_start;
 589				split->block_start = em->block_start;
 590
 591				if (compressed)
 592					split->block_len = em->block_len;
 593				else
 594					split->block_len = split->len;
 595				split->orig_block_len = max(split->block_len,
 596						em->orig_block_len);
 597				split->ram_bytes = em->ram_bytes;
 598			} else {
 599				split->orig_start = split->start;
 600				split->block_len = 0;
 601				split->block_start = em->block_start;
 602				split->orig_block_len = 0;
 603				split->ram_bytes = split->len;
 604			}
 605
 606			split->generation = gen;
 607			split->flags = flags;
 608			split->compress_type = em->compress_type;
 609			replace_extent_mapping(em_tree, em, split, modified);
 610			free_extent_map(split);
 611			split = split2;
 612			split2 = NULL;
 613		}
 614		if (testend && em->start + em->len > start + len) {
 615			u64 diff = start + len - em->start;
 616
 617			split->start = start + len;
 618			split->len = em->start + em->len - (start + len);
 619			split->flags = flags;
 620			split->compress_type = em->compress_type;
 621			split->generation = gen;
 622
 623			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 624				split->orig_block_len = max(em->block_len,
 625						    em->orig_block_len);
 626
 627				split->ram_bytes = em->ram_bytes;
 628				if (compressed) {
 629					split->block_len = em->block_len;
 630					split->block_start = em->block_start;
 631					split->orig_start = em->orig_start;
 632				} else {
 633					split->block_len = split->len;
 634					split->block_start = em->block_start
 635						+ diff;
 636					split->orig_start = em->orig_start;
 637				}
 638			} else {
 639				split->ram_bytes = split->len;
 640				split->orig_start = split->start;
 641				split->block_len = 0;
 642				split->block_start = em->block_start;
 643				split->orig_block_len = 0;
 644			}
 645
 646			if (extent_map_in_tree(em)) {
 647				replace_extent_mapping(em_tree, em, split,
 648						       modified);
 649			} else {
 650				ret = add_extent_mapping(em_tree, split,
 651							 modified);
 652				ASSERT(ret == 0); /* Logic error */
 653			}
 654			free_extent_map(split);
 655			split = NULL;
 656		}
 657next:
 658		if (extent_map_in_tree(em))
 659			remove_extent_mapping(em_tree, em);
 660		write_unlock(&em_tree->lock);
 661
 662		/* once for us */
 663		free_extent_map(em);
  664	/* once for the tree */
 665		free_extent_map(em);
 666	}
 667	if (split)
 668		free_extent_map(split);
 669	if (split2)
 670		free_extent_map(split2);
 671}
 672
  673/*
  674 * This is very complex, but the basic idea is to drop all extents
  675 * in the range described by args->start and args->end.
 677 *
 678 * If an extent intersects the range but is not entirely inside the range
 679 * it is either truncated or split.  Anything entirely inside the range
 680 * is deleted from the tree.
 681 *
 682 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 683 * to deal with that. We set the field 'bytes_found' of the arguments structure
 684 * with the number of allocated bytes found in the target range, so that the
 685 * caller can update the inode's number of bytes in an atomic way when
 686 * replacing extents in a range to avoid races with stat(2).
 687 */
 688int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 689		       struct btrfs_root *root, struct btrfs_inode *inode,
 690		       struct btrfs_drop_extents_args *args)
 691{
 692	struct btrfs_fs_info *fs_info = root->fs_info;
 693	struct extent_buffer *leaf;
 694	struct btrfs_file_extent_item *fi;
 695	struct btrfs_ref ref = { 0 };
 696	struct btrfs_key key;
 697	struct btrfs_key new_key;
 698	u64 ino = btrfs_ino(inode);
 699	u64 search_start = args->start;
 700	u64 disk_bytenr = 0;
 701	u64 num_bytes = 0;
 702	u64 extent_offset = 0;
 703	u64 extent_end = 0;
 704	u64 last_end = args->start;
 705	int del_nr = 0;
 706	int del_slot = 0;
 707	int extent_type;
 708	int recow;
 709	int ret;
 710	int modify_tree = -1;
 711	int update_refs;
 712	int found = 0;
 713	int leafs_visited = 0;
 714	struct btrfs_path *path = args->path;
 715
 716	args->bytes_found = 0;
 717	args->extent_inserted = false;
 718
 719	/* Must always have a path if ->replace_extent is true */
 720	ASSERT(!(args->replace_extent && !args->path));
 721
 722	if (!path) {
 723		path = btrfs_alloc_path();
 724		if (!path) {
 725			ret = -ENOMEM;
 726			goto out;
 727		}
 728	}
 729
 730	if (args->drop_cache)
 731		btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);
 732
 733	if (args->start >= inode->disk_i_size && !args->replace_extent)
 734		modify_tree = 0;
 735
 736	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
 737	while (1) {
 738		recow = 0;
 739		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 740					       search_start, modify_tree);
 741		if (ret < 0)
 742			break;
 743		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
 744			leaf = path->nodes[0];
 745			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 746			if (key.objectid == ino &&
 747			    key.type == BTRFS_EXTENT_DATA_KEY)
 748				path->slots[0]--;
 749		}
 750		ret = 0;
 751		leafs_visited++;
 752next_slot:
 753		leaf = path->nodes[0];
 754		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 755			BUG_ON(del_nr > 0);
 756			ret = btrfs_next_leaf(root, path);
 757			if (ret < 0)
 758				break;
 759			if (ret > 0) {
 760				ret = 0;
 761				break;
 762			}
 763			leafs_visited++;
 764			leaf = path->nodes[0];
 765			recow = 1;
 766		}
 767
 768		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 769
 770		if (key.objectid > ino)
 771			break;
 772		if (WARN_ON_ONCE(key.objectid < ino) ||
 773		    key.type < BTRFS_EXTENT_DATA_KEY) {
 774			ASSERT(del_nr == 0);
 775			path->slots[0]++;
 776			goto next_slot;
 777		}
 778		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
 779			break;
 780
 781		fi = btrfs_item_ptr(leaf, path->slots[0],
 782				    struct btrfs_file_extent_item);
 783		extent_type = btrfs_file_extent_type(leaf, fi);
 784
 785		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 786		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 787			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 788			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 789			extent_offset = btrfs_file_extent_offset(leaf, fi);
 790			extent_end = key.offset +
 791				btrfs_file_extent_num_bytes(leaf, fi);
 792		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 793			extent_end = key.offset +
 794				btrfs_file_extent_ram_bytes(leaf, fi);
 795		} else {
 796			/* can't happen */
 797			BUG();
 798		}
 799
 800		/*
 801		 * Don't skip extent items representing 0 byte lengths. They
  802		 * used to be created (due to a bug) when punching holes hit an
  803		 * -ENOSPC condition. So if we find one here, just ensure we
 804		 * delete it, otherwise we would insert a new file extent item
 805		 * with the same key (offset) as that 0 bytes length file
 806		 * extent item in the call to setup_items_for_insert() later
 807		 * in this function.
 808		 */
 809		if (extent_end == key.offset && extent_end >= search_start) {
 810			last_end = extent_end;
 811			goto delete_extent_item;
 812		}
 813
 814		if (extent_end <= search_start) {
 815			path->slots[0]++;
 816			goto next_slot;
 817		}
 818
 819		found = 1;
 820		search_start = max(key.offset, args->start);
 821		if (recow || !modify_tree) {
 822			modify_tree = -1;
 823			btrfs_release_path(path);
 824			continue;
 825		}
 826
 827		/*
 828		 *     | - range to drop - |
 829		 *  | -------- extent -------- |
 830		 */
 831		if (args->start > key.offset && args->end < extent_end) {
 832			BUG_ON(del_nr > 0);
 833			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 834				ret = -EOPNOTSUPP;
 835				break;
 836			}
 837
 838			memcpy(&new_key, &key, sizeof(new_key));
 839			new_key.offset = args->start;
 840			ret = btrfs_duplicate_item(trans, root, path,
 841						   &new_key);
 842			if (ret == -EAGAIN) {
 843				btrfs_release_path(path);
 844				continue;
 845			}
 846			if (ret < 0)
 847				break;
 848
 849			leaf = path->nodes[0];
 850			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 851					    struct btrfs_file_extent_item);
 852			btrfs_set_file_extent_num_bytes(leaf, fi,
 853							args->start - key.offset);
 854
 855			fi = btrfs_item_ptr(leaf, path->slots[0],
 856					    struct btrfs_file_extent_item);
 857
 858			extent_offset += args->start - key.offset;
 859			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 860			btrfs_set_file_extent_num_bytes(leaf, fi,
 861							extent_end - args->start);
 862			btrfs_mark_buffer_dirty(leaf);
 863
 864			if (update_refs && disk_bytenr > 0) {
 865				btrfs_init_generic_ref(&ref,
 866						BTRFS_ADD_DELAYED_REF,
 867						disk_bytenr, num_bytes, 0);
 868				btrfs_init_data_ref(&ref,
 869						root->root_key.objectid,
 870						new_key.objectid,
 871						args->start - extent_offset);
 872				ret = btrfs_inc_extent_ref(trans, &ref);
 873				BUG_ON(ret); /* -ENOMEM */
 874			}
 875			key.offset = args->start;
 876		}
 877		/*
 878		 * From here on out we will have actually dropped something, so
 879		 * last_end can be updated.
 880		 */
 881		last_end = extent_end;
 882
 883		/*
 884		 *  | ---- range to drop ----- |
 885		 *      | -------- extent -------- |
 886		 */
 887		if (args->start <= key.offset && args->end < extent_end) {
 888			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 889				ret = -EOPNOTSUPP;
 890				break;
 891			}
 892
 893			memcpy(&new_key, &key, sizeof(new_key));
 894			new_key.offset = args->end;
 895			btrfs_set_item_key_safe(fs_info, path, &new_key);
 896
 897			extent_offset += args->end - key.offset;
 898			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 899			btrfs_set_file_extent_num_bytes(leaf, fi,
 900							extent_end - args->end);
 901			btrfs_mark_buffer_dirty(leaf);
 902			if (update_refs && disk_bytenr > 0)
 903				args->bytes_found += args->end - key.offset;
 904			break;
 905		}
 906
 907		search_start = extent_end;
 908		/*
 909		 *       | ---- range to drop ----- |
 910		 *  | -------- extent -------- |
 911		 */
 912		if (args->start > key.offset && args->end >= extent_end) {
 913			BUG_ON(del_nr > 0);
 914			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 915				ret = -EOPNOTSUPP;
 916				break;
 917			}
 918
 919			btrfs_set_file_extent_num_bytes(leaf, fi,
 920							args->start - key.offset);
 921			btrfs_mark_buffer_dirty(leaf);
 922			if (update_refs && disk_bytenr > 0)
 923				args->bytes_found += extent_end - args->start;
 924			if (args->end == extent_end)
 925				break;
 926
 927			path->slots[0]++;
 928			goto next_slot;
 929		}
 930
 931		/*
 932		 *  | ---- range to drop ----- |
 933		 *    | ------ extent ------ |
 934		 */
 935		if (args->start <= key.offset && args->end >= extent_end) {
 936delete_extent_item:
 937			if (del_nr == 0) {
 938				del_slot = path->slots[0];
 939				del_nr = 1;
 940			} else {
 941				BUG_ON(del_slot + del_nr != path->slots[0]);
 942				del_nr++;
 943			}
 944
 945			if (update_refs &&
 946			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
 947				args->bytes_found += extent_end - key.offset;
 948				extent_end = ALIGN(extent_end,
 949						   fs_info->sectorsize);
 950			} else if (update_refs && disk_bytenr > 0) {
 951				btrfs_init_generic_ref(&ref,
 952						BTRFS_DROP_DELAYED_REF,
 953						disk_bytenr, num_bytes, 0);
 954				btrfs_init_data_ref(&ref,
 955						root->root_key.objectid,
 956						key.objectid,
 957						key.offset - extent_offset);
 958				ret = btrfs_free_extent(trans, &ref);
 959				BUG_ON(ret); /* -ENOMEM */
 960				args->bytes_found += extent_end - key.offset;
 961			}
 962
 963			if (args->end == extent_end)
 964				break;
 965
 966			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
 967				path->slots[0]++;
 968				goto next_slot;
 969			}
 970
 971			ret = btrfs_del_items(trans, root, path, del_slot,
 972					      del_nr);
 973			if (ret) {
 974				btrfs_abort_transaction(trans, ret);
 975				break;
 976			}
 977
 978			del_nr = 0;
 979			del_slot = 0;
 980
 981			btrfs_release_path(path);
 982			continue;
 983		}
 984
 985		BUG();
 986	}
 987
 988	if (!ret && del_nr > 0) {
 989		/*
 990		 * Set path->slots[0] to first slot, so that after the delete
  991		 * if items are moved off from our leaf to its immediate left or
  992		 * right neighbor leaves, we end up with a correct and adjusted
 993		 * path->slots[0] for our insertion (if args->replace_extent).
 994		 */
 995		path->slots[0] = del_slot;
 996		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
 997		if (ret)
 998			btrfs_abort_transaction(trans, ret);
 999	}
1000
1001	leaf = path->nodes[0];
1002	/*
1003	 * If btrfs_del_items() was called, it might have deleted a leaf, in
1004	 * which case it unlocked our path, so check path->locks[0] matches a
1005	 * write lock.
1006	 */
1007	if (!ret && args->replace_extent && leafs_visited == 1 &&
1008	    path->locks[0] == BTRFS_WRITE_LOCK &&
1009	    btrfs_leaf_free_space(leaf) >=
1010	    sizeof(struct btrfs_item) + args->extent_item_size) {
1011
1012		key.objectid = ino;
1013		key.type = BTRFS_EXTENT_DATA_KEY;
1014		key.offset = args->start;
1015		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1016			struct btrfs_key slot_key;
1017
1018			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1019			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1020				path->slots[0]++;
1021		}
1022		setup_items_for_insert(root, path, &key,
1023				       &args->extent_item_size, 1);
1024		args->extent_inserted = true;
1025	}
1026
1027	if (!args->path)
1028		btrfs_free_path(path);
1029	else if (!args->extent_inserted)
1030		btrfs_release_path(path);
1031out:
1032	args->drop_end = found ? min(args->end, last_end) : args->end;
1033
1034	return ret;
1035}
1036
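     /*
      * Check whether the file extent item at @slot refers to the same
      * uncompressed, unencrypted extent (matching @bytenr and @orig_offset),
      * so that it can be merged with a neighbour; on success report the
      * extent's start and end file offsets through @start and @end.
      */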
1037static int extent_mergeable(struct extent_buffer *leaf, int slot,
1038			    u64 objectid, u64 bytenr, u64 orig_offset,
1039			    u64 *start, u64 *end)
1040{
1041	struct btrfs_file_extent_item *fi;
1042	struct btrfs_key key;
1043	u64 extent_end;
1044
1045	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1046		return 0;
1047
1048	btrfs_item_key_to_cpu(leaf, &key, slot);
1049	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1050		return 0;
1051
1052	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1053	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1054	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1055	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1056	    btrfs_file_extent_compression(leaf, fi) ||
1057	    btrfs_file_extent_encryption(leaf, fi) ||
1058	    btrfs_file_extent_other_encoding(leaf, fi))
1059		return 0;
1060
1061	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1062	if ((*start && *start != key.offset) || (*end && *end != extent_end))
1063		return 0;
1064
1065	*start = key.offset;
1066	*end = extent_end;
1067	return 1;
1068}
1069
1070/*
1071 * Mark extent in the range start - end as written.
1072 *
1073 * This changes extent type from 'pre-allocated' to 'regular'. If only
1074 * part of extent is marked as written, the extent will be split into
1075 * two or three.
1076 */
1077int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1078			      struct btrfs_inode *inode, u64 start, u64 end)
1079{
1080	struct btrfs_fs_info *fs_info = trans->fs_info;
1081	struct btrfs_root *root = inode->root;
1082	struct extent_buffer *leaf;
1083	struct btrfs_path *path;
1084	struct btrfs_file_extent_item *fi;
1085	struct btrfs_ref ref = { 0 };
1086	struct btrfs_key key;
1087	struct btrfs_key new_key;
1088	u64 bytenr;
1089	u64 num_bytes;
1090	u64 extent_end;
1091	u64 orig_offset;
1092	u64 other_start;
1093	u64 other_end;
1094	u64 split;
1095	int del_nr = 0;
1096	int del_slot = 0;
1097	int recow;
1098	int ret = 0;
1099	u64 ino = btrfs_ino(inode);
1100
1101	path = btrfs_alloc_path();
1102	if (!path)
1103		return -ENOMEM;
1104again:
1105	recow = 0;
1106	split = start;
1107	key.objectid = ino;
1108	key.type = BTRFS_EXTENT_DATA_KEY;
1109	key.offset = split;
1110
1111	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1112	if (ret < 0)
1113		goto out;
1114	if (ret > 0 && path->slots[0] > 0)
1115		path->slots[0]--;
1116
1117	leaf = path->nodes[0];
1118	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1119	if (key.objectid != ino ||
1120	    key.type != BTRFS_EXTENT_DATA_KEY) {
1121		ret = -EINVAL;
1122		btrfs_abort_transaction(trans, ret);
1123		goto out;
1124	}
1125	fi = btrfs_item_ptr(leaf, path->slots[0],
1126			    struct btrfs_file_extent_item);
1127	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1128		ret = -EINVAL;
1129		btrfs_abort_transaction(trans, ret);
1130		goto out;
1131	}
1132	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1133	if (key.offset > start || extent_end < end) {
1134		ret = -EINVAL;
1135		btrfs_abort_transaction(trans, ret);
1136		goto out;
1137	}
1138
1139	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1140	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1141	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1142	memcpy(&new_key, &key, sizeof(new_key));
1143
1144	if (start == key.offset && end < extent_end) {
1145		other_start = 0;
1146		other_end = start;
1147		if (extent_mergeable(leaf, path->slots[0] - 1,
1148				     ino, bytenr, orig_offset,
1149				     &other_start, &other_end)) {
1150			new_key.offset = end;
1151			btrfs_set_item_key_safe(fs_info, path, &new_key);
1152			fi = btrfs_item_ptr(leaf, path->slots[0],
1153					    struct btrfs_file_extent_item);
1154			btrfs_set_file_extent_generation(leaf, fi,
1155							 trans->transid);
1156			btrfs_set_file_extent_num_bytes(leaf, fi,
1157							extent_end - end);
1158			btrfs_set_file_extent_offset(leaf, fi,
1159						     end - orig_offset);
1160			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1161					    struct btrfs_file_extent_item);
1162			btrfs_set_file_extent_generation(leaf, fi,
1163							 trans->transid);
1164			btrfs_set_file_extent_num_bytes(leaf, fi,
1165							end - other_start);
1166			btrfs_mark_buffer_dirty(leaf);
1167			goto out;
1168		}
1169	}
1170
1171	if (start > key.offset && end == extent_end) {
1172		other_start = end;
1173		other_end = 0;
1174		if (extent_mergeable(leaf, path->slots[0] + 1,
1175				     ino, bytenr, orig_offset,
1176				     &other_start, &other_end)) {
1177			fi = btrfs_item_ptr(leaf, path->slots[0],
1178					    struct btrfs_file_extent_item);
1179			btrfs_set_file_extent_num_bytes(leaf, fi,
1180							start - key.offset);
1181			btrfs_set_file_extent_generation(leaf, fi,
1182							 trans->transid);
1183			path->slots[0]++;
1184			new_key.offset = start;
1185			btrfs_set_item_key_safe(fs_info, path, &new_key);
1186
1187			fi = btrfs_item_ptr(leaf, path->slots[0],
1188					    struct btrfs_file_extent_item);
1189			btrfs_set_file_extent_generation(leaf, fi,
1190							 trans->transid);
1191			btrfs_set_file_extent_num_bytes(leaf, fi,
1192							other_end - start);
1193			btrfs_set_file_extent_offset(leaf, fi,
1194						     start - orig_offset);
1195			btrfs_mark_buffer_dirty(leaf);
1196			goto out;
1197		}
1198	}
1199
1200	while (start > key.offset || end < extent_end) {
1201		if (key.offset == start)
1202			split = end;
1203
1204		new_key.offset = split;
1205		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1206		if (ret == -EAGAIN) {
1207			btrfs_release_path(path);
1208			goto again;
1209		}
1210		if (ret < 0) {
1211			btrfs_abort_transaction(trans, ret);
1212			goto out;
1213		}
1214
1215		leaf = path->nodes[0];
1216		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1217				    struct btrfs_file_extent_item);
1218		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1219		btrfs_set_file_extent_num_bytes(leaf, fi,
1220						split - key.offset);
1221
1222		fi = btrfs_item_ptr(leaf, path->slots[0],
1223				    struct btrfs_file_extent_item);
1224
1225		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1226		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1227		btrfs_set_file_extent_num_bytes(leaf, fi,
1228						extent_end - split);
1229		btrfs_mark_buffer_dirty(leaf);
1230
1231		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1232				       num_bytes, 0);
1233		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1234				    orig_offset);
1235		ret = btrfs_inc_extent_ref(trans, &ref);
1236		if (ret) {
1237			btrfs_abort_transaction(trans, ret);
1238			goto out;
1239		}
1240
1241		if (split == start) {
1242			key.offset = start;
1243		} else {
1244			if (start != key.offset) {
1245				ret = -EINVAL;
1246				btrfs_abort_transaction(trans, ret);
1247				goto out;
1248			}
1249			path->slots[0]--;
1250			extent_end = end;
1251		}
1252		recow = 1;
1253	}
1254
1255	other_start = end;
1256	other_end = 0;
1257	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1258			       num_bytes, 0);
1259	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
1260	if (extent_mergeable(leaf, path->slots[0] + 1,
1261			     ino, bytenr, orig_offset,
1262			     &other_start, &other_end)) {
1263		if (recow) {
1264			btrfs_release_path(path);
1265			goto again;
1266		}
1267		extent_end = other_end;
1268		del_slot = path->slots[0] + 1;
1269		del_nr++;
1270		ret = btrfs_free_extent(trans, &ref);
1271		if (ret) {
1272			btrfs_abort_transaction(trans, ret);
1273			goto out;
1274		}
1275	}
1276	other_start = 0;
1277	other_end = start;
1278	if (extent_mergeable(leaf, path->slots[0] - 1,
1279			     ino, bytenr, orig_offset,
1280			     &other_start, &other_end)) {
1281		if (recow) {
1282			btrfs_release_path(path);
1283			goto again;
1284		}
1285		key.offset = other_start;
1286		del_slot = path->slots[0];
1287		del_nr++;
1288		ret = btrfs_free_extent(trans, &ref);
1289		if (ret) {
1290			btrfs_abort_transaction(trans, ret);
1291			goto out;
1292		}
1293	}
1294	if (del_nr == 0) {
1295		fi = btrfs_item_ptr(leaf, path->slots[0],
1296			   struct btrfs_file_extent_item);
1297		btrfs_set_file_extent_type(leaf, fi,
1298					   BTRFS_FILE_EXTENT_REG);
1299		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1300		btrfs_mark_buffer_dirty(leaf);
1301	} else {
1302		fi = btrfs_item_ptr(leaf, del_slot - 1,
1303			   struct btrfs_file_extent_item);
1304		btrfs_set_file_extent_type(leaf, fi,
1305					   BTRFS_FILE_EXTENT_REG);
1306		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1307		btrfs_set_file_extent_num_bytes(leaf, fi,
1308						extent_end - key.offset);
1309		btrfs_mark_buffer_dirty(leaf);
1310
1311		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1312		if (ret < 0) {
1313			btrfs_abort_transaction(trans, ret);
1314			goto out;
1315		}
1316	}
1317out:
1318	btrfs_free_path(path);
1319	return ret;
1320}
1321
 1322/*
 1323 * On error we return an unlocked page and the error value;
 1324 * on success we return a locked page and 0.
 1325 */
1326static int prepare_uptodate_page(struct inode *inode,
1327				 struct page *page, u64 pos,
1328				 bool force_uptodate)
1329{
1330	int ret = 0;
1331
1332	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1333	    !PageUptodate(page)) {
1334		ret = btrfs_readpage(NULL, page);
1335		if (ret)
1336			return ret;
1337		lock_page(page);
1338		if (!PageUptodate(page)) {
1339			unlock_page(page);
1340			return -EIO;
1341		}
1342		if (page->mapping != inode->i_mapping) {
1343			unlock_page(page);
1344			return -EAGAIN;
1345		}
1346	}
1347	return 0;
1348}
1349
1350/*
1351 * this just gets pages into the page cache and locks them down.
1352 */
1353static noinline int prepare_pages(struct inode *inode, struct page **pages,
1354				  size_t num_pages, loff_t pos,
1355				  size_t write_bytes, bool force_uptodate)
1356{
1357	int i;
1358	unsigned long index = pos >> PAGE_SHIFT;
1359	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1360	int err = 0;
1361	int faili;
1362
1363	for (i = 0; i < num_pages; i++) {
1364again:
1365		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1366					       mask | __GFP_WRITE);
1367		if (!pages[i]) {
1368			faili = i - 1;
1369			err = -ENOMEM;
1370			goto fail;
1371		}
1372
1373		err = set_page_extent_mapped(pages[i]);
1374		if (err < 0) {
1375			faili = i;
1376			goto fail;
1377		}
1378
1379		if (i == 0)
1380			err = prepare_uptodate_page(inode, pages[i], pos,
1381						    force_uptodate);
1382		if (!err && i == num_pages - 1)
1383			err = prepare_uptodate_page(inode, pages[i],
1384						    pos + write_bytes, false);
1385		if (err) {
1386			put_page(pages[i]);
1387			if (err == -EAGAIN) {
1388				err = 0;
1389				goto again;
1390			}
1391			faili = i - 1;
1392			goto fail;
1393		}
1394		wait_on_page_writeback(pages[i]);
1395	}
1396
1397	return 0;
1398fail:
1399	while (faili >= 0) {
1400		unlock_page(pages[faili]);
1401		put_page(pages[faili]);
1402		faili--;
1403	}
1404	return err;
1405
1406}
1407
 1408/*
 1409 * This function locks the extent and properly waits for data=ordered extents
 1410 * to finish before allowing the pages to be modified if needed.
 1411 *
 1412 * The return value:
 1413 * 1 - the extent is locked
 1414 * 0 - the extent is not locked, and everything is OK
 1415 * -EAGAIN - need to re-prepare the pages
 1416 * any other < 0 value - something went wrong
 1417 */
1418static noinline int
1419lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1420				size_t num_pages, loff_t pos,
1421				size_t write_bytes,
1422				u64 *lockstart, u64 *lockend,
1423				struct extent_state **cached_state)
1424{
1425	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1426	u64 start_pos;
1427	u64 last_pos;
1428	int i;
1429	int ret = 0;
1430
1431	start_pos = round_down(pos, fs_info->sectorsize);
1432	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1433
1434	if (start_pos < inode->vfs_inode.i_size) {
1435		struct btrfs_ordered_extent *ordered;
1436
1437		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1438				cached_state);
1439		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1440						     last_pos - start_pos + 1);
1441		if (ordered &&
1442		    ordered->file_offset + ordered->num_bytes > start_pos &&
1443		    ordered->file_offset <= last_pos) {
1444			unlock_extent_cached(&inode->io_tree, start_pos,
1445					last_pos, cached_state);
1446			for (i = 0; i < num_pages; i++) {
1447				unlock_page(pages[i]);
1448				put_page(pages[i]);
1449			}
1450			btrfs_start_ordered_extent(ordered, 1);
1451			btrfs_put_ordered_extent(ordered);
1452			return -EAGAIN;
1453		}
1454		if (ordered)
1455			btrfs_put_ordered_extent(ordered);
1456
1457		*lockstart = start_pos;
1458		*lockend = last_pos;
1459		ret = 1;
1460	}
1461
1462	/*
1463	 * We should be called after prepare_pages() which should have locked
1464	 * all pages in the range.
1465	 */
1466	for (i = 0; i < num_pages; i++)
1467		WARN_ON(!PageLocked(pages[i]));
1468
1469	return ret;
1470}
1471
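     /*
      * Common helper for the nowait and blocking nocow checks.  On a positive
      * return, *write_bytes is clamped to the NOCOW-able length and, unless
      * @nowait is set, the root's snapshot lock is left held for the caller
      * to release.
      */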
1472static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1473			   size_t *write_bytes, bool nowait)
1474{
1475	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1476	struct btrfs_root *root = inode->root;
1477	u64 lockstart, lockend;
1478	u64 num_bytes;
1479	int ret;
1480
1481	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1482		return 0;
1483
1484	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
1485		return -EAGAIN;
1486
1487	lockstart = round_down(pos, fs_info->sectorsize);
1488	lockend = round_up(pos + *write_bytes,
1489			   fs_info->sectorsize) - 1;
1490	num_bytes = lockend - lockstart + 1;
1491
1492	if (nowait) {
1493		struct btrfs_ordered_extent *ordered;
1494
1495		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1496			return -EAGAIN;
1497
1498		ordered = btrfs_lookup_ordered_range(inode, lockstart,
1499						     num_bytes);
1500		if (ordered) {
1501			btrfs_put_ordered_extent(ordered);
1502			ret = -EAGAIN;
1503			goto out_unlock;
1504		}
1505	} else {
1506		btrfs_lock_and_flush_ordered_range(inode, lockstart,
1507						   lockend, NULL);
1508	}
1509
1510	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1511			NULL, NULL, NULL, false);
1512	if (ret <= 0) {
1513		ret = 0;
1514		if (!nowait)
1515			btrfs_drew_write_unlock(&root->snapshot_lock);
1516	} else {
 1517		*write_bytes = min_t(size_t, *write_bytes,
1518				     num_bytes - pos + lockstart);
1519	}
1520out_unlock:
1521	unlock_extent(&inode->io_tree, lockstart, lockend);
1522
1523	return ret;
1524}
1525
1526static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1527			      size_t *write_bytes)
1528{
1529	return check_can_nocow(inode, pos, write_bytes, true);
1530}
1531
1532/*
1533 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1534 *
1535 * @pos:	 File offset
1536 * @write_bytes: The length to write, will be updated to the nocow writeable
1537 *		 range
1538 *
1539 * This function will flush ordered extents in the range to ensure proper
1540 * nocow checks.
1541 *
1542 * Return:
1543 * >0		and update @write_bytes if we can do nocow write
1544 *  0		if we can't do nocow write
 1545 * -EAGAIN	if we can't get the needed lock or there are ordered extents
 1546 * 		(for the nowait == true case)
1547 * <0		if other error happened
1548 *
1549 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1550 */
1551int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1552			   size_t *write_bytes)
1553{
1554	return check_can_nocow(inode, pos, write_bytes, false);
1555}
1556
1557void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1558{
1559	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1560}
1561
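     /*
      * Bump mtime/ctime (and the inode version) for a write, unless the
      * inode is flagged to skip c/mtime updates (IS_NOCMTIME).
      */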
1562static void update_time_for_write(struct inode *inode)
1563{
1564	struct timespec64 now;
1565
1566	if (IS_NOCMTIME(inode))
1567		return;
1568
1569	now = current_time(inode);
1570	if (!timespec64_equal(&inode->i_mtime, &now))
1571		inode->i_mtime = now;
1572
1573	if (!timespec64_equal(&inode->i_ctime, &now))
1574		inode->i_ctime = now;
1575
1576	if (IS_I_VERSION(inode))
1577		inode_inc_iversion(inode);
1578}
1579
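     /*
      * Checks shared by the buffered and direct write paths: fail NOWAIT
      * writes that would require COW or allocation, strip privileges, update
      * the timestamps and expand the hole when writing beyond i_size.
      */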
1580static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1581			     size_t count)
1582{
1583	struct file *file = iocb->ki_filp;
1584	struct inode *inode = file_inode(file);
1585	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1586	loff_t pos = iocb->ki_pos;
1587	int ret;
1588	loff_t oldsize;
1589	loff_t start_pos;
1590
1591	if (iocb->ki_flags & IOCB_NOWAIT) {
1592		size_t nocow_bytes = count;
1593
1594		/* We will allocate space in case nodatacow is not set, so bail */
1595		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
1596			return -EAGAIN;
1597		/*
1598		 * There are holes in the range or parts of the range that must
1599		 * be COWed (shared extents, RO block groups, etc), so just bail
1600		 * out.
1601		 */
1602		if (nocow_bytes < count)
1603			return -EAGAIN;
1604	}
1605
1606	current->backing_dev_info = inode_to_bdi(inode);
1607	ret = file_remove_privs(file);
1608	if (ret)
1609		return ret;
1610
1611	/*
1612	 * We reserve space for updating the inode when we reserve space for the
1613	 * extent we are going to write, so we will enospc out there.  We don't
1614	 * need to start yet another transaction to update the inode as we will
1615	 * update the inode when we finish writing whatever data we write.
1616	 */
1617	update_time_for_write(inode);
1618
1619	start_pos = round_down(pos, fs_info->sectorsize);
1620	oldsize = i_size_read(inode);
1621	if (start_pos > oldsize) {
 1622		/* Expand the hole to cover the write data, preventing an empty gap */
1623		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1624
1625		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1626		if (ret) {
1627			current->backing_dev_info = NULL;
1628			return ret;
1629		}
1630	}
1631
1632	return 0;
1633}
1634
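     /*
      * Buffered write path: reserve data and metadata space (or take the
      * nocow lock), prepare and lock the pages, copy the user data in and
      * mark the range as delalloc, releasing whatever was reserved but not
      * dirtied.
      */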
1635static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1636					       struct iov_iter *i)
1637{
1638	struct file *file = iocb->ki_filp;
1639	loff_t pos;
1640	struct inode *inode = file_inode(file);
1641	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1642	struct page **pages = NULL;
1643	struct extent_changeset *data_reserved = NULL;
1644	u64 release_bytes = 0;
1645	u64 lockstart;
1646	u64 lockend;
1647	size_t num_written = 0;
1648	int nrptrs;
1649	ssize_t ret;
1650	bool only_release_metadata = false;
1651	bool force_page_uptodate = false;
1652	loff_t old_isize = i_size_read(inode);
1653	unsigned int ilock_flags = 0;
1654
1655	if (iocb->ki_flags & IOCB_NOWAIT)
1656		ilock_flags |= BTRFS_ILOCK_TRY;
1657
1658	ret = btrfs_inode_lock(inode, ilock_flags);
1659	if (ret < 0)
1660		return ret;
1661
1662	ret = generic_write_checks(iocb, i);
1663	if (ret <= 0)
1664		goto out;
1665
1666	ret = btrfs_write_check(iocb, i, ret);
1667	if (ret < 0)
1668		goto out;
1669
1670	pos = iocb->ki_pos;
1671	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1672			PAGE_SIZE / (sizeof(struct page *)));
1673	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1674	nrptrs = max(nrptrs, 8);
1675	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1676	if (!pages) {
1677		ret = -ENOMEM;
1678		goto out;
1679	}
1680
1681	while (iov_iter_count(i) > 0) {
1682		struct extent_state *cached_state = NULL;
1683		size_t offset = offset_in_page(pos);
1684		size_t sector_offset;
1685		size_t write_bytes = min(iov_iter_count(i),
1686					 nrptrs * (size_t)PAGE_SIZE -
1687					 offset);
1688		size_t num_pages;
1689		size_t reserve_bytes;
1690		size_t dirty_pages;
1691		size_t copied;
1692		size_t dirty_sectors;
1693		size_t num_sectors;
1694		int extents_locked;
1695
 1696		/*
 1697		 * Fault pages before locking them in prepare_pages
 1698		 * to avoid a recursive lock.
 1699		 */
1700		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1701			ret = -EFAULT;
1702			break;
1703		}
1704
1705		only_release_metadata = false;
1706		sector_offset = pos & (fs_info->sectorsize - 1);
1707
1708		extent_changeset_release(data_reserved);
1709		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1710						  &data_reserved, pos,
1711						  write_bytes);
1712		if (ret < 0) {
1713			/*
1714			 * If we don't have to COW at the offset, reserve
1715			 * metadata only. write_bytes may get smaller than
1716			 * requested here.
1717			 */
1718			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1719						   &write_bytes) > 0)
1720				only_release_metadata = true;
1721			else
1722				break;
1723		}
1724
1725		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1726		WARN_ON(num_pages > nrptrs);
1727		reserve_bytes = round_up(write_bytes + sector_offset,
1728					 fs_info->sectorsize);
1729		WARN_ON(reserve_bytes == 0);
1730		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1731				reserve_bytes);
1732		if (ret) {
1733			if (!only_release_metadata)
1734				btrfs_free_reserved_data_space(BTRFS_I(inode),
1735						data_reserved, pos,
1736						write_bytes);
1737			else
1738				btrfs_check_nocow_unlock(BTRFS_I(inode));
1739			break;
1740		}
1741
1742		release_bytes = reserve_bytes;
1743again:
 1744		/*
 1745		 * This is going to set up the pages array with the number of
 1746		 * pages we want, so we don't really need to worry about the
 1747		 * contents of pages from loop to loop.
 1748		 */
1749		ret = prepare_pages(inode, pages, num_pages,
1750				    pos, write_bytes,
1751				    force_page_uptodate);
1752		if (ret) {
1753			btrfs_delalloc_release_extents(BTRFS_I(inode),
1754						       reserve_bytes);
1755			break;
1756		}
1757
1758		extents_locked = lock_and_cleanup_extent_if_need(
1759				BTRFS_I(inode), pages,
1760				num_pages, pos, write_bytes, &lockstart,
1761				&lockend, &cached_state);
1762		if (extents_locked < 0) {
1763			if (extents_locked == -EAGAIN)
1764				goto again;
1765			btrfs_delalloc_release_extents(BTRFS_I(inode),
1766						       reserve_bytes);
1767			ret = extents_locked;
1768			break;
1769		}
1770
1771		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1772
1773		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1774		dirty_sectors = round_up(copied + sector_offset,
1775					fs_info->sectorsize);
1776		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1777
1778		/*
1779		 * if we have trouble faulting in the pages, fall
1780		 * back to one page at a time
1781		 */
1782		if (copied < write_bytes)
1783			nrptrs = 1;
1784
1785		if (copied == 0) {
1786			force_page_uptodate = true;
1787			dirty_sectors = 0;
1788			dirty_pages = 0;
1789		} else {
1790			force_page_uptodate = false;
1791			dirty_pages = DIV_ROUND_UP(copied + offset,
1792						   PAGE_SIZE);
1793		}
1794
1795		if (num_sectors > dirty_sectors) {
1796			/* release everything except the sectors we dirtied */
1797			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1798			if (only_release_metadata) {
1799				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1800							release_bytes, true);
1801			} else {
1802				u64 __pos;
1803
1804				__pos = round_down(pos,
1805						   fs_info->sectorsize) +
1806					(dirty_pages << PAGE_SHIFT);
1807				btrfs_delalloc_release_space(BTRFS_I(inode),
1808						data_reserved, __pos,
1809						release_bytes, true);
1810			}
1811		}
1812
1813		release_bytes = round_up(copied + sector_offset,
1814					fs_info->sectorsize);
1815
1816		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1817					dirty_pages, pos, copied,
1818					&cached_state, only_release_metadata);
1819
1820		/*
1821		 * If we have not locked the extent range, because the range's
1822		 * start offset is >= i_size, we might still have a non-NULL
1823		 * cached extent state, acquired while marking the extent range
1824		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1825		 * possible cached extent state to avoid a memory leak.
1826		 */
1827		if (extents_locked)
1828			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1829					     lockstart, lockend, &cached_state);
1830		else
1831			free_extent_state(cached_state);
1832
1833		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1834		if (ret) {
1835			btrfs_drop_pages(pages, num_pages);
1836			break;
1837		}
1838
1839		release_bytes = 0;
1840		if (only_release_metadata)
1841			btrfs_check_nocow_unlock(BTRFS_I(inode));
1842
1843		btrfs_drop_pages(pages, num_pages);
1844
1845		cond_resched();
1846
1847		balance_dirty_pages_ratelimited(inode->i_mapping);
1848
1849		pos += copied;
1850		num_written += copied;
1851	}
1852
1853	kfree(pages);
1854
1855	if (release_bytes) {
1856		if (only_release_metadata) {
1857			btrfs_check_nocow_unlock(BTRFS_I(inode));
1858			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1859					release_bytes, true);
1860		} else {
1861			btrfs_delalloc_release_space(BTRFS_I(inode),
1862					data_reserved,
1863					round_down(pos, fs_info->sectorsize),
1864					release_bytes, true);
1865		}
1866	}
1867
1868	extent_changeset_free(data_reserved);
1869	if (num_written > 0) {
1870		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1871		iocb->ki_pos += num_written;
1872	}
1873out:
1874	btrfs_inode_unlock(inode, ilock_flags);
1875	return num_written ? num_written : ret;
1876}
1877
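     /*
      * Direct IO requires the file offset and the iov to be aligned to the
      * sector size; a non-zero return makes the caller fall back to
      * buffered IO.
      */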
1878static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1879			       const struct iov_iter *iter, loff_t offset)
1880{
1881	const u32 blocksize_mask = fs_info->sectorsize - 1;
1882
1883	if (offset & blocksize_mask)
1884		return -EINVAL;
1885
1886	if (iov_iter_alignment(iter) & blocksize_mask)
1887		return -EINVAL;
1888
1889	return 0;
1890}
1891
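     /*
      * Direct write path.  Misaligned or partially completed direct IO
      * falls back to a buffered write, followed by writeback and page cache
      * invalidation so that a later direct read sees the new data.
      */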
1892static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1893{
1894	struct file *file = iocb->ki_filp;
1895	struct inode *inode = file_inode(file);
1896	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1897	loff_t pos;
1898	ssize_t written = 0;
1899	ssize_t written_buffered;
1900	loff_t endbyte;
1901	ssize_t err;
1902	unsigned int ilock_flags = 0;
1903	struct iomap_dio *dio = NULL;
1904
1905	if (iocb->ki_flags & IOCB_NOWAIT)
1906		ilock_flags |= BTRFS_ILOCK_TRY;
1907
1908	/* If the write DIO is within EOF, use a shared lock */
1909	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1910		ilock_flags |= BTRFS_ILOCK_SHARED;
1911
1912relock:
1913	err = btrfs_inode_lock(inode, ilock_flags);
1914	if (err < 0)
1915		return err;
1916
1917	err = generic_write_checks(iocb, from);
1918	if (err <= 0) {
1919		btrfs_inode_unlock(inode, ilock_flags);
1920		return err;
1921	}
1922
1923	err = btrfs_write_check(iocb, from, err);
1924	if (err < 0) {
1925		btrfs_inode_unlock(inode, ilock_flags);
1926		goto out;
1927	}
1928
1929	pos = iocb->ki_pos;
 1930	/*
 1931	 * Re-check since the file size may have changed just before taking the
 1932	 * lock, or pos may have changed because of O_APPEND in generic_write_checks().
 1933	 */
1934	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1935	    pos + iov_iter_count(from) > i_size_read(inode)) {
1936		btrfs_inode_unlock(inode, ilock_flags);
1937		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1938		goto relock;
1939	}
1940
1941	if (check_direct_IO(fs_info, from, pos)) {
1942		btrfs_inode_unlock(inode, ilock_flags);
1943		goto buffered;
1944	}
1945
1946	dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
1947			     0);
1948
1949	btrfs_inode_unlock(inode, ilock_flags);
1950
1951	if (IS_ERR_OR_NULL(dio)) {
1952		err = PTR_ERR_OR_ZERO(dio);
1953		if (err < 0 && err != -ENOTBLK)
1954			goto out;
1955	} else {
1956		written = iomap_dio_complete(dio);
1957	}
1958
1959	if (written < 0 || !iov_iter_count(from)) {
1960		err = written;
1961		goto out;
1962	}
1963
1964buffered:
1965	pos = iocb->ki_pos;
1966	written_buffered = btrfs_buffered_write(iocb, from);
1967	if (written_buffered < 0) {
1968		err = written_buffered;
1969		goto out;
1970	}
1971	/*
1972	 * Ensure all data is persisted. We want the next direct IO read to be
1973	 * able to read what was just written.
1974	 */
1975	endbyte = pos + written_buffered - 1;
1976	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1977	if (err)
1978		goto out;
1979	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1980	if (err)
1981		goto out;
1982	written += written_buffered;
1983	iocb->ki_pos = pos + written_buffered;
1984	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1985				 endbyte >> PAGE_SHIFT);
1986out:
1987	return written ? written : err;
1988}
1989
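     /*
      * Entry point for write(2): reject writes once the filesystem has
      * turned read-only, dispatch to the direct or buffered path and handle
      * the synchronous semantics of O_SYNC/O_DSYNC via generic_write_sync().
      */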
1990static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1991				    struct iov_iter *from)
1992{
1993	struct file *file = iocb->ki_filp;
1994	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1995	ssize_t num_written = 0;
1996	const bool sync = iocb->ki_flags & IOCB_DSYNC;
1997
 1998	/*
 1999	 * If the fs flips readonly due to some unrecoverable error, even
 2000	 * though we have this file open for writing, we have to stop this
 2001	 * write operation to ensure consistency.
 2002	 */
2003	if (test_bit(BTRFS_FS_STATE_ERROR, &inode->root->fs_info->fs_state))
2004		return -EROFS;
2005
2006	if (!(iocb->ki_flags & IOCB_DIRECT) &&
2007	    (iocb->ki_flags & IOCB_NOWAIT))
2008		return -EOPNOTSUPP;
2009
2010	if (sync)
2011		atomic_inc(&inode->sync_writers);
2012
2013	if (iocb->ki_flags & IOCB_DIRECT)
2014		num_written = btrfs_direct_write(iocb, from);
2015	else
2016		num_written = btrfs_buffered_write(iocb, from);
2017
2018	btrfs_set_inode_last_sub_trans(inode);
2019
2020	if (num_written > 0)
2021		num_written = generic_write_sync(iocb, num_written);
2022
2023	if (sync)
2024		atomic_dec(&inode->sync_writers);
2025
2026	current->backing_dev_info = NULL;
2027	return num_written;
2028}
2029
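/*
 * ->release() hook: free the per-file readdir buffer, if one was
 * allocated, and start writeback early when the file was truncated to
 * zero size while open (BTRFS_INODE_FLUSH_ON_CLOSE), as explained below.
 */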
2030int btrfs_release_file(struct inode *inode, struct file *filp)
2031{
2032	struct btrfs_file_private *private = filp->private_data;
2033
2034	if (private)
2035		kfree(private->filldir_buf);
2036	kfree(private);
2037	filp->private_data = NULL;
2038
2039	/*
2040	 * Set by setattr when we are about to truncate a file from a non-zero
2041	 * size to a zero size.  This tries to flush down new bytes that may
2042	 * have been written if the application were using truncate to replace
2043	 * a file in place.
2044	 */
2045	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2046			       &BTRFS_I(inode)->runtime_flags))
2047		filemap_flush(inode->i_mapping);
2048	return 0;
2049}
2050
2051static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2052{
2053	int ret;
2054	struct blk_plug plug;
2055
2056	/*
2057	 * This is only called in fsync, which would do synchronous writes, so
2058	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
2059	 * multiple disks using raid profile, a large IO can be split to
2060	 * several segments of stripe length (currently 64K).
2061	 */
2062	blk_start_plug(&plug);
2063	atomic_inc(&BTRFS_I(inode)->sync_writers);
2064	ret = btrfs_fdatawrite_range(inode, start, end);
2065	atomic_dec(&BTRFS_I(inode)->sync_writers);
2066	blk_finish_plug(&plug);
2067
2068	return ret;
2069}
2070
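/*
 * Decide whether fsync can skip logging the inode: either the inode is
 * already in the current log and has no ordered extents to attach, or
 * all of its changes were committed already (with the fast fsync caveat
 * explained in the comment below).
 */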
2071static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2072{
2073	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2074	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2075
2076	if (btrfs_inode_in_log(inode, fs_info->generation) &&
2077	    list_empty(&ctx->ordered_extents))
2078		return true;
2079
2080	/*
2081	 * If we are doing a fast fsync we cannot bail out if the inode's
2082	 * last_trans is <= the last committed transaction, because we only
2083	 * update the last_trans of the inode during ordered extent completion,
2084	 * and for a fast fsync we don't wait for that, we only wait for the
2085	 * writeback to complete.
2086	 */
2087	if (inode->last_trans <= fs_info->last_trans_committed &&
2088	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2089	     list_empty(&ctx->ordered_extents)))
2090		return true;
2091
2092	return false;
2093}
2094
2095/*
2096 * fsync call for both files and directories.  This logs the inode into
2097 * the tree log instead of forcing full commits whenever possible.
2098 *
2099 * It needs to call filemap_fdatawait so that all ordered extent updates
2100 * in the metadata btree are up to date for copying to the log.
2101 *
2102 * It drops the inode mutex before doing the tree log commit.  This is an
2103 * important optimization for directories because holding the mutex prevents
2104 * new operations on the dir while we write to disk.
2105 */
2106int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2107{
2108	struct dentry *dentry = file_dentry(file);
2109	struct inode *inode = d_inode(dentry);
2110	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2111	struct btrfs_root *root = BTRFS_I(inode)->root;
2112	struct btrfs_trans_handle *trans;
2113	struct btrfs_log_ctx ctx;
2114	int ret = 0, err;
2115	u64 len;
2116	bool full_sync;
2117
2118	trace_btrfs_sync_file(file, datasync);
2119
2120	btrfs_init_log_ctx(&ctx, inode);
2121
2122	/*
2123	 * Always set the range to a full range, otherwise we can get into
2124	 * several problems, from missing file extent items to represent holes
2125	 * when not using the NO_HOLES feature, to log tree corruption due to
2126	 * races between hole detection during logging and completion of ordered
2127	 * extents outside the range, to missing checksums due to ordered extents
2128	 * for which we flushed only a subset of their pages.
2129	 */
2130	start = 0;
2131	end = LLONG_MAX;
2132	len = (u64)LLONG_MAX + 1;
2133
2134	/*
2135	 * We write the dirty pages in the range and wait for them to complete
2136	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
2137	 * pages concurrently and improve performance.  See
2138	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2139	 */
2140	ret = start_ordered_ops(inode, start, end);
2141	if (ret)
2142		goto out;
2143
2144	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2145
2146	atomic_inc(&root->log_batch);
2147
2148	/*
2149	 * Always check for the full sync flag while holding the inode's lock,
2150	 * to avoid races with other tasks. The flag must either be set for the
2151	 * whole duration of the logging or remain off the whole time.
2152	 */
2153	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2154			     &BTRFS_I(inode)->runtime_flags);
2155
2156	/*
2157	 * Before we acquired the inode's lock and the mmap lock, someone may
2158	 * have dirtied more pages in the target range. We need to make sure
2159	 * that writeback for any such pages does not start while we are logging
2160	 * the inode, because if it does, any of the following might happen when
2161	 * we are not doing a full inode sync:
2162	 *
2163	 * 1) We log an extent after its writeback finishes but before its
2164	 *    checksums are added to the csum tree, leading to -EIO errors
2165	 *    when attempting to read the extent after a log replay.
2166	 *
2167	 * 2) We can end up logging an extent before its writeback finishes.
2168	 *    Therefore after the log replay we will have a file extent item
2169	 *    pointing to an unwritten extent (and no data checksums as well).
2170	 *
2171	 * So trigger writeback for any eventual new dirty pages and then we
2172	 * wait for all ordered extents to complete below.
2173	 */
2174	ret = start_ordered_ops(inode, start, end);
2175	if (ret) {
2176		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2177		goto out;
2178	}
2179
2180	/*
2181	 * We have to do this here to avoid the priority inversion of waiting on
2182	 * IO of a lower priority task while holding a transaction open.
2183	 *
2184	 * For a full fsync we wait for the ordered extents to complete while
2185	 * for a fast fsync we wait just for writeback to complete, and then
2186	 * attach the ordered extents to the transaction so that a transaction
2187	 * commit waits for their completion, to avoid data loss if, after we
2188	 * fsync, the current transaction commits before the ordered extents
2189	 * complete and a power failure happens right after that.
2190	 *
2191	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2192	 * logical address recorded in the ordered extent may change. We need
2193	 * to wait for the IO to stabilize the logical address.
2194	 */
2195	if (full_sync || btrfs_is_zoned(fs_info)) {
2196		ret = btrfs_wait_ordered_range(inode, start, len);
2197	} else {
2198		/*
2199		 * Get our ordered extents as soon as possible to avoid doing
2200		 * checksum lookups in the csum tree, and instead use the
2201		 * checksums attached to the ordered extents.
2202		 */
2203		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2204						      &ctx.ordered_extents);
2205		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2206	}
2207
2208	if (ret)
2209		goto out_release_extents;
2210
2211	atomic_inc(&root->log_batch);
2212
2213	smp_mb();
2214	if (skip_inode_logging(&ctx)) {
2215		/*
2216		 * We've had everything committed since the last time we were
2217		 * modified so clear this flag in case it was set for whatever
2218		 * reason, it's no longer relevant.
2219		 */
2220		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2221			  &BTRFS_I(inode)->runtime_flags);
2222		/*
2223		 * An ordered extent might have started before and completed
2224		 * already with io errors, in which case the inode was not
2225		 * updated and we end up here. So check the inode's mapping
2226		 * for any errors that might have happened since we last
2227		 * called fsync.
2228		 */
2229		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2230		goto out_release_extents;
2231	}
2232
2233	/*
2234	 * We use start here because we will need to wait on the IO to complete
2235	 * in btrfs_sync_log, which could require joining a transaction (for
2236	 * example checking cross references in the nocow path).  If we use join
2237	 * here we could get into a situation where we're waiting on IO to
2238	 * happen that is blocked on a transaction trying to commit.  With start
2239	 * we inc the extwriter counter, so we wait for all extwriters to exit
2240	 * before we start blocking joiners.  This comment is to keep somebody
2241	 * from thinking they are super smart and changing this to
2242	 * btrfs_join_transaction *cough*Josef*cough*.
2243	 */
2244	trans = btrfs_start_transaction(root, 0);
2245	if (IS_ERR(trans)) {
2246		ret = PTR_ERR(trans);
2247		goto out_release_extents;
2248	}
2249	trans->in_fsync = true;
2250
2251	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2252	btrfs_release_log_ctx_extents(&ctx);
2253	if (ret < 0) {
2254		/* Fallthrough and commit/free transaction. */
2255		ret = 1;
2256	}
2257
2258	/* we've logged all the items and now have a consistent
2259	 * version of the file in the log.  It is possible that
2260	 * someone will come in and modify the file, but that's
2261	 * fine because the log is consistent on disk, and we
2262	 * have references to all of the file's extents
2263	 *
2264	 * It is possible that someone will come in and log the
2265	 * file again, but that will end up using the synchronization
2266	 * inside btrfs_sync_log to keep things safe.
2267	 */
2268	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2269
2270	if (ret != BTRFS_NO_LOG_SYNC) {
2271		if (!ret) {
2272			ret = btrfs_sync_log(trans, root, &ctx);
2273			if (!ret) {
2274				ret = btrfs_end_transaction(trans);
2275				goto out;
2276			}
2277		}
2278		if (!full_sync) {
2279			ret = btrfs_wait_ordered_range(inode, start, len);
2280			if (ret) {
2281				btrfs_end_transaction(trans);
2282				goto out;
2283			}
2284		}
2285		ret = btrfs_commit_transaction(trans);
2286	} else {
2287		ret = btrfs_end_transaction(trans);
2288	}
2289out:
2290	ASSERT(list_empty(&ctx.list));
2291	err = file_check_and_advance_wb_err(file);
2292	if (!ret)
2293		ret = err;
2294	return ret > 0 ? -EIO : ret;
2295
2296out_release_extents:
2297	btrfs_release_log_ctx_extents(&ctx);
2298	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2299	goto out;
2300}
2301
2302static const struct vm_operations_struct btrfs_file_vm_ops = {
2303	.fault		= filemap_fault,
2304	.map_pages	= filemap_map_pages,
2305	.page_mkwrite	= btrfs_page_mkwrite,
2306};
2307
2308static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2309{
2310	struct address_space *mapping = filp->f_mapping;
2311
2312	if (!mapping->a_ops->readpage)
2313		return -ENOEXEC;
2314
2315	file_accessed(filp);
2316	vma->vm_ops = &btrfs_file_vm_ops;
2317
2318	return 0;
2319}
2320
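/*
 * Check whether the file extent item at @slot is an existing hole (a
 * regular extent with a zero disk_bytenr) adjacent to the range
 * [start, end), in which case the new hole can be merged into it.
 */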
2321static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2322			  int slot, u64 start, u64 end)
2323{
2324	struct btrfs_file_extent_item *fi;
2325	struct btrfs_key key;
2326
2327	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2328		return 0;
2329
2330	btrfs_item_key_to_cpu(leaf, &key, slot);
2331	if (key.objectid != btrfs_ino(inode) ||
2332	    key.type != BTRFS_EXTENT_DATA_KEY)
2333		return 0;
2334
2335	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2336
2337	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2338		return 0;
2339
2340	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2341		return 0;
2342
2343	if (key.offset == end)
2344		return 1;
2345	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2346		return 1;
2347	return 0;
2348}
2349
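/*
 * Insert or extend a file extent item representing the hole
 * [offset, end) after the old extents were dropped, then refresh the
 * cached extent maps. With the NO_HOLES feature no item is needed and
 * only the extent map cache is updated.
 */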
2350static int fill_holes(struct btrfs_trans_handle *trans,
2351		struct btrfs_inode *inode,
2352		struct btrfs_path *path, u64 offset, u64 end)
2353{
2354	struct btrfs_fs_info *fs_info = trans->fs_info;
2355	struct btrfs_root *root = inode->root;
2356	struct extent_buffer *leaf;
2357	struct btrfs_file_extent_item *fi;
2358	struct extent_map *hole_em;
2359	struct extent_map_tree *em_tree = &inode->extent_tree;
2360	struct btrfs_key key;
2361	int ret;
2362
2363	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2364		goto out;
2365
2366	key.objectid = btrfs_ino(inode);
2367	key.type = BTRFS_EXTENT_DATA_KEY;
2368	key.offset = offset;
2369
2370	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2371	if (ret <= 0) {
2372		/*
2373		 * We should have dropped this offset, so if we find it then
2374		 * something has gone horribly wrong.
2375		 */
2376		if (ret == 0)
2377			ret = -EINVAL;
2378		return ret;
2379	}
2380
2381	leaf = path->nodes[0];
2382	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2383		u64 num_bytes;
2384
2385		path->slots[0]--;
2386		fi = btrfs_item_ptr(leaf, path->slots[0],
2387				    struct btrfs_file_extent_item);
2388		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2389			end - offset;
2390		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2391		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2392		btrfs_set_file_extent_offset(leaf, fi, 0);
2393		btrfs_mark_buffer_dirty(leaf);
2394		goto out;
2395	}
2396
2397	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2398		u64 num_bytes;
2399
2400		key.offset = offset;
2401		btrfs_set_item_key_safe(fs_info, path, &key);
2402		fi = btrfs_item_ptr(leaf, path->slots[0],
2403				    struct btrfs_file_extent_item);
2404		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2405			offset;
2406		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2407		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2408		btrfs_set_file_extent_offset(leaf, fi, 0);
2409		btrfs_mark_buffer_dirty(leaf);
2410		goto out;
2411	}
2412	btrfs_release_path(path);
2413
2414	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2415			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2416	if (ret)
2417		return ret;
2418
2419out:
2420	btrfs_release_path(path);
2421
2422	hole_em = alloc_extent_map();
2423	if (!hole_em) {
2424		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2425		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2426	} else {
2427		hole_em->start = offset;
2428		hole_em->len = end - offset;
2429		hole_em->ram_bytes = hole_em->len;
2430		hole_em->orig_start = offset;
2431
2432		hole_em->block_start = EXTENT_MAP_HOLE;
2433		hole_em->block_len = 0;
2434		hole_em->orig_block_len = 0;
2435		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2436		hole_em->generation = trans->transid;
2437
2438		do {
2439			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2440			write_lock(&em_tree->lock);
2441			ret = add_extent_mapping(em_tree, hole_em, 1);
2442			write_unlock(&em_tree->lock);
2443		} while (ret == -EEXIST);
2444		free_extent_map(hole_em);
2445		if (ret)
2446			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2447					&inode->runtime_flags);
2448	}
2449
2450	return 0;
2451}
2452
2453/*
2454 * Find a hole extent on the given inode and change start/len to the end of the
2455 * hole extent (a hole/vacuum extent whose em->start <= start &&
2456 *	   em->start + em->len > start)
2457 * When a hole extent is found, return 1 and modify start/len.
2458 */
2459static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2460{
2461	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2462	struct extent_map *em;
2463	int ret = 0;
2464
2465	em = btrfs_get_extent(inode, NULL, 0,
2466			      round_down(*start, fs_info->sectorsize),
2467			      round_up(*len, fs_info->sectorsize));
2468	if (IS_ERR(em))
2469		return PTR_ERR(em);
2470
2471	/* Hole or vacuum extent(only exists in no-hole mode) */
2472	if (em->block_start == EXTENT_MAP_HOLE) {
2473		ret = 1;
2474		*len = em->start + em->len > *start + *len ?
2475		       0 : *start + *len - em->start - em->len;
2476		*start = em->start + em->len;
2477	}
2478	free_extent_map(em);
2479	return ret;
2480}
2481
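/*
 * Truncate the page cache and lock the extent range for a hole punch
 * style operation, retrying until no ordered extent overlaps the range
 * and no page was read back in by a racing reader.
 */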
2482static int btrfs_punch_hole_lock_range(struct inode *inode,
2483				       const u64 lockstart,
2484				       const u64 lockend,
2485				       struct extent_state **cached_state)
2486{
2487	/*
2488	 * For the subpage case, if the range is not at a page boundary, we could
2489	 * have pages at the leading/trailing part of the range.
2490	 * This could lead to an endless loop since filemap_range_has_page()
2491	 * will always return true.
2492	 * So here we need to do extra page alignment for
2493	 * filemap_range_has_page().
2494	 */
2495	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2496	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2497
2498	while (1) {
2499		struct btrfs_ordered_extent *ordered;
2500		int ret;
2501
2502		truncate_pagecache_range(inode, lockstart, lockend);
2503
2504		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2505				 cached_state);
2506		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
2507							    lockend);
2508
2509		/*
2510		 * We need to make sure we have no ordered extents in this range
2511		 * and nobody raced in and read a page in this range, if we did
2512		 * we need to try again.
2513		 */
2514		if ((!ordered ||
2515		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
2516		     ordered->file_offset > lockend)) &&
2517		     !filemap_range_has_page(inode->i_mapping,
2518					     page_lockstart, page_lockend)) {
2519			if (ordered)
2520				btrfs_put_ordered_extent(ordered);
2521			break;
2522		}
2523		if (ordered)
2524			btrfs_put_ordered_extent(ordered);
2525		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2526				     lockend, cached_state);
2527		ret = btrfs_wait_ordered_range(inode, lockstart,
2528					       lockend - lockstart + 1);
2529		if (ret)
2530			return ret;
2531	}
2532	return 0;
2533}
2534
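/*
 * Insert the file extent item described by @extent_info at its target
 * file offset and take the matching data extent reference (or account
 * the reserved extent on its first insertion). A hole with NO_HOLES
 * enabled only needs the inode byte accounting.
 */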
2535static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2536				     struct btrfs_inode *inode,
2537				     struct btrfs_path *path,
2538				     struct btrfs_replace_extent_info *extent_info,
2539				     const u64 replace_len,
2540				     const u64 bytes_to_drop)
2541{
2542	struct btrfs_fs_info *fs_info = trans->fs_info;
2543	struct btrfs_root *root = inode->root;
2544	struct btrfs_file_extent_item *extent;
2545	struct extent_buffer *leaf;
2546	struct btrfs_key key;
2547	int slot;
2548	struct btrfs_ref ref = { 0 };
2549	int ret;
2550
2551	if (replace_len == 0)
2552		return 0;
2553
2554	if (extent_info->disk_offset == 0 &&
2555	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2556		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2557		return 0;
2558	}
2559
2560	key.objectid = btrfs_ino(inode);
2561	key.type = BTRFS_EXTENT_DATA_KEY;
2562	key.offset = extent_info->file_offset;
2563	ret = btrfs_insert_empty_item(trans, root, path, &key,
2564				      sizeof(struct btrfs_file_extent_item));
2565	if (ret)
2566		return ret;
2567	leaf = path->nodes[0];
2568	slot = path->slots[0];
2569	write_extent_buffer(leaf, extent_info->extent_buf,
2570			    btrfs_item_ptr_offset(leaf, slot),
2571			    sizeof(struct btrfs_file_extent_item));
2572	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2573	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2574	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2575	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2576	if (extent_info->is_new_extent)
2577		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2578	btrfs_mark_buffer_dirty(leaf);
2579	btrfs_release_path(path);
2580
2581	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2582						replace_len);
2583	if (ret)
2584		return ret;
2585
2586	/* If it's a hole, nothing more needs to be done. */
2587	if (extent_info->disk_offset == 0) {
2588		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2589		return 0;
2590	}
2591
2592	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2593
2594	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2595		key.objectid = extent_info->disk_offset;
2596		key.type = BTRFS_EXTENT_ITEM_KEY;
2597		key.offset = extent_info->disk_len;
2598		ret = btrfs_alloc_reserved_file_extent(trans, root,
2599						       btrfs_ino(inode),
2600						       extent_info->file_offset,
2601						       extent_info->qgroup_reserved,
2602						       &key);
2603	} else {
2604		u64 ref_offset;
2605
2606		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2607				       extent_info->disk_offset,
2608				       extent_info->disk_len, 0);
2609		ref_offset = extent_info->file_offset - extent_info->data_offset;
2610		btrfs_init_data_ref(&ref, root->root_key.objectid,
2611				    btrfs_ino(inode), ref_offset);
2612		ret = btrfs_inc_extent_ref(trans, &ref);
2613	}
2614
2615	extent_info->insertions++;
2616
2617	return ret;
2618}
2619
2620/*
2621 * The respective range must have been previously locked, as well as the inode.
2622 * The end offset is inclusive (last byte of the range).
2623 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2624 * the file range with an extent.
2625 * When not punching a hole, we don't want to end up in a state where we dropped
2626 * extents without inserting a new one, so we must abort the transaction to
2627 * avoid corruption.
2628 */
2629int btrfs_replace_file_extents(struct btrfs_inode *inode,
2630			       struct btrfs_path *path, const u64 start,
2631			       const u64 end,
2632			       struct btrfs_replace_extent_info *extent_info,
2633			       struct btrfs_trans_handle **trans_out)
2634{
2635	struct btrfs_drop_extents_args drop_args = { 0 };
2636	struct btrfs_root *root = inode->root;
2637	struct btrfs_fs_info *fs_info = root->fs_info;
2638	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2639	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2640	struct btrfs_trans_handle *trans = NULL;
2641	struct btrfs_block_rsv *rsv;
2642	unsigned int rsv_count;
2643	u64 cur_offset;
2644	u64 len = end - start;
2645	int ret = 0;
2646
2647	if (end <= start)
2648		return -EINVAL;
2649
2650	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2651	if (!rsv) {
2652		ret = -ENOMEM;
2653		goto out;
2654	}
2655	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2656	rsv->failfast = 1;
2657
2658	/*
2659	 * 1 - update the inode
2660	 * 1 - removing the extents in the range
2661	 * 1 - adding the hole extent if no_holes isn't set or if we are
2662	 *     replacing the range with a new extent
2663	 */
2664	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2665		rsv_count = 3;
2666	else
2667		rsv_count = 2;
2668
2669	trans = btrfs_start_transaction(root, rsv_count);
2670	if (IS_ERR(trans)) {
2671		ret = PTR_ERR(trans);
2672		trans = NULL;
2673		goto out_free;
2674	}
2675
2676	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2677				      min_size, false);
2678	BUG_ON(ret);
2679	trans->block_rsv = rsv;
2680
2681	cur_offset = start;
2682	drop_args.path = path;
2683	drop_args.end = end + 1;
2684	drop_args.drop_cache = true;
2685	while (cur_offset < end) {
2686		drop_args.start = cur_offset;
2687		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2688		/* If we are punching a hole decrement the inode's byte count */
2689		if (!extent_info)
2690			btrfs_update_inode_bytes(inode, 0,
2691						 drop_args.bytes_found);
2692		if (ret != -ENOSPC) {
2693			/*
2694			 * The only time we don't want to abort is if we are
2695			 * attempting to clone a partial inline extent, in which
2696			 * case we'll get EOPNOTSUPP.  However if we aren't
2697			 * cloning we need to abort no matter what, because if we
2698			 * got EOPNOTSUPP via prealloc then we messed up and
2699			 * need to abort.
2700			 */
2701			if (ret &&
2702			    (ret != -EOPNOTSUPP ||
2703			     (extent_info && extent_info->is_new_extent)))
2704				btrfs_abort_transaction(trans, ret);
2705			break;
2706		}
2707
2708		trans->block_rsv = &fs_info->trans_block_rsv;
2709
2710		if (!extent_info && cur_offset < drop_args.drop_end &&
2711		    cur_offset < ino_size) {
2712			ret = fill_holes(trans, inode, path, cur_offset,
2713					 drop_args.drop_end);
2714			if (ret) {
2715				/*
2716				 * If we failed then we didn't insert our hole
2717				 * entries for the area we dropped, so now the
2718				 * fs is corrupted, so we must abort the
2719				 * transaction.
2720				 */
2721				btrfs_abort_transaction(trans, ret);
2722				break;
2723			}
2724		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2725			/*
2726			 * We are past the i_size here, but since we didn't
2727			 * insert holes we need to clear the mapped area so we
2728			 * know to not set disk_i_size in this area until a new
2729			 * file extent is inserted here.
2730			 */
2731			ret = btrfs_inode_clear_file_extent_range(inode,
2732					cur_offset,
2733					drop_args.drop_end - cur_offset);
2734			if (ret) {
2735				/*
2736				 * We couldn't clear our area, so we could
2737				 * presumably adjust up and corrupt the fs, so
2738				 * we need to abort.
2739				 */
2740				btrfs_abort_transaction(trans, ret);
2741				break;
2742			}
2743		}
2744
2745		if (extent_info &&
2746		    drop_args.drop_end > extent_info->file_offset) {
2747			u64 replace_len = drop_args.drop_end -
2748					  extent_info->file_offset;
2749
2750			ret = btrfs_insert_replace_extent(trans, inode,	path,
2751					extent_info, replace_len,
2752					drop_args.bytes_found);
2753			if (ret) {
2754				btrfs_abort_transaction(trans, ret);
2755				break;
2756			}
2757			extent_info->data_len -= replace_len;
2758			extent_info->data_offset += replace_len;
2759			extent_info->file_offset += replace_len;
2760		}
2761
2762		ret = btrfs_update_inode(trans, root, inode);
2763		if (ret)
2764			break;
2765
2766		btrfs_end_transaction(trans);
2767		btrfs_btree_balance_dirty(fs_info);
2768
2769		trans = btrfs_start_transaction(root, rsv_count);
2770		if (IS_ERR(trans)) {
2771			ret = PTR_ERR(trans);
2772			trans = NULL;
2773			break;
2774		}
2775
2776		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2777					      rsv, min_size, false);
2778		BUG_ON(ret);	/* shouldn't happen */
2779		trans->block_rsv = rsv;
2780
2781		cur_offset = drop_args.drop_end;
2782		len = end - cur_offset;
2783		if (!extent_info && len) {
2784			ret = find_first_non_hole(inode, &cur_offset, &len);
2785			if (unlikely(ret < 0))
2786				break;
2787			if (ret && !len) {
2788				ret = 0;
2789				break;
2790			}
2791		}
2792	}
2793
2794	/*
2795 * If we were cloning, force the next fsync to be a full one since we
2796 * replaced (or just dropped in the case of cloning holes when
2797	 * NO_HOLES is enabled) file extent items and did not setup new extent
2798	 * maps for the replacement extents (or holes).
2799	 */
2800	if (extent_info && !extent_info->is_new_extent)
2801		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2802
2803	if (ret)
2804		goto out_trans;
2805
2806	trans->block_rsv = &fs_info->trans_block_rsv;
2807	/*
2808	 * If we are using the NO_HOLES feature we might already have had a
2809	 * hole that overlaps a part of the region [lockstart, lockend] and
2810	 * ends at (or beyond) lockend. Since we have no file extent items to
2811	 * represent holes, drop_end can be less than lockend and so we must
2812	 * make sure we have an extent map representing the existing hole (the
2813	 * call to __btrfs_drop_extents() might have dropped the existing extent
2814	 * map representing the existing hole), otherwise the fast fsync path
2815	 * will not record the existence of the hole region
2816	 * [existing_hole_start, lockend].
2817	 */
2818	if (drop_args.drop_end <= end)
2819		drop_args.drop_end = end + 1;
2820	/*
2821	 * Don't insert file hole extent item if it's for a range beyond eof
2822	 * (because it's useless) or if it represents a 0 bytes range (when
2823	 * cur_offset == drop_end).
2824	 */
2825	if (!extent_info && cur_offset < ino_size &&
2826	    cur_offset < drop_args.drop_end) {
2827		ret = fill_holes(trans, inode, path, cur_offset,
2828				 drop_args.drop_end);
2829		if (ret) {
2830			/* Same comment as above. */
2831			btrfs_abort_transaction(trans, ret);
2832			goto out_trans;
2833		}
2834	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2835		/* See the comment in the loop above for the reasoning here. */
2836		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2837					drop_args.drop_end - cur_offset);
2838		if (ret) {
2839			btrfs_abort_transaction(trans, ret);
2840			goto out_trans;
2841		}
2842
2843	}
2844	if (extent_info) {
2845		ret = btrfs_insert_replace_extent(trans, inode, path,
2846				extent_info, extent_info->data_len,
2847				drop_args.bytes_found);
2848		if (ret) {
2849			btrfs_abort_transaction(trans, ret);
2850			goto out_trans;
2851		}
2852	}
2853
2854out_trans:
2855	if (!trans)
2856		goto out_free;
2857
2858	trans->block_rsv = &fs_info->trans_block_rsv;
2859	if (ret)
2860		btrfs_end_transaction(trans);
2861	else
2862		*trans_out = trans;
2863out_free:
2864	btrfs_free_block_rsv(fs_info, rsv);
2865out:
2866	return ret;
2867}
2868
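/*
 * FALLOC_FL_PUNCH_HOLE implementation: zero any unaligned head and tail
 * blocks in place, then drop the block aligned middle range and insert
 * hole extent items for it through btrfs_replace_file_extents().
 */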
2869static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2870{
2871	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2872	struct btrfs_root *root = BTRFS_I(inode)->root;
2873	struct extent_state *cached_state = NULL;
2874	struct btrfs_path *path;
2875	struct btrfs_trans_handle *trans = NULL;
2876	u64 lockstart;
2877	u64 lockend;
2878	u64 tail_start;
2879	u64 tail_len;
2880	u64 orig_start = offset;
2881	int ret = 0;
2882	bool same_block;
2883	u64 ino_size;
2884	bool truncated_block = false;
2885	bool updated_inode = false;
2886
2887	ret = btrfs_wait_ordered_range(inode, offset, len);
2888	if (ret)
2889		return ret;
2890
2891	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2892	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2893	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2894	if (ret < 0)
2895		goto out_only_mutex;
2896	if (ret && !len) {
2897		/* Already in a large hole */
2898		ret = 0;
2899		goto out_only_mutex;
2900	}
2901
2902	lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
2903	lockend = round_down(offset + len,
2904			     btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
2905	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2906		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2907	/*
2908	 * We needn't truncate any block which is beyond the end of the file
2909	 * because we are sure there is no data there.
2910	 */
2911	/*
2912	 * Only do this if we are in the same block and we aren't doing the
2913	 * entire block.
2914	 */
2915	if (same_block && len < fs_info->sectorsize) {
2916		if (offset < ino_size) {
2917			truncated_block = true;
2918			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2919						   0);
2920		} else {
2921			ret = 0;
2922		}
2923		goto out_only_mutex;
2924	}
2925
2926	/* zero back part of the first block */
2927	if (offset < ino_size) {
2928		truncated_block = true;
2929		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2930		if (ret) {
2931			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2932			return ret;
2933		}
2934	}
2935
2936	/* Check the aligned pages after the first unaligned page. If
2937	 * offset != orig_start, the first unaligned page and several
2938	 * following pages are already in holes, so the extra check can
2939	 * be skipped. */
2940	if (offset == orig_start) {
2941		/* after truncate page, check hole again */
2942		len = offset + len - lockstart;
2943		offset = lockstart;
2944		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2945		if (ret < 0)
2946			goto out_only_mutex;
2947		if (ret && !len) {
2948			ret = 0;
2949			goto out_only_mutex;
2950		}
2951		lockstart = offset;
2952	}
2953
2954	/* Check the tail unaligned part is in a hole */
2955	tail_start = lockend + 1;
2956	tail_len = offset + len - tail_start;
2957	if (tail_len) {
2958		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2959		if (unlikely(ret < 0))
2960			goto out_only_mutex;
2961		if (!ret) {
2962			/* zero the front end of the last page */
2963			if (tail_start + tail_len < ino_size) {
2964				truncated_block = true;
2965				ret = btrfs_truncate_block(BTRFS_I(inode),
2966							tail_start + tail_len,
2967							0, 1);
2968				if (ret)
2969					goto out_only_mutex;
2970			}
2971		}
2972	}
2973
2974	if (lockend < lockstart) {
2975		ret = 0;
2976		goto out_only_mutex;
2977	}
2978
2979	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2980					  &cached_state);
2981	if (ret)
2982		goto out_only_mutex;
2983
2984	path = btrfs_alloc_path();
2985	if (!path) {
2986		ret = -ENOMEM;
2987		goto out;
2988	}
2989
2990	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2991					 lockend, NULL, &trans);
2992	btrfs_free_path(path);
2993	if (ret)
2994		goto out;
2995
2996	ASSERT(trans != NULL);
2997	inode_inc_iversion(inode);
2998	inode->i_mtime = inode->i_ctime = current_time(inode);
2999	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3000	updated_inode = true;
3001	btrfs_end_transaction(trans);
3002	btrfs_btree_balance_dirty(fs_info);
3003out:
3004	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3005			     &cached_state);
3006out_only_mutex:
3007	if (!updated_inode && truncated_block && !ret) {
3008		/*
3009		 * If we only end up zeroing part of a page, we still need to
3010		 * update the inode item, so that all the time fields are
3011		 * updated as well as the necessary btrfs inode in memory fields
3012		 * for detecting, at fsync time, if the inode isn't yet in the
3013		 * log tree or it's there but not up to date.
3014		 */
3015		struct timespec64 now = current_time(inode);
3016
3017		inode_inc_iversion(inode);
3018		inode->i_mtime = now;
3019		inode->i_ctime = now;
3020		trans = btrfs_start_transaction(root, 1);
3021		if (IS_ERR(trans)) {
3022			ret = PTR_ERR(trans);
3023		} else {
3024			int ret2;
3025
3026			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3027			ret2 = btrfs_end_transaction(trans);
3028			if (!ret)
3029				ret = ret2;
3030		}
3031	}
3032	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3033	return ret;
3034}
3035
3036/* Helper structure to record which range is already reserved */
3037struct falloc_range {
3038	struct list_head list;
3039	u64 start;
3040	u64 len;
3041};
3042
3043/*
3044 * Helper function to add falloc range
3045 *
3046 * Caller should have locked the larger range of extent containing
3047 * [start, len)
3048 */
3049static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3050{
3051	struct falloc_range *range = NULL;
3052
3053	if (!list_empty(head)) {
3054		/*
3055		 * As fallocate iterates by bytenr order, we only need to check
3056		 * the last range.
3057		 */
3058		range = list_last_entry(head, struct falloc_range, list);
3059		if (range->start + range->len == start) {
3060			range->len += len;
3061			return 0;
3062		}
3063	}
3064
3065	range = kmalloc(sizeof(*range), GFP_KERNEL);
3066	if (!range)
3067		return -ENOMEM;
3068	range->start = start;
3069	range->len = len;
3070	list_add_tail(&range->list, head);
3071	return 0;
3072}
3073
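/*
 * Grow i_size (and the on-disk inode item) to @end after a successful
 * fallocate, unless FALLOC_FL_KEEP_SIZE was requested or the file is
 * already at least that large.
 */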
3074static int btrfs_fallocate_update_isize(struct inode *inode,
3075					const u64 end,
3076					const int mode)
3077{
3078	struct btrfs_trans_handle *trans;
3079	struct btrfs_root *root = BTRFS_I(inode)->root;
3080	int ret;
3081	int ret2;
3082
3083	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3084		return 0;
3085
3086	trans = btrfs_start_transaction(root, 1);
3087	if (IS_ERR(trans))
3088		return PTR_ERR(trans);
3089
3090	inode->i_ctime = current_time(inode);
3091	i_size_write(inode, end);
3092	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3093	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3094	ret2 = btrfs_end_transaction(trans);
3095
3096	return ret ? ret : ret2;
3097}
3098
3099enum {
3100	RANGE_BOUNDARY_WRITTEN_EXTENT,
3101	RANGE_BOUNDARY_PREALLOC_EXTENT,
3102	RANGE_BOUNDARY_HOLE,
3103};
3104
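/*
 * Classify the block containing @offset so the zero range code knows
 * whether a range boundary maps to a written extent (must be zeroed in
 * place), a prealloc extent (nothing to do), or a hole (must be covered
 * by the new allocation).
 */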
3105static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3106						 u64 offset)
3107{
3108	const u64 sectorsize = btrfs_inode_sectorsize(inode);
3109	struct extent_map *em;
3110	int ret;
3111
3112	offset = round_down(offset, sectorsize);
3113	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3114	if (IS_ERR(em))
3115		return PTR_ERR(em);
3116
3117	if (em->block_start == EXTENT_MAP_HOLE)
3118		ret = RANGE_BOUNDARY_HOLE;
3119	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3120		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3121	else
3122		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3123
3124	free_extent_map(em);
3125	return ret;
3126}
3127
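/*
 * FALLOC_FL_ZERO_RANGE implementation: reuse existing prealloc extents
 * covering the range where possible, zero partial boundary blocks in
 * place and allocate a new prealloc (unwritten) extent for the rest, so
 * that the whole range reads back as zeroes.
 */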
3128static int btrfs_zero_range(struct inode *inode,
3129			    loff_t offset,
3130			    loff_t len,
3131			    const int mode)
3132{
3133	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3134	struct extent_map *em;
3135	struct extent_changeset *data_reserved = NULL;
3136	int ret;
3137	u64 alloc_hint = 0;
3138	const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
3139	u64 alloc_start = round_down(offset, sectorsize);
3140	u64 alloc_end = round_up(offset + len, sectorsize);
3141	u64 bytes_to_reserve = 0;
3142	bool space_reserved = false;
3143
3144	inode_dio_wait(inode);
3145
3146	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3147			      alloc_end - alloc_start);
3148	if (IS_ERR(em)) {
3149		ret = PTR_ERR(em);
3150		goto out;
3151	}
3152
3153	/*
3154	 * Avoid hole punching and extent allocation for some cases. More cases
3155	 * could be considered, but these are unlikely common and we keep things
3156	 * as simple as possible for now. Also, intentionally, if the target
3157	 * range contains one or more prealloc extents together with regular
3158	 * extents and holes, we drop all the existing extents and allocate a
3159	 * new prealloc extent, so that we get a larger contiguous disk extent.
3160	 */
3161	if (em->start <= alloc_start &&
3162	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3163		const u64 em_end = em->start + em->len;
3164
3165		if (em_end >= offset + len) {
3166			/*
3167			 * The whole range is already a prealloc extent,
3168			 * do nothing except updating the inode's i_size if
3169			 * needed.
3170			 */
3171			free_extent_map(em);
3172			ret = btrfs_fallocate_update_isize(inode, offset + len,
3173							   mode);
3174			goto out;
3175		}
3176		/*
3177		 * Part of the range is already a prealloc extent, so operate
3178		 * only on the remaining part of the range.
3179		 */
3180		alloc_start = em_end;
3181		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3182		len = offset + len - alloc_start;
3183		offset = alloc_start;
3184		alloc_hint = em->block_start + em->len;
3185	}
3186	free_extent_map(em);
3187
3188	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3189	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3190		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3191				      sectorsize);
3192		if (IS_ERR(em)) {
3193			ret = PTR_ERR(em);
3194			goto out;
3195		}
3196
3197		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3198			free_extent_map(em);
3199			ret = btrfs_fallocate_update_isize(inode, offset + len,
3200							   mode);
3201			goto out;
3202		}
3203		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3204			free_extent_map(em);
3205			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3206						   0);
3207			if (!ret)
3208				ret = btrfs_fallocate_update_isize(inode,
3209								   offset + len,
3210								   mode);
3211			return ret;
3212		}
3213		free_extent_map(em);
3214		alloc_start = round_down(offset, sectorsize);
3215		alloc_end = alloc_start + sectorsize;
3216		goto reserve_space;
3217	}
3218
3219	alloc_start = round_up(offset, sectorsize);
3220	alloc_end = round_down(offset + len, sectorsize);
3221
3222	/*
3223	 * For unaligned ranges, check the pages at the boundaries, they might
3224	 * map to an extent, in which case we need to partially zero them, or
3225	 * they might map to a hole, in which case we need our allocation range
3226	 * to cover them.
3227	 */
3228	if (!IS_ALIGNED(offset, sectorsize)) {
3229		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3230							    offset);
3231		if (ret < 0)
3232			goto out;
3233		if (ret == RANGE_BOUNDARY_HOLE) {
3234			alloc_start = round_down(offset, sectorsize);
3235			ret = 0;
3236		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3237			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3238			if (ret)
3239				goto out;
3240		} else {
3241			ret = 0;
3242		}
3243	}
3244
3245	if (!IS_ALIGNED(offset + len, sectorsize)) {
3246		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3247							    offset + len);
3248		if (ret < 0)
3249			goto out;
3250		if (ret == RANGE_BOUNDARY_HOLE) {
3251			alloc_end = round_up(offset + len, sectorsize);
3252			ret = 0;
3253		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3254			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3255						   0, 1);
3256			if (ret)
3257				goto out;
3258		} else {
3259			ret = 0;
3260		}
3261	}
3262
3263reserve_space:
3264	if (alloc_start < alloc_end) {
3265		struct extent_state *cached_state = NULL;
3266		const u64 lockstart = alloc_start;
3267		const u64 lockend = alloc_end - 1;
3268
3269		bytes_to_reserve = alloc_end - alloc_start;
3270		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3271						      bytes_to_reserve);
3272		if (ret < 0)
3273			goto out;
3274		space_reserved = true;
3275		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3276						  &cached_state);
3277		if (ret)
3278			goto out;
3279		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3280						alloc_start, bytes_to_reserve);
3281		if (ret) {
3282			unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3283					     lockend, &cached_state);
3284			goto out;
3285		}
3286		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3287						alloc_end - alloc_start,
3288						i_blocksize(inode),
3289						offset + len, &alloc_hint);
3290		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3291				     lockend, &cached_state);
3292		/* btrfs_prealloc_file_range releases reserved space on error */
3293		if (ret) {
3294			space_reserved = false;
3295			goto out;
3296		}
3297	}
3298	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3299 out:
3300	if (ret && space_reserved)
3301		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3302					       alloc_start, bytes_to_reserve);
3303	extent_changeset_free(data_reserved);
3304
3305	return ret;
3306}
3307
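/*
 * ->fallocate() entry point. For illustration, a userspace call that
 * ends up in the hole punching path handled below (the fd and length
 * are hypothetical):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 4096);
 *
 * Mode 0 preallocates space, FALLOC_FL_ZERO_RANGE zeroes it, and per
 * fallocate(2) the punch hole mode must be combined with KEEP_SIZE.
 */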
3308static long btrfs_fallocate(struct file *file, int mode,
3309			    loff_t offset, loff_t len)
3310{
3311	struct inode *inode = file_inode(file);
3312	struct extent_state *cached_state = NULL;
3313	struct extent_changeset *data_reserved = NULL;
3314	struct falloc_range *range;
3315	struct falloc_range *tmp;
3316	struct list_head reserve_list;
3317	u64 cur_offset;
3318	u64 last_byte;
3319	u64 alloc_start;
3320	u64 alloc_end;
3321	u64 alloc_hint = 0;
3322	u64 locked_end;
3323	u64 actual_end = 0;
3324	struct extent_map *em;
3325	int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
3326	int ret;
3327
3328	/* Do not allow fallocate in ZONED mode */
3329	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3330		return -EOPNOTSUPP;
3331
3332	alloc_start = round_down(offset, blocksize);
3333	alloc_end = round_up(offset + len, blocksize);
3334	cur_offset = alloc_start;
3335
3336	/* Make sure we aren't being given some crap mode */
3337	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3338		     FALLOC_FL_ZERO_RANGE))
3339		return -EOPNOTSUPP;
3340
3341	if (mode & FALLOC_FL_PUNCH_HOLE)
3342		return btrfs_punch_hole(inode, offset, len);
3343
3344	/*
3345	 * Only trigger the disk allocation, don't trigger the qgroup reserve.
3346	 *
3347	 * The qgroup space will be checked later.
3348	 */
3349	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3350		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3351						      alloc_end - alloc_start);
3352		if (ret < 0)
3353			return ret;
3354	}
3355
3356	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3357
3358	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3359		ret = inode_newsize_ok(inode, offset + len);
3360		if (ret)
3361			goto out;
3362	}
3363
3364	/*
3365	 * TODO: Move these two operations after we have checked
3366	 * accurate reserved space, or fallocate can still fail but
3367	 * with the page truncated or the size expanded.
3368	 *
3369	 * But that's a minor problem and won't do much harm.
3370	 */
3371	if (alloc_start > inode->i_size) {
3372		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3373					alloc_start);
3374		if (ret)
3375			goto out;
3376	} else if (offset + len > inode->i_size) {
3377		/*
3378		 * If we are fallocating from the end of the file onward we
3379		 * need to zero out the end of the block if i_size lands in the
3380		 * middle of a block.
3381		 */
3382		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3383		if (ret)
3384			goto out;
3385	}
3386
3387	/*
3388	 * wait for ordered IO before we have any locks.  We'll loop again
3389	 * below with the locks held.
3390	 */
3391	ret = btrfs_wait_ordered_range(inode, alloc_start,
3392				       alloc_end - alloc_start);
3393	if (ret)
3394		goto out;
3395
3396	if (mode & FALLOC_FL_ZERO_RANGE) {
3397		ret = btrfs_zero_range(inode, offset, len, mode);
3398		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3399		return ret;
3400	}
3401
3402	locked_end = alloc_end - 1;
3403	while (1) {
3404		struct btrfs_ordered_extent *ordered;
3405
3406		/* the extent lock is ordered inside the running
3407		 * transaction
3408		 */
3409		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3410				 locked_end, &cached_state);
3411		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3412							    locked_end);
3413
3414		if (ordered &&
3415		    ordered->file_offset + ordered->num_bytes > alloc_start &&
3416		    ordered->file_offset < alloc_end) {
3417			btrfs_put_ordered_extent(ordered);
3418			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3419					     alloc_start, locked_end,
3420					     &cached_state);
3421			/*
3422			 * we can't wait on the range with the transaction
3423			 * running or with the extent lock held
3424			 */
3425			ret = btrfs_wait_ordered_range(inode, alloc_start,
3426						       alloc_end - alloc_start);
3427			if (ret)
3428				goto out;
3429		} else {
3430			if (ordered)
3431				btrfs_put_ordered_extent(ordered);
3432			break;
3433		}
3434	}
3435
3436	/* First, check if we exceed the qgroup limit */
3437	INIT_LIST_HEAD(&reserve_list);
3438	while (cur_offset < alloc_end) {
3439		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3440				      alloc_end - cur_offset);
3441		if (IS_ERR(em)) {
3442			ret = PTR_ERR(em);
3443			break;
3444		}
3445		last_byte = min(extent_map_end(em), alloc_end);
3446		actual_end = min_t(u64, extent_map_end(em), offset + len);
3447		last_byte = ALIGN(last_byte, blocksize);
3448		if (em->block_start == EXTENT_MAP_HOLE ||
3449		    (cur_offset >= inode->i_size &&
3450		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3451			ret = add_falloc_range(&reserve_list, cur_offset,
3452					       last_byte - cur_offset);
3453			if (ret < 0) {
3454				free_extent_map(em);
3455				break;
3456			}
3457			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3458					&data_reserved, cur_offset,
3459					last_byte - cur_offset);
3460			if (ret < 0) {
3461				cur_offset = last_byte;
3462				free_extent_map(em);
3463				break;
3464			}
3465		} else {
3466			/*
3467			 * We don't need to reserve an unwritten extent for this
3468			 * range; free the reserved data space first, otherwise
3469			 * it'll result in a false ENOSPC error.
3470			 */
3471			btrfs_free_reserved_data_space(BTRFS_I(inode),
3472				data_reserved, cur_offset,
3473				last_byte - cur_offset);
3474		}
3475		free_extent_map(em);
3476		cur_offset = last_byte;
3477	}
3478
3479	/*
3480	 * If ret is still 0, we're OK to fallocate.
3481	 * Otherwise just clean up the list and exit.
3482	 */
3483	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3484		if (!ret)
3485			ret = btrfs_prealloc_file_range(inode, mode,
3486					range->start,
3487					range->len, i_blocksize(inode),
3488					offset + len, &alloc_hint);
3489		else
3490			btrfs_free_reserved_data_space(BTRFS_I(inode),
3491					data_reserved, range->start,
3492					range->len);
3493		list_del(&range->list);
3494		kfree(range);
3495	}
3496	if (ret < 0)
3497		goto out_unlock;
3498
3499	/*
3500	 * We didn't need to allocate any more space, but we still extended the
3501	 * size of the file so we need to update i_size and the inode item.
3502	 */
3503	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3504out_unlock:
3505	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3506			     &cached_state);
3507out:
3508	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3509	/* Let go of our reservation. */
3510	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3511		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3512				cur_offset, alloc_end - cur_offset);
3513	extent_changeset_free(data_reserved);
3514	return ret;
3515}
3516
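/*
 * Back end for SEEK_DATA/SEEK_HOLE: walk the extent maps from @offset
 * towards i_size until one matches the requested kind, treating
 * prealloc extents as holes.
 */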
3517static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3518				  int whence)
3519{
3520	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3521	struct extent_map *em = NULL;
3522	struct extent_state *cached_state = NULL;
3523	loff_t i_size = inode->vfs_inode.i_size;
3524	u64 lockstart;
3525	u64 lockend;
3526	u64 start;
3527	u64 len;
3528	int ret = 0;
3529
3530	if (i_size == 0 || offset >= i_size)
3531		return -ENXIO;
3532
3533	/*
3534	 * offset can be negative; in that case we start finding DATA/HOLE from
3535	 * the very start of the file.
3536	 */
3537	start = max_t(loff_t, 0, offset);
3538
3539	lockstart = round_down(start, fs_info->sectorsize);
3540	lockend = round_up(i_size, fs_info->sectorsize);
3541	if (lockend <= lockstart)
3542		lockend = lockstart + fs_info->sectorsize;
3543	lockend--;
3544	len = lockend - lockstart + 1;
3545
3546	lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
3547
3548	while (start < i_size) {
3549		em = btrfs_get_extent_fiemap(inode, start, len);
3550		if (IS_ERR(em)) {
3551			ret = PTR_ERR(em);
3552			em = NULL;
3553			break;
3554		}
3555
3556		if (whence == SEEK_HOLE &&
3557		    (em->block_start == EXTENT_MAP_HOLE ||
3558		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3559			break;
3560		else if (whence == SEEK_DATA &&
3561			   (em->block_start != EXTENT_MAP_HOLE &&
3562			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3563			break;
3564
3565		start = em->start + em->len;
3566		free_extent_map(em);
3567		em = NULL;
3568		cond_resched();
3569	}
3570	free_extent_map(em);
3571	unlock_extent_cached(&inode->io_tree, lockstart, lockend,
3572			     &cached_state);
3573	if (ret) {
3574		offset = ret;
3575	} else {
3576		if (whence == SEEK_DATA && start >= i_size)
3577			offset = -ENXIO;
3578		else
3579			offset = min_t(loff_t, start, i_size);
3580	}
3581
3582	return offset;
3583}
3584
3585static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3586{
3587	struct inode *inode = file->f_mapping->host;
3588
3589	switch (whence) {
3590	default:
3591		return generic_file_llseek(file, offset, whence);
3592	case SEEK_DATA:
3593	case SEEK_HOLE:
3594		btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3595		offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3596		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3597		break;
3598	}
3599
3600	if (offset < 0)
3601		return offset;
3602
3603	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3604}
3605
3606static int btrfs_file_open(struct inode *inode, struct file *filp)
3607{
3608	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
3609	return generic_file_open(inode, filp);
3610}
3611
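/*
 * Validate a direct read: apply the usual direct IO alignment checks
 * and reject iovec arrays in which two segments share the same base
 * address, a case the btrfs direct IO path does not support.
 */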
3612static int check_direct_read(struct btrfs_fs_info *fs_info,
3613			     const struct iov_iter *iter, loff_t offset)
3614{
3615	int ret;
3616	int i, seg;
3617
3618	ret = check_direct_IO(fs_info, iter, offset);
3619	if (ret < 0)
3620		return ret;
3621
3622	if (!iter_is_iovec(iter))
3623		return 0;
3624
3625	for (seg = 0; seg < iter->nr_segs; seg++)
3626		for (i = seg + 1; i < iter->nr_segs; i++)
3627			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3628				return -EINVAL;
3629	return 0;
3630}
3631
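/*
 * Direct read path: take the inode lock shared, since concurrent direct
 * readers don't conflict with each other, and let iomap_dio_rw() drive
 * the actual IO.
 */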
3632static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3633{
3634	struct inode *inode = file_inode(iocb->ki_filp);
3635	ssize_t ret;
3636
3637	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3638		return 0;
3639
3640	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3641	ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0);
3642	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3643	return ret;
3644}
3645
3646static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3647{
3648	ssize_t ret = 0;
3649
3650	if (iocb->ki_flags & IOCB_DIRECT) {
3651		ret = btrfs_direct_read(iocb, to);
3652		if (ret < 0 || !iov_iter_count(to) ||
3653		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3654			return ret;
3655	}
3656
3657	return filemap_read(iocb, to, ret);
3658}
3659
3660const struct file_operations btrfs_file_operations = {
3661	.llseek		= btrfs_file_llseek,
3662	.read_iter      = btrfs_file_read_iter,
3663	.splice_read	= generic_file_splice_read,
3664	.write_iter	= btrfs_file_write_iter,
3665	.splice_write	= iter_file_splice_write,
3666	.mmap		= btrfs_file_mmap,
3667	.open		= btrfs_file_open,
3668	.release	= btrfs_release_file,
3669	.fsync		= btrfs_sync_file,
3670	.fallocate	= btrfs_fallocate,
3671	.unlocked_ioctl	= btrfs_ioctl,
3672#ifdef CONFIG_COMPAT
3673	.compat_ioctl	= btrfs_compat_ioctl,
3674#endif
3675	.remap_file_range = btrfs_remap_file_range,
3676};
3677
3678void __cold btrfs_auto_defrag_exit(void)
3679{
3680	kmem_cache_destroy(btrfs_inode_defrag_cachep);
3681}
3682
3683int __init btrfs_auto_defrag_init(void)
3684{
3685	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3686					sizeof(struct inode_defrag), 0,
3687					SLAB_MEM_SPREAD,
3688					NULL);
3689	if (!btrfs_inode_defrag_cachep)
3690		return -ENOMEM;
3691
3692	return 0;
3693}
3694
3695int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3696{
3697	int ret;
3698
3699	/*
3700	 * So with compression we will find and lock a dirty page and clear the
3701	 * first one as dirty, set up an async extent, and immediately return
3702	 * with the entire range locked but with nobody actually marked with
3703	 * writeback.  So we can't just filemap_write_and_wait_range() and
3704	 * expect it to work since it will just kick off a thread to do the
3705	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3706	 * since it will wait on the page lock, which won't be unlocked until
3707	 * after the pages have been marked as writeback and so we're good to go
3708	 * from there.  We have to do this otherwise we'll miss the ordered
3709	 * extents and that results in badness.  Please Josef, do not think you
3710	 * know better and pull this out at some point in the future, it is
3711	 * right and you are wrong.
3712	 */
3713	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3714	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3715			     &BTRFS_I(inode)->runtime_flags))
3716		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3717
3718	return ret;
3719}