// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/iomap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"
#include "ioctl.h"
#include "file.h"
#include "super.h"

/*
 * Simple helper to fault in pages and copy.  This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up-to-date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page-at-a-time copies after we return 0.
		 */
		if (unlikely(copied < count)) {
			if (!PageUptodate(page)) {
				iov_iter_revert(i, copied);
				copied = 0;
			}
			if (!copied)
				break;
		}

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
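
/*
 * Worked example of the copy loop above (an illustrative sketch, not part
 * of the original file): with PAGE_SIZE == 4096, pos == 6144 and
 * write_bytes == 6000, offset_in_page(pos) is 2048, so the iterations are:
 *
 *	pg 0: count = min(4096 - 2048, 6000) = 2048, offset -> 4096 -> 0
 *	pg 1: count = min(4096, 3952)        = 3952, offset -> 3952
 *
 * i.e. the first copy fills the tail of prepared_pages[0] and the second
 * lands at the start of prepared_pages[1], for a total of 6000 bytes.
 */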

/*
 * Unlock pages after btrfs_file_write() is done with them.
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
			     struct page **pages, size_t num_pages,
			     u64 pos, u64 copied)
{
	size_t i;
	u64 block_start = round_down(pos, fs_info->sectorsize);
	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

	ASSERT(block_len <= U32_MAX);
	for (i = 0; i < num_pages; i++) {
		/*
		 * The "checked" page flag is some magic around finding pages
		 * that have been modified without going through
		 * btrfs_set_page_dirty; clear it here.  There should be no
		 * need to mark the pages accessed, as prepare_pages() should
		 * have marked them accessed via find_or_create_page().
		 */
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
						block_start, block_len);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);
	ASSERT(num_bytes <= U32_MAX);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly.
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
					       start_pos, num_bytes);
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
						start_pos, num_bytes);
		btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
					    start_pos, num_bytes);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}
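
/*
 * Illustrative sketch (not part of the original file) of how the buffered
 * write path strings the helpers above together; see btrfs_buffered_write()
 * below for the real loop, which also handles space reservation and extent
 * locking:
 *
 *	prepare_pages(inode, pages, num_pages, pos, write_bytes, ...);
 *	copied = btrfs_copy_from_user(pos, write_bytes, pages, iter);
 *	ret = btrfs_dirty_pages(BTRFS_I(inode), pages, dirty_pages, pos,
 *				copied, &cached_state, only_release_metadata);
 *	btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
 */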

/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end.  hint_block is filled in with a block number that
 * would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that. We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(trans, leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset,
						0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(trans, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset, 0,
						false);
				ret = btrfs_free_extent(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items are moved off from our leaf to its
		 * immediate left or right neighbor leaves, we end up with a
		 * correct and adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		btrfs_setup_item_for_insert(trans, root, path, &key,
					    args->extent_item_size);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}
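
/*
 * Minimal usage sketch for btrfs_drop_extents() (illustrative only, not part
 * of the original file); it assumes a transaction handle obtained elsewhere
 * and simply drops all extents in [start, end):
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *
 *	args.start = start;
 *	args.end = end;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, inode->root, inode, &args);
 *	// on success, args.bytes_found holds the allocated bytes found in
 *	// the range and args.drop_end the end of the processed range
 */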

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(trans, leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0, root->root_key.objectid);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset, 0, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0, root->root_key.objectid);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
			    0, false);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(trans, leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(trans, leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
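
/*
 * Illustrative example (not part of the original file) of the split logic
 * above: given a pre-allocated extent covering [0, 12K) and a write that
 * completes [4K, 8K), btrfs_mark_extent_written() ends up with three file
 * extent items sharing the same disk extent:
 *
 *	[0, 4K) PREALLOC    [4K, 8K) REG    [8K, 12K) PREALLOC
 *
 * If a neighboring item is already REG and contiguous (see
 * extent_mergeable()), the written part is merged into it instead.
 */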

/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, folio);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}

		/*
		 * Since btrfs_read_folio() will unlock the folio before it
		 * returns, there is a window where btrfs_release_folio() can be
		 * called to release the page.  Here we check both the inode
		 * mapping and PagePrivate() to make sure the page was not
		 * released.
		 *
		 * The private flag check is essential for subpage as we need
		 * to store an extra bitmap using folio private.
		 */
		if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

static fgf_t get_prepare_fgp_flags(bool nowait)
{
	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

	if (nowait)
		fgp_flags |= FGP_NOWAIT;

	return fgp_flags;
}

static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
{
	gfp_t gfp;

	gfp = btrfs_alloc_write_mask(inode->i_mapping);
	if (nowait) {
		gfp &= ~__GFP_DIRECT_RECLAIM;
		gfp |= GFP_NOWAIT;
	}

	return gfp;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate,
				  bool nowait)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
					      fgp_flags, mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			if (nowait)
				err = -EAGAIN;
			else
				err = -ENOMEM;
			goto fail;
		}

		err = set_page_extent_mapped(pages[i]);
		if (err < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (!nowait && err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}
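
/*
 * Illustrative note (not part of the original file): only the first and last
 * pages of the range may need to be read, because only they can be partially
 * overwritten.  E.g. with PAGE_SIZE == 4096, a write of [1000, 9000) touches
 * pages 0..2; page 0 (bytes 0..999) and page 2 (bytes 9000..12287) keep old
 * data and must be up to date first, while page 1 is fully overwritten and
 * can skip the read.
 */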

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other number < 0 - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend, bool nowait,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
					pages[i] = NULL;
				}

				return -EAGAIN;
			}
		} else {
			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
		}

		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent(&inode->io_tree, start_pos, last_pos,
				      cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}
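
/*
 * Sketch of how a caller is expected to handle the three outcomes above
 * (illustrative only; the real caller is btrfs_buffered_write() below):
 *
 *	extents_locked = lock_and_cleanup_extent_if_need(inode, pages, ...);
 *	if (extents_locked < 0) {
 *		if (!nowait && extents_locked == -EAGAIN)
 *			goto again;	// pages were released, re-prepare them
 *		return extents_locked;	// hard error
 *	}
 *	...
 *	if (extents_locked)
 *		unlock_extent(&inode->io_tree, lockstart, lockend, &cached);
 */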

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset.
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range.
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          If we can nocow, and updates @write_bytes.
 *  0           If we can't do a nocow write.
 * -EAGAIN      If we can't do a nocow write because snapshotting of the
 *              inode's root is in progress.
 * < 0          If an error happened.
 *
 * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
						  &cached_state)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return -EAGAIN;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
						   &cached_state);
	}
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			NULL, NULL, NULL, nowait, false);
	if (ret <= 0)
		btrfs_drew_write_unlock(&root->snapshot_lock);
	else
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	return ret;
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now, ts;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	ts = inode_get_mtime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_mtime_to_ts(inode, now);

	ts = inode_get_ctime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_ctime_to_ts(inode, now);

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
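
/*
 * Minimal pairing sketch for the nocow helpers above (illustrative only,
 * not part of the original file):
 *
 *	size_t write_bytes = len;
 *
 *	ret = btrfs_check_nocow_lock(inode, pos, &write_bytes, nowait);
 *	if (ret > 0) {
 *		// write_bytes may have shrunk to the nocow-able prefix
 *		... do the nocow write ...
 *		btrfs_check_nocow_unlock(inode);
 *	}
 */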

static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
			     size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	/*
	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really NOCOW into the target range (using
	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return -EAGAIN;

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will get an ENOSPC error
	 * there if needed.  We don't need to start yet another transaction to
	 * update the inode, as we will update it when we finish writing
	 * whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret)
			return ret;
	}

	return 0;
}

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					       struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);

	if (nowait)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid a recursive lock.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes, nowait);
		if (ret < 0) {
			int can_nocow;

			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only. write_bytes may get smaller than
			 * requested here.
			 */
			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
							   &write_bytes, nowait);
			if (can_nocow < 0)
				ret = can_nocow;
			if (can_nocow > 0)
				ret = 0;
			if (ret)
				break;
			only_release_metadata = true;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes,
						      reserve_bytes, nowait);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));

			if (nowait && ret == -ENOSPC)
				ret = -EAGAIN;
			break;
		}

		release_bytes = reserve_bytes;
again:
		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
			break;
		}

		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes, force_page_uptodate, false);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		if (extents_locked < 0) {
			if (!nowait && extents_locked == -EAGAIN)
				goto again;

			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
				      lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

		cond_resched();

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	return num_written ? num_written : ret;
}
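
/*
 * Worked example of the reservation arithmetic in the loop above
 * (illustrative, not part of the original file): with sectorsize == 4096,
 * pos == 6144 and write_bytes == 3000:
 *
 *	sector_offset = 6144 & 4095                  = 2048
 *	reserve_bytes = round_up(3000 + 2048, 4096)  = 8192 (two sectors)
 *
 * If the user copy then only manages copied == 1000 bytes, the code keeps
 * round_up(1000 + 2048, 4096) = 4096 bytes reserved and releases the other
 * sector.
 */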

static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       const struct iov_iter *iter, loff_t offset)
{
	const u32 blocksize_mask = fs_info->sectorsize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
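
/*
 * Alignment example for check_direct_IO() (illustrative, not part of the
 * original file): with sectorsize == 4096, blocksize_mask is 0xfff, so a DIO
 * at offset 8192 with 4096-aligned buffers passes, while offset 8704
 * (8704 & 0xfff == 512) fails and the write falls back to buffered IO.
 */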

static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos;
	ssize_t written = 0;
	ssize_t written_buffered;
	size_t prev_left = 0;
	loff_t endbyte;
	ssize_t err;
	unsigned int ilock_flags = 0;
	struct iomap_dio *dio;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	/*
	 * If the write DIO is within EOF, use a shared lock and also only if
	 * security bits will likely not be dropped by file_remove_privs() called
	 * from btrfs_write_check(). Either will need to be rechecked after the
	 * lock was acquired.
	 */
	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
		ilock_flags |= BTRFS_ILOCK_SHARED;

relock:
	err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (err < 0)
		return err;

	/* Shared lock cannot be used with security bits set. */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		return err;
	}

	err = btrfs_write_check(iocb, from, err);
	if (err < 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto out;
	}

	pos = iocb->ki_pos;
	/*
	 * Re-check since the file size may have changed just before taking
	 * the lock, or pos may have changed because of O_APPEND in
	 * generic_write_checks().
	 */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
	    pos + iov_iter_count(from) > i_size_read(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	if (check_direct_IO(fs_info, from, pos)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto buffered;
	}

	/*
	 * The iov_iter can be mapped to the same file range we are writing to.
	 * If that's the case, then we will deadlock in the iomap code, because
	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
	 * an ordered extent, and after that it will fault in the pages that the
	 * iov_iter refers to. During the fault-in we end up in the readahead
	 * pages code (starting at btrfs_readahead()), which will lock the range,
	 * find that ordered extent and then wait for it to complete (at
	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
	 * obviously the ordered extent can never complete as we didn't submit
	 * yet the respective bio(s). This always happens when the buffer is
	 * memory mapped to the same file range, since the iomap DIO code always
	 * invalidates pages in the target file range (after starting and waiting
	 * for any writeback).
	 *
	 * So here we disable page faults in the iov_iter and then retry if we
	 * got -EFAULT, faulting in the pages before the retry.
	 */
	from->nofault = true;
	dio = btrfs_dio_write(iocb, from, written);
	from->nofault = false;

	/*
	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
	 * iocb, and that needs to lock the inode. So unlock it before calling
	 * iomap_dio_complete() to avoid a deadlock.
	 */
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);

	if (IS_ERR_OR_NULL(dio))
		err = PTR_ERR_OR_ZERO(dio);
	else
		err = iomap_dio_complete(dio);

	/* No increment (+=) because iomap returns a cumulative value. */
	if (err > 0)
		written = err;

	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
		const size_t left = iov_iter_count(from);
		/*
		 * We have more data left to write. Try to fault in as many of
		 * the remaining pages as possible and retry. We do this
		 * without releasing and locking again the inode, to prevent
		 * races with truncate.
		 *
		 * Also, in case the iov refers to pages in the file range of the
		 * file we want to write to (due to a mmap), we could enter an
		 * infinite loop if we retry after faulting the pages in, since
		 * iomap will invalidate any pages in the range early on, before
		 * it tries to fault in the pages of the iov. So we keep track of
		 * how much was left of the iov in the previous EFAULT and fall
		 * back to buffered IO in case we haven't made any progress.
		 */
		if (left == prev_left) {
			err = -ENOTBLK;
		} else {
			fault_in_iov_iter_readable(from, left);
			prev_left = left;
			goto relock;
		}
	}

	/*
	 * If 'err' is -ENOTBLK or we have not written all data, then it means
	 * we must fall back to buffered IO.
	 */
	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
		goto out;

buffered:
	/*
	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
	 * it must retry the operation in a context where blocking is acceptable,
	 * because even if we end up not blocking during the buffered IO attempt
	 * below, we will block when flushing and waiting for the IO.
	 */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		err = -EAGAIN;
		goto out;
	}

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return err < 0 ? err : written;
}

static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t count;
	ssize_t ret;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	count = encoded->len;
	ret = generic_write_checks_count(iocb, &count);
	if (ret == 0 && count != encoded->len) {
		/*
		 * The write got truncated by generic_write_checks_count(). We
		 * can't do a partial encoded write.
		 */
		ret = -EFBIG;
	}
	if (ret || encoded->len == 0)
		goto out;

	ret = btrfs_write_check(iocb, from, encoded->len);
	if (ret < 0)
		goto out;

	ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
	btrfs_inode_unlock(BTRFS_I(inode), 0);
	return ret;
}

ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
	ssize_t num_written, num_sync;

	/*
	 * If the fs flips readonly due to some impossible error, although we
	 * have opened a file as writable, we have to stop this write operation
	 * to ensure consistency.
	 */
	if (BTRFS_FS_ERROR(inode->root->fs_info))
		return -EROFS;

	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (encoded) {
		num_written = btrfs_encoded_write(iocb, from, encoded);
		num_sync = encoded->len;
	} else if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = btrfs_direct_write(iocb, from);
		num_sync = num_written;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		num_sync = num_written;
	}

	btrfs_set_inode_last_sub_trans(inode);

	if (num_sync > 0) {
		num_sync = generic_write_sync(iocb, num_sync);
		if (num_sync < 0)
			num_written = num_sync;
	}

	return num_written;
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return btrfs_do_write_iter(iocb, from, NULL);
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private) {
		kfree(private->filldir_buf);
		free_extent_state(private->llseek_cached_state);
		kfree(private);
		filp->private_data = NULL;
	}

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size.  This tries to flush down new bytes that may
	 * have been written if the application was using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Especially in
	 * the case of multiple disks using a raid profile, a large IO can be
	 * split into several stripe-length segments (currently 64K).
	 */
	blk_start_plug(&plug);
	ret = btrfs_fdatawrite_range(inode, start, end);
	blk_finish_plug(&plug);

	return ret;
}
1750
1751static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
1752{
1753	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
1754	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1755
1756	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
1757	    list_empty(&ctx->ordered_extents))
1758		return true;
1759
1760	/*
1761	 * If we are doing a fast fsync we can not bail out if the inode's
1762	 * last_trans is <= then the last committed transaction, because we only
1763	 * update the last_trans of the inode during ordered extent completion,
1764	 * and for a fast fsync we don't wait for that, we only wait for the
1765	 * writeback to complete.
1766	 */
1767	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
1768	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
1769	     list_empty(&ctx->ordered_extents)))
1770		return true;
1771
1772	return false;
1773}
1774
1775/*
1776 * fsync call for both files and directories.  This logs the inode into
1777 * the tree log instead of forcing full commits whenever possible.
1778 *
1779 * It needs to call filemap_fdatawait so that all ordered extent updates
1780 * in the metadata btree are up to date for copying to the log.
1781 *
1782 * It drops the inode mutex before doing the tree log commit.  This is an
1783 * important optimization for directories because holding the mutex prevents
1784 * new operations on the dir while we write to disk.
1785 */
1786int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1787{
1788	struct dentry *dentry = file_dentry(file);
1789	struct inode *inode = d_inode(dentry);
1790	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1791	struct btrfs_root *root = BTRFS_I(inode)->root;
1792	struct btrfs_trans_handle *trans;
1793	struct btrfs_log_ctx ctx;
1794	int ret = 0, err;
1795	u64 len;
1796	bool full_sync;
1797
1798	trace_btrfs_sync_file(file, datasync);
1799
1800	btrfs_init_log_ctx(&ctx, inode);
1801
1802	/*
1803	 * Always set the range to a full range, otherwise we can get into
1804	 * several problems, from missing file extent items to represent holes
1805	 * when not using the NO_HOLES feature, to log tree corruption due to
1806	 * races between hole detection during logging and completion of ordered
1807	 * extents outside the range, to missing checksums due to ordered extents
1808	 * for which we flushed only a subset of their pages.
1809	 */
1810	start = 0;
1811	end = LLONG_MAX;
1812	len = (u64)LLONG_MAX + 1;
1813
1814	/*
1815	 * We write the dirty pages in the range and wait until they complete
1816	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
1817	 * pages concurrently and improve the performance.  See
1818	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1819	 */
1820	ret = start_ordered_ops(inode, start, end);
1821	if (ret)
1822		goto out;
1823
1824	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1825
1826	atomic_inc(&root->log_batch);
1827
1828	/*
1829	 * Before we acquired the inode's lock and the mmap lock, someone may
1830	 * have dirtied more pages in the target range. We need to make sure
1831	 * that writeback for any such pages does not start while we are logging
1832	 * the inode, because if it does, any of the following might happen when
1833	 * we are not doing a full inode sync:
1834	 *
1835	 * 1) We log an extent after its writeback finishes but before its
1836	 *    checksums are added to the csum tree, leading to -EIO errors
1837	 *    when attempting to read the extent after a log replay.
1838	 *
1839	 * 2) We can end up logging an extent before its writeback finishes.
1840	 *    Therefore after the log replay we will have a file extent item
1841	 *    pointing to an unwritten extent (and no data checksums as well).
1842	 *
1843	 * So trigger writeback for any eventual new dirty pages and then we
1844	 * wait for all ordered extents to complete below.
1845	 */
1846	ret = start_ordered_ops(inode, start, end);
1847	if (ret) {
1848		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1849		goto out;
1850	}
1851
1852	/*
1853	 * Always check for the full sync flag while holding the inode's lock,
1854	 * to avoid races with other tasks. The flag must either be set during
1855	 * the whole time we are logging, or be clear during that whole time.
1856	 * We check the flag here after starting delalloc above, because when
1857	 * running delalloc the full sync flag may be set if we need to drop
1858	 * extra extent map ranges due to temporary memory allocation failures.
1859	 */
1860	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1861			     &BTRFS_I(inode)->runtime_flags);
1862
1863	/*
1864	 * We have to do this here to avoid the priority inversion of waiting on
1865	 * IO of a lower priority task while holding a transaction open.
1866	 *
1867	 * For a full fsync we wait for the ordered extents to complete while
1868	 * for a fast fsync we wait just for writeback to complete, and then
1869	 * attach the ordered extents to the transaction so that a transaction
1870	 * commit waits for their completion, to avoid data loss if we fsync,
1871	 * then the current transaction commits before the ordered extents
1872	 * complete, and a power failure happens right after that.
1873	 *
1874	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1875	 * logical address recorded in the ordered extent may change. We need
1876	 * to wait for the IO to stabilize the logical address.
1877	 */
1878	if (full_sync || btrfs_is_zoned(fs_info)) {
1879		ret = btrfs_wait_ordered_range(inode, start, len);
1880	} else {
1881		/*
1882		 * Get our ordered extents as soon as possible to avoid doing
1883		 * checksum lookups in the csum tree, and use instead the
1884		 * checksums attached to the ordered extents.
1885		 */
1886		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
1887						      &ctx.ordered_extents);
1888		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
1889	}
1890
1891	if (ret)
1892		goto out_release_extents;
1893
1894	atomic_inc(&root->log_batch);
1895
1896	if (skip_inode_logging(&ctx)) {
1897		/*
1898		 * We've had everything committed since the last time we were
1899		 * modified so clear this flag in case it was set for whatever
1900		 * reason, it's no longer relevant.
1901		 */
1902		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1903			  &BTRFS_I(inode)->runtime_flags);
1904		/*
1905		 * An ordered extent might have started before and completed
1906		 * already with io errors, in which case the inode was not
1907		 * updated and we end up here. So check the inode's mapping
1908		 * for any errors that might have happened since we last
1909		 * called fsync.
1910		 */
1911		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
1912		goto out_release_extents;
1913	}
1914
1915	/*
1916	 * We use start here because we will need to wait on the IO to complete
1917	 * in btrfs_sync_log, which could require joining a transaction (for
1918	 * example checking cross references in the nocow path).  If we use join
1919	 * here we could get into a situation where we're waiting on IO to
1920	 * happen that is blocked on a transaction trying to commit.  With start
1921	 * we inc the extwriter counter, so we wait for all extwriters to exit
1922	 * before we start blocking joiners.  This comment is to keep somebody
1923	 * from thinking they are super smart and changing this to
1924	 * btrfs_join_transaction *cough*Josef*cough*.
1925	 */
1926	trans = btrfs_start_transaction(root, 0);
1927	if (IS_ERR(trans)) {
1928		ret = PTR_ERR(trans);
1929		goto out_release_extents;
1930	}
1931	trans->in_fsync = true;
1932
1933	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1934	btrfs_release_log_ctx_extents(&ctx);
1935	if (ret < 0) {
1936		/* Fallthrough and commit/free transaction. */
1937		ret = BTRFS_LOG_FORCE_COMMIT;
1938	}
1939
1940	/* We've logged all the items and now have a consistent
1941	 * version of the file in the log.  It is possible that
1942	 * someone will come in and modify the file, but that's
1943	 * fine because the log is consistent on disk, and we
1944	 * have references to all of the file's extents.
1945	 *
1946	 * It is possible that someone will come in and log the
1947	 * file again, but that will end up using the synchronization
1948	 * inside btrfs_sync_log to keep things safe.
1949	 */
1950	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1951
1952	if (ret == BTRFS_NO_LOG_SYNC) {
1953		ret = btrfs_end_transaction(trans);
1954		goto out;
1955	}
1956
1957	/* We successfully logged the inode, attempt to sync the log. */
1958	if (!ret) {
1959		ret = btrfs_sync_log(trans, root, &ctx);
1960		if (!ret) {
1961			ret = btrfs_end_transaction(trans);
1962			goto out;
1963		}
1964	}
1965
1966	/*
1967	 * At this point we need to commit the transaction because we had
1968	 * btrfs_need_log_full_commit() or some other error.
1969	 *
1970	 * If we didn't do a full sync we have to stop the trans handle, wait on
1971	 * the ordered extents, start it again and commit the transaction.  If
1972	 * we attempt to wait on the ordered extents here we could deadlock with
1973	 * something like fallocate() that is holding the extent lock trying to
1974	 * start a transaction while some other thread is trying to commit the
1975	 * transaction while we (fsync) are currently holding the transaction
1976	 * open.
1977	 */
1978	if (!full_sync) {
1979		ret = btrfs_end_transaction(trans);
1980		if (ret)
1981			goto out;
1982		ret = btrfs_wait_ordered_range(inode, start, len);
1983		if (ret)
1984			goto out;
1985
1986		/*
1987		 * This is safe to use here because we're only interested in
1988		 * making sure the transaction that had the ordered extents is
1989		 * committed.  We aren't waiting on anything past this point,
1990		 * we're purely getting the transaction and committing it.
1991		 */
1992		trans = btrfs_attach_transaction_barrier(root);
1993		if (IS_ERR(trans)) {
1994			ret = PTR_ERR(trans);
1995
1996			/*
1997			 * We committed the transaction and there's no currently
1998			 * running transaction, this means everything we care
1999			 * about made it to disk and we are done.
2000			 */
2001			if (ret == -ENOENT)
2002				ret = 0;
2003			goto out;
2004		}
2005	}
2006
2007	ret = btrfs_commit_transaction(trans);
2008out:
2009	ASSERT(list_empty(&ctx.list));
2010	ASSERT(list_empty(&ctx.conflict_inodes));
2011	err = file_check_and_advance_wb_err(file);
2012	if (!ret)
2013		ret = err;
2014	return ret > 0 ? -EIO : ret;
2015
2016out_release_extents:
2017	btrfs_release_log_ctx_extents(&ctx);
2018	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2019	goto out;
2020}
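/*
 * Illustrative userspace sketch (not part of this file): btrfs_sync_file()
 * backs both fsync(2) and fdatasync(2), with @datasync distinguishing the
 * two:
 *
 *	int fd = open("journal", O_WRONLY | O_APPEND);
 *	write(fd, rec, rec_len);
 *	if (fsync(fd) < 0)		// durable data + metadata
 *		perror("fsync");
 *	fdatasync(fd);			// the datasync variant
 */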
2021
2022static const struct vm_operations_struct btrfs_file_vm_ops = {
2023	.fault		= filemap_fault,
2024	.map_pages	= filemap_map_pages,
2025	.page_mkwrite	= btrfs_page_mkwrite,
2026};
2027
2028static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2029{
2030	struct address_space *mapping = filp->f_mapping;
2031
2032	if (!mapping->a_ops->read_folio)
2033		return -ENOEXEC;
2034
2035	file_accessed(filp);
2036	vma->vm_ops = &btrfs_file_vm_ops;
2037
2038	return 0;
2039}
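/*
 * Illustrative userspace sketch (not part of this file): a shared writable
 * mapping is what makes the ->page_mkwrite hook installed above fire on the
 * first store to a clean page:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';			// fault -> btrfs_page_mkwrite()
 *	msync(p, len, MS_SYNC);		// write the dirtied pages back
 */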
2040
2041static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2042			  int slot, u64 start, u64 end)
2043{
2044	struct btrfs_file_extent_item *fi;
2045	struct btrfs_key key;
2046
2047	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2048		return 0;
2049
2050	btrfs_item_key_to_cpu(leaf, &key, slot);
2051	if (key.objectid != btrfs_ino(inode) ||
2052	    key.type != BTRFS_EXTENT_DATA_KEY)
2053		return 0;
2054
2055	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2056
2057	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2058		return 0;
2059
2060	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2061		return 0;
2062
2063	if (key.offset == end)
2064		return 1;
2065	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2066		return 1;
2067	return 0;
2068}
2069
2070static int fill_holes(struct btrfs_trans_handle *trans,
2071		struct btrfs_inode *inode,
2072		struct btrfs_path *path, u64 offset, u64 end)
2073{
2074	struct btrfs_fs_info *fs_info = trans->fs_info;
2075	struct btrfs_root *root = inode->root;
2076	struct extent_buffer *leaf;
2077	struct btrfs_file_extent_item *fi;
2078	struct extent_map *hole_em;
2079	struct btrfs_key key;
2080	int ret;
2081
2082	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2083		goto out;
2084
2085	key.objectid = btrfs_ino(inode);
2086	key.type = BTRFS_EXTENT_DATA_KEY;
2087	key.offset = offset;
2088
2089	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2090	if (ret <= 0) {
2091		/*
2092		 * We should have dropped this offset, so if we find it then
2093		 * something has gone horribly wrong.
2094		 */
2095		if (ret == 0)
2096			ret = -EINVAL;
2097		return ret;
2098	}
2099
2100	leaf = path->nodes[0];
2101	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2102		u64 num_bytes;
2103
2104		path->slots[0]--;
2105		fi = btrfs_item_ptr(leaf, path->slots[0],
2106				    struct btrfs_file_extent_item);
2107		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2108			end - offset;
2109		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2110		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2111		btrfs_set_file_extent_offset(leaf, fi, 0);
2112		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2113		btrfs_mark_buffer_dirty(trans, leaf);
2114		goto out;
2115	}
2116
2117	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2118		u64 num_bytes;
2119
2120		key.offset = offset;
2121		btrfs_set_item_key_safe(trans, path, &key);
2122		fi = btrfs_item_ptr(leaf, path->slots[0],
2123				    struct btrfs_file_extent_item);
2124		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2125			offset;
2126		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2127		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2128		btrfs_set_file_extent_offset(leaf, fi, 0);
2129		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2130		btrfs_mark_buffer_dirty(trans, leaf);
2131		goto out;
2132	}
2133	btrfs_release_path(path);
2134
2135	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2136				       end - offset);
2137	if (ret)
2138		return ret;
2139
2140out:
2141	btrfs_release_path(path);
2142
2143	hole_em = alloc_extent_map();
2144	if (!hole_em) {
2145		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2146		btrfs_set_inode_full_sync(inode);
2147	} else {
2148		hole_em->start = offset;
2149		hole_em->len = end - offset;
2150		hole_em->ram_bytes = hole_em->len;
2151		hole_em->orig_start = offset;
2152
2153		hole_em->block_start = EXTENT_MAP_HOLE;
2154		hole_em->block_len = 0;
2155		hole_em->orig_block_len = 0;
2156		hole_em->generation = trans->transid;
2157
2158		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2159		free_extent_map(hole_em);
2160		if (ret)
2161			btrfs_set_inode_full_sync(inode);
2162	}
2163
2164	return 0;
2165}
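/*
 * Note (illustrative, not part of this file): the NO_HOLES incompat feature
 * checked at the top of fill_holes() makes holes implicit, so no file extent
 * items are inserted to represent them. It is typically enabled at mkfs
 * time, e.g.:
 *
 *	mkfs.btrfs -O no-holes /dev/sdX
 */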
2166
2167/*
2168 * Find a hole extent on the given inode and change start/len to the end
2169 * of the hole extent (a hole/vacuum extent whose em->start <= start &&
2170 *	em->start + em->len > start).
2171 * When a hole extent is found, return 1 and modify start/len.
2172 */
2173static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2174{
2175	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2176	struct extent_map *em;
2177	int ret = 0;
2178
2179	em = btrfs_get_extent(inode, NULL, 0,
2180			      round_down(*start, fs_info->sectorsize),
2181			      round_up(*len, fs_info->sectorsize));
2182	if (IS_ERR(em))
2183		return PTR_ERR(em);
2184
2185	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
2186	if (em->block_start == EXTENT_MAP_HOLE) {
2187		ret = 1;
2188		*len = em->start + em->len > *start + *len ?
2189		       0 : *start + *len - em->start - em->len;
2190		*start = em->start + em->len;
2191	}
2192	free_extent_map(em);
2193	return ret;
2194}
2195
2196static void btrfs_punch_hole_lock_range(struct inode *inode,
2197					const u64 lockstart,
2198					const u64 lockend,
2199					struct extent_state **cached_state)
2200{
2201	/*
2202	 * For the subpage case, if the range is not at a page boundary, we
2203	 * could have pages at the leading/trailing parts of the range.
2204	 * This could lead to an endless loop since filemap_range_has_page()
2205	 * would always return true.
2206	 * So here we need to do extra page alignment for
2207	 * filemap_range_has_page().
2208	 */
2209	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2210	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2211
2212	while (1) {
2213		truncate_pagecache_range(inode, lockstart, lockend);
2214
2215		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2216			    cached_state);
2217		/*
2218		 * We can't have ordered extents in the range, nor dirty/writeback
2219		 * pages, because we have locked the inode's VFS lock in exclusive
2220		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2221		 * we have flushed all delalloc in the range and we have waited
2222		 * for any ordered extents in the range to complete.
2223		 * We can race with anyone reading pages from this range, so after
2224		 * locking the range check if we have pages in the range, and if
2225		 * we do, unlock the range and retry.
2226		 */
2227		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2228					    page_lockend))
2229			break;
2230
2231		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2232			      cached_state);
2233	}
2234
2235	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2236}
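/*
 * Worked example for the alignment above (an illustration, assuming 64K
 * pages with a 4K sector size): punching [4K, 132K) gives lockstart = 4K
 * and lockend = 132K - 1. The pages covering [0, 64K) and [128K, 192K)
 * legitimately stay in the page cache, since they contain bytes outside the
 * punched range, so checking the unrounded range would loop forever. With
 * page_lockstart = 64K and page_lockend = 128K - 1 we only consider pages
 * that are fully covered by the range.
 */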
2237
2238static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2239				     struct btrfs_inode *inode,
2240				     struct btrfs_path *path,
2241				     struct btrfs_replace_extent_info *extent_info,
2242				     const u64 replace_len,
2243				     const u64 bytes_to_drop)
2244{
2245	struct btrfs_fs_info *fs_info = trans->fs_info;
2246	struct btrfs_root *root = inode->root;
2247	struct btrfs_file_extent_item *extent;
2248	struct extent_buffer *leaf;
2249	struct btrfs_key key;
2250	int slot;
2251	struct btrfs_ref ref = { 0 };
2252	int ret;
2253
2254	if (replace_len == 0)
2255		return 0;
2256
2257	if (extent_info->disk_offset == 0 &&
2258	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2259		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2260		return 0;
2261	}
2262
2263	key.objectid = btrfs_ino(inode);
2264	key.type = BTRFS_EXTENT_DATA_KEY;
2265	key.offset = extent_info->file_offset;
2266	ret = btrfs_insert_empty_item(trans, root, path, &key,
2267				      sizeof(struct btrfs_file_extent_item));
2268	if (ret)
2269		return ret;
2270	leaf = path->nodes[0];
2271	slot = path->slots[0];
2272	write_extent_buffer(leaf, extent_info->extent_buf,
2273			    btrfs_item_ptr_offset(leaf, slot),
2274			    sizeof(struct btrfs_file_extent_item));
2275	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2276	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2277	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2278	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2279	if (extent_info->is_new_extent)
2280		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2281	btrfs_mark_buffer_dirty(trans, leaf);
2282	btrfs_release_path(path);
2283
2284	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2285						replace_len);
2286	if (ret)
2287		return ret;
2288
2289	/* If it's a hole, nothing more needs to be done. */
2290	if (extent_info->disk_offset == 0) {
2291		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2292		return 0;
2293	}
2294
2295	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2296
2297	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2298		key.objectid = extent_info->disk_offset;
2299		key.type = BTRFS_EXTENT_ITEM_KEY;
2300		key.offset = extent_info->disk_len;
2301		ret = btrfs_alloc_reserved_file_extent(trans, root,
2302						       btrfs_ino(inode),
2303						       extent_info->file_offset,
2304						       extent_info->qgroup_reserved,
2305						       &key);
2306	} else {
2307		u64 ref_offset;
2308
2309		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2310				       extent_info->disk_offset,
2311				       extent_info->disk_len, 0,
2312				       root->root_key.objectid);
2313		ref_offset = extent_info->file_offset - extent_info->data_offset;
2314		btrfs_init_data_ref(&ref, root->root_key.objectid,
2315				    btrfs_ino(inode), ref_offset, 0, false);
2316		ret = btrfs_inc_extent_ref(trans, &ref);
2317	}
2318
2319	extent_info->insertions++;
2320
2321	return ret;
2322}
2323
2324/*
2325 * The respective range must have been previously locked, as well as the inode.
2326 * The end offset is inclusive (last byte of the range).
2327 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2328 * the file range with an extent.
2329 * When not punching a hole, we don't want to end up in a state where we dropped
2330 * extents without inserting a new one, so we must abort the transaction to avoid
2331 * a corruption.
2332 */
2333int btrfs_replace_file_extents(struct btrfs_inode *inode,
2334			       struct btrfs_path *path, const u64 start,
2335			       const u64 end,
2336			       struct btrfs_replace_extent_info *extent_info,
2337			       struct btrfs_trans_handle **trans_out)
2338{
2339	struct btrfs_drop_extents_args drop_args = { 0 };
2340	struct btrfs_root *root = inode->root;
2341	struct btrfs_fs_info *fs_info = root->fs_info;
2342	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2343	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2344	struct btrfs_trans_handle *trans = NULL;
2345	struct btrfs_block_rsv *rsv;
2346	unsigned int rsv_count;
2347	u64 cur_offset;
2348	u64 len = end - start;
2349	int ret = 0;
2350
2351	if (end <= start)
2352		return -EINVAL;
2353
2354	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2355	if (!rsv) {
2356		ret = -ENOMEM;
2357		goto out;
2358	}
2359	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2360	rsv->failfast = true;
2361
2362	/*
2363	 * 1 - update the inode
2364	 * 1 - removing the extents in the range
2365	 * 1 - adding the hole extent if no_holes isn't set or if we are
2366	 *     replacing the range with a new extent
2367	 */
2368	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2369		rsv_count = 3;
2370	else
2371		rsv_count = 2;
2372
2373	trans = btrfs_start_transaction(root, rsv_count);
2374	if (IS_ERR(trans)) {
2375		ret = PTR_ERR(trans);
2376		trans = NULL;
2377		goto out_free;
2378	}
2379
2380	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2381				      min_size, false);
2382	if (WARN_ON(ret))
2383		goto out_trans;
2384	trans->block_rsv = rsv;
2385
2386	cur_offset = start;
2387	drop_args.path = path;
2388	drop_args.end = end + 1;
2389	drop_args.drop_cache = true;
2390	while (cur_offset < end) {
2391		drop_args.start = cur_offset;
2392		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2393		/* If we are punching a hole, decrement the inode's byte count */
2394		if (!extent_info)
2395			btrfs_update_inode_bytes(inode, 0,
2396						 drop_args.bytes_found);
2397		if (ret != -ENOSPC) {
2398			/*
2399			 * The only time we don't want to abort is if we are
2400			 * attempting to clone a partial inline extent, in which
2401			 * case we'll get EOPNOTSUPP.  However if we aren't
2402			 * cloning we need to abort no matter what, because if we
2403			 * got EOPNOTSUPP via prealloc then we messed up and
2404			 * need to abort.
2405			 */
2406			if (ret &&
2407			    (ret != -EOPNOTSUPP ||
2408			     (extent_info && extent_info->is_new_extent)))
2409				btrfs_abort_transaction(trans, ret);
2410			break;
2411		}
2412
2413		trans->block_rsv = &fs_info->trans_block_rsv;
2414
2415		if (!extent_info && cur_offset < drop_args.drop_end &&
2416		    cur_offset < ino_size) {
2417			ret = fill_holes(trans, inode, path, cur_offset,
2418					 drop_args.drop_end);
2419			if (ret) {
2420				/*
2421				 * If we failed then we didn't insert our hole
2422				 * entries for the area we dropped, so now the
2423				 * fs is corrupted, so we must abort the
2424				 * transaction.
2425				 */
2426				btrfs_abort_transaction(trans, ret);
2427				break;
2428			}
2429		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2430			/*
2431			 * We are past the i_size here, but since we didn't
2432			 * insert holes we need to clear the mapped area so we
2433			 * know to not set disk_i_size in this area until a new
2434			 * file extent is inserted here.
2435			 */
2436			ret = btrfs_inode_clear_file_extent_range(inode,
2437					cur_offset,
2438					drop_args.drop_end - cur_offset);
2439			if (ret) {
2440				/*
2441				 * We couldn't clear our area, so we could
2442				 * presumably adjust up and corrupt the fs, so
2443				 * we need to abort.
2444				 */
2445				btrfs_abort_transaction(trans, ret);
2446				break;
2447			}
2448		}
2449
2450		if (extent_info &&
2451		    drop_args.drop_end > extent_info->file_offset) {
2452			u64 replace_len = drop_args.drop_end -
2453					  extent_info->file_offset;
2454
2455			ret = btrfs_insert_replace_extent(trans, inode,	path,
2456					extent_info, replace_len,
2457					drop_args.bytes_found);
2458			if (ret) {
2459				btrfs_abort_transaction(trans, ret);
2460				break;
2461			}
2462			extent_info->data_len -= replace_len;
2463			extent_info->data_offset += replace_len;
2464			extent_info->file_offset += replace_len;
2465		}
2466
2467		/*
2468		 * We are releasing our handle on the transaction, balance the
2469		 * dirty pages of the btree inode and flush delayed items, and
2470		 * then get a new transaction handle, which may now point to a
2471		 * new transaction in case someone else may have committed the
2472		 * transaction we used to replace/drop file extent items. So
2473		 * bump the inode's iversion and update mtime and ctime except
2474		 * if we are called from a dedupe context. This is because a
2475		 * power failure/crash may happen after the transaction is
2476		 * committed and before we finish replacing/dropping all the
2477		 * file extent items we need.
2478		 */
2479		inode_inc_iversion(&inode->vfs_inode);
2480
2481		if (!extent_info || extent_info->update_times)
2482			inode_set_mtime_to_ts(&inode->vfs_inode,
2483					      inode_set_ctime_current(&inode->vfs_inode));
2484
2485		ret = btrfs_update_inode(trans, inode);
2486		if (ret)
2487			break;
2488
2489		btrfs_end_transaction(trans);
2490		btrfs_btree_balance_dirty(fs_info);
2491
2492		trans = btrfs_start_transaction(root, rsv_count);
2493		if (IS_ERR(trans)) {
2494			ret = PTR_ERR(trans);
2495			trans = NULL;
2496			break;
2497		}
2498
2499		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2500					      rsv, min_size, false);
2501		if (WARN_ON(ret))
2502			break;
2503		trans->block_rsv = rsv;
2504
2505		cur_offset = drop_args.drop_end;
2506		len = end - cur_offset;
2507		if (!extent_info && len) {
2508			ret = find_first_non_hole(inode, &cur_offset, &len);
2509			if (unlikely(ret < 0))
2510				break;
2511			if (ret && !len) {
2512				ret = 0;
2513				break;
2514			}
2515		}
2516	}
2517
2518	/*
2519	 * If we were cloning, force the next fsync to be a full one since we
2520	 * replaced (or just dropped in the case of cloning holes when
2521	 * NO_HOLES is enabled) file extent items and did not setup new extent
2522	 * maps for the replacement extents (or holes).
2523	 */
2524	if (extent_info && !extent_info->is_new_extent)
2525		btrfs_set_inode_full_sync(inode);
2526
2527	if (ret)
2528		goto out_trans;
2529
2530	trans->block_rsv = &fs_info->trans_block_rsv;
2531	/*
2532	 * If we are using the NO_HOLES feature we might already have had a
2533	 * hole that overlaps a part of the region [lockstart, lockend] and
2534	 * ends at (or beyond) lockend. Since we have no file extent items to
2535	 * represent holes, drop_end can be less than lockend and so we must
2536	 * make sure we have an extent map representing the existing hole (the
2537	 * call to __btrfs_drop_extents() might have dropped the existing extent
2538	 * map representing the existing hole), otherwise the fast fsync path
2539	 * will not record the existence of the hole region
2540	 * [existing_hole_start, lockend].
2541	 */
2542	if (drop_args.drop_end <= end)
2543		drop_args.drop_end = end + 1;
2544	/*
2545	 * Don't insert file hole extent item if it's for a range beyond eof
2546	 * (because it's useless) or if it represents a 0 bytes range (when
2547	 * cur_offset == drop_end).
2548	 */
2549	if (!extent_info && cur_offset < ino_size &&
2550	    cur_offset < drop_args.drop_end) {
2551		ret = fill_holes(trans, inode, path, cur_offset,
2552				 drop_args.drop_end);
2553		if (ret) {
2554			/* Same comment as above. */
2555			btrfs_abort_transaction(trans, ret);
2556			goto out_trans;
2557		}
2558	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2559		/* See the comment in the loop above for the reasoning here. */
2560		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2561					drop_args.drop_end - cur_offset);
2562		if (ret) {
2563			btrfs_abort_transaction(trans, ret);
2564			goto out_trans;
2565		}
2566
2567	}
2568	if (extent_info) {
2569		ret = btrfs_insert_replace_extent(trans, inode, path,
2570				extent_info, extent_info->data_len,
2571				drop_args.bytes_found);
2572		if (ret) {
2573			btrfs_abort_transaction(trans, ret);
2574			goto out_trans;
2575		}
2576	}
2577
2578out_trans:
2579	if (!trans)
2580		goto out_free;
2581
2582	trans->block_rsv = &fs_info->trans_block_rsv;
2583	if (ret)
2584		btrfs_end_transaction(trans);
2585	else
2586		*trans_out = trans;
2587out_free:
2588	btrfs_free_block_rsv(fs_info, rsv);
2589out:
2590	return ret;
2591}
2592
2593static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2594{
2595	struct inode *inode = file_inode(file);
2596	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2597	struct btrfs_root *root = BTRFS_I(inode)->root;
2598	struct extent_state *cached_state = NULL;
2599	struct btrfs_path *path;
2600	struct btrfs_trans_handle *trans = NULL;
2601	u64 lockstart;
2602	u64 lockend;
2603	u64 tail_start;
2604	u64 tail_len;
2605	u64 orig_start = offset;
2606	int ret = 0;
2607	bool same_block;
2608	u64 ino_size;
2609	bool truncated_block = false;
2610	bool updated_inode = false;
2611
2612	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2613
2614	ret = btrfs_wait_ordered_range(inode, offset, len);
2615	if (ret)
2616		goto out_only_mutex;
2617
2618	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2619	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2620	if (ret < 0)
2621		goto out_only_mutex;
2622	if (ret && !len) {
2623		/* Already in a large hole */
2624		ret = 0;
2625		goto out_only_mutex;
2626	}
2627
2628	ret = file_modified(file);
2629	if (ret)
2630		goto out_only_mutex;
2631
2632	lockstart = round_up(offset, fs_info->sectorsize);
2633	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2634	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2635		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2636	/*
2637	 * We needn't truncate any block which is beyond the end of the file
2638	 * because we are sure there is no data there.
2639	 */
2640	/*
2641	 * Only do this if we are in the same block and we aren't doing the
2642	 * entire block.
2643	 */
2644	if (same_block && len < fs_info->sectorsize) {
2645		if (offset < ino_size) {
2646			truncated_block = true;
2647			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2648						   0);
2649		} else {
2650			ret = 0;
2651		}
2652		goto out_only_mutex;
2653	}
2654
2655	/* zero back part of the first block */
2656	if (offset < ino_size) {
2657		truncated_block = true;
2658		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2659		if (ret) {
2660			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2661			return ret;
2662		}
2663	}
2664
2665	/* Check the aligned pages after the first unaligned page: if
2666	 * offset != orig_start, then the first unaligned page and several
2667	 * following pages are already in holes, so the extra check
2668	 * can be skipped. */
2669	if (offset == orig_start) {
2670		/* after truncate page, check hole again */
2671		len = offset + len - lockstart;
2672		offset = lockstart;
2673		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2674		if (ret < 0)
2675			goto out_only_mutex;
2676		if (ret && !len) {
2677			ret = 0;
2678			goto out_only_mutex;
2679		}
2680		lockstart = offset;
2681	}
2682
2683	/* Check the tail unaligned part is in a hole */
2684	tail_start = lockend + 1;
2685	tail_len = offset + len - tail_start;
2686	if (tail_len) {
2687		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2688		if (unlikely(ret < 0))
2689			goto out_only_mutex;
2690		if (!ret) {
2691			/* zero the front end of the last page */
2692			if (tail_start + tail_len < ino_size) {
2693				truncated_block = true;
2694				ret = btrfs_truncate_block(BTRFS_I(inode),
2695							tail_start + tail_len,
2696							0, 1);
2697				if (ret)
2698					goto out_only_mutex;
2699			}
2700		}
2701	}
2702
2703	if (lockend < lockstart) {
2704		ret = 0;
2705		goto out_only_mutex;
2706	}
2707
2708	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2709
2710	path = btrfs_alloc_path();
2711	if (!path) {
2712		ret = -ENOMEM;
2713		goto out;
2714	}
2715
2716	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2717					 lockend, NULL, &trans);
2718	btrfs_free_path(path);
2719	if (ret)
2720		goto out;
2721
2722	ASSERT(trans != NULL);
2723	inode_inc_iversion(inode);
2724	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2725	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2726	updated_inode = true;
2727	btrfs_end_transaction(trans);
2728	btrfs_btree_balance_dirty(fs_info);
2729out:
2730	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2731		      &cached_state);
2732out_only_mutex:
2733	if (!updated_inode && truncated_block && !ret) {
2734		/*
2735		 * If we only end up zeroing part of a page, we still need to
2736		 * update the inode item, so that all the time fields are
2737		 * updated as well as the necessary btrfs inode in memory fields
2738		 * for detecting, at fsync time, if the inode isn't yet in the
2739		 * log tree or it's there but not up to date.
2740		 */
2741		struct timespec64 now = inode_set_ctime_current(inode);
2742
2743		inode_inc_iversion(inode);
2744		inode_set_mtime_to_ts(inode, now);
2745		trans = btrfs_start_transaction(root, 1);
2746		if (IS_ERR(trans)) {
2747			ret = PTR_ERR(trans);
2748		} else {
2749			int ret2;
2750
2751			ret = btrfs_update_inode(trans, BTRFS_I(inode));
2752			ret2 = btrfs_end_transaction(trans);
2753			if (!ret)
2754				ret = ret2;
2755		}
2756	}
2757	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2758	return ret;
2759}
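/*
 * Illustrative userspace sketch (not part of this file): hole punching is
 * requested via fallocate(2), where FALLOC_FL_PUNCH_HOLE must be combined
 * with FALLOC_FL_KEEP_SIZE:
 *
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      offset, len) < 0)
 *		perror("fallocate");
 */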
2760
2761/* Helper structure to record which range is already reserved */
2762struct falloc_range {
2763	struct list_head list;
2764	u64 start;
2765	u64 len;
2766};
2767
2768/*
2769 * Helper function to add falloc range
2770 *
2771 * The caller should have locked the larger extent range containing
2772 * [start, start + len).
2773 */
2774static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2775{
2776	struct falloc_range *range = NULL;
2777
2778	if (!list_empty(head)) {
2779		/*
2780		 * As fallocate iterates by bytenr order, we only need to check
2781		 * the last range.
2782		 */
2783		range = list_last_entry(head, struct falloc_range, list);
2784		if (range->start + range->len == start) {
2785			range->len += len;
2786			return 0;
2787		}
2788	}
2789
2790	range = kmalloc(sizeof(*range), GFP_KERNEL);
2791	if (!range)
2792		return -ENOMEM;
2793	range->start = start;
2794	range->len = len;
2795	list_add_tail(&range->list, head);
2796	return 0;
2797}
2798
2799static int btrfs_fallocate_update_isize(struct inode *inode,
2800					const u64 end,
2801					const int mode)
2802{
2803	struct btrfs_trans_handle *trans;
2804	struct btrfs_root *root = BTRFS_I(inode)->root;
2805	int ret;
2806	int ret2;
2807
2808	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2809		return 0;
2810
2811	trans = btrfs_start_transaction(root, 1);
2812	if (IS_ERR(trans))
2813		return PTR_ERR(trans);
2814
2815	inode_set_ctime_current(inode);
2816	i_size_write(inode, end);
2817	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2818	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2819	ret2 = btrfs_end_transaction(trans);
2820
2821	return ret ? ret : ret2;
2822}
2823
2824enum {
2825	RANGE_BOUNDARY_WRITTEN_EXTENT,
2826	RANGE_BOUNDARY_PREALLOC_EXTENT,
2827	RANGE_BOUNDARY_HOLE,
2828};
2829
2830static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2831						 u64 offset)
2832{
2833	const u64 sectorsize = inode->root->fs_info->sectorsize;
2834	struct extent_map *em;
2835	int ret;
2836
2837	offset = round_down(offset, sectorsize);
2838	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
2839	if (IS_ERR(em))
2840		return PTR_ERR(em);
2841
2842	if (em->block_start == EXTENT_MAP_HOLE)
2843		ret = RANGE_BOUNDARY_HOLE;
2844	else if (em->flags & EXTENT_FLAG_PREALLOC)
2845		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2846	else
2847		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2848
2849	free_extent_map(em);
2850	return ret;
2851}
2852
2853static int btrfs_zero_range(struct inode *inode,
2854			    loff_t offset,
2855			    loff_t len,
2856			    const int mode)
2857{
2858	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2859	struct extent_map *em;
2860	struct extent_changeset *data_reserved = NULL;
2861	int ret;
2862	u64 alloc_hint = 0;
2863	const u64 sectorsize = fs_info->sectorsize;
2864	u64 alloc_start = round_down(offset, sectorsize);
2865	u64 alloc_end = round_up(offset + len, sectorsize);
2866	u64 bytes_to_reserve = 0;
2867	bool space_reserved = false;
2868
2869	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2870			      alloc_end - alloc_start);
2871	if (IS_ERR(em)) {
2872		ret = PTR_ERR(em);
2873		goto out;
2874	}
2875
2876	/*
2877	 * Avoid hole punching and extent allocation for some cases. More cases
2878	 * could be considered, but these are unlikely to be common and we keep things
2879	 * as simple as possible for now. Also, intentionally, if the target
2880	 * range contains one or more prealloc extents together with regular
2881	 * extents and holes, we drop all the existing extents and allocate a
2882	 * new prealloc extent, so that we get a larger contiguous disk extent.
2883	 */
2884	if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
2885		const u64 em_end = em->start + em->len;
2886
2887		if (em_end >= offset + len) {
2888			/*
2889			 * The whole range is already a prealloc extent,
2890			 * do nothing except updating the inode's i_size if
2891			 * needed.
2892			 */
2893			free_extent_map(em);
2894			ret = btrfs_fallocate_update_isize(inode, offset + len,
2895							   mode);
2896			goto out;
2897		}
2898		/*
2899		 * Part of the range is already a prealloc extent, so operate
2900		 * only on the remaining part of the range.
2901		 */
2902		alloc_start = em_end;
2903		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2904		len = offset + len - alloc_start;
2905		offset = alloc_start;
2906		alloc_hint = em->block_start + em->len;
2907	}
2908	free_extent_map(em);
2909
2910	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2911	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2912		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2913				      sectorsize);
2914		if (IS_ERR(em)) {
2915			ret = PTR_ERR(em);
2916			goto out;
2917		}
2918
2919		if (em->flags & EXTENT_FLAG_PREALLOC) {
2920			free_extent_map(em);
2921			ret = btrfs_fallocate_update_isize(inode, offset + len,
2922							   mode);
2923			goto out;
2924		}
2925		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2926			free_extent_map(em);
2927			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2928						   0);
2929			if (!ret)
2930				ret = btrfs_fallocate_update_isize(inode,
2931								   offset + len,
2932								   mode);
2933			return ret;
2934		}
2935		free_extent_map(em);
2936		alloc_start = round_down(offset, sectorsize);
2937		alloc_end = alloc_start + sectorsize;
2938		goto reserve_space;
2939	}
2940
2941	alloc_start = round_up(offset, sectorsize);
2942	alloc_end = round_down(offset + len, sectorsize);
2943
2944	/*
2945	 * For unaligned ranges, check the pages at the boundaries, they might
2946	 * map to an extent, in which case we need to partially zero them, or
2947	 * they might map to a hole, in which case we need our allocation range
2948	 * to cover them.
2949	 */
2950	if (!IS_ALIGNED(offset, sectorsize)) {
2951		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2952							    offset);
2953		if (ret < 0)
2954			goto out;
2955		if (ret == RANGE_BOUNDARY_HOLE) {
2956			alloc_start = round_down(offset, sectorsize);
2957			ret = 0;
2958		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2959			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2960			if (ret)
2961				goto out;
2962		} else {
2963			ret = 0;
2964		}
2965	}
2966
2967	if (!IS_ALIGNED(offset + len, sectorsize)) {
2968		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2969							    offset + len);
2970		if (ret < 0)
2971			goto out;
2972		if (ret == RANGE_BOUNDARY_HOLE) {
2973			alloc_end = round_up(offset + len, sectorsize);
2974			ret = 0;
2975		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2976			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2977						   0, 1);
2978			if (ret)
2979				goto out;
2980		} else {
2981			ret = 0;
2982		}
2983	}
2984
2985reserve_space:
2986	if (alloc_start < alloc_end) {
2987		struct extent_state *cached_state = NULL;
2988		const u64 lockstart = alloc_start;
2989		const u64 lockend = alloc_end - 1;
2990
2991		bytes_to_reserve = alloc_end - alloc_start;
2992		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2993						      bytes_to_reserve);
2994		if (ret < 0)
2995			goto out;
2996		space_reserved = true;
2997		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2998					    &cached_state);
2999		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3000						alloc_start, bytes_to_reserve);
3001		if (ret) {
3002			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3003				      lockend, &cached_state);
3004			goto out;
3005		}
3006		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3007						alloc_end - alloc_start,
3008						i_blocksize(inode),
3009						offset + len, &alloc_hint);
3010		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3011			      &cached_state);
3012		/* btrfs_prealloc_file_range releases reserved space on error */
3013		if (ret) {
3014			space_reserved = false;
3015			goto out;
3016		}
3017	}
3018	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3019out:
3020	if (ret && space_reserved)
3021		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3022					       alloc_start, bytes_to_reserve);
3023	extent_changeset_free(data_reserved);
3024
3025	return ret;
3026}
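/*
 * Illustrative userspace sketch (not part of this file): the zero-range
 * mode handled above is requested with:
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
 *
 * Unlike hole punching, it may preallocate extents, so subsequent writes to
 * the zeroed range should not fail with ENOSPC.
 */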
3027
3028static long btrfs_fallocate(struct file *file, int mode,
3029			    loff_t offset, loff_t len)
3030{
3031	struct inode *inode = file_inode(file);
3032	struct extent_state *cached_state = NULL;
3033	struct extent_changeset *data_reserved = NULL;
3034	struct falloc_range *range;
3035	struct falloc_range *tmp;
3036	LIST_HEAD(reserve_list);
3037	u64 cur_offset;
3038	u64 last_byte;
3039	u64 alloc_start;
3040	u64 alloc_end;
3041	u64 alloc_hint = 0;
3042	u64 locked_end;
3043	u64 actual_end = 0;
3044	u64 data_space_needed = 0;
3045	u64 data_space_reserved = 0;
3046	u64 qgroup_reserved = 0;
3047	struct extent_map *em;
3048	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3049	int ret;
3050
3051	/* Do not allow fallocate in ZONED mode */
3052	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3053		return -EOPNOTSUPP;
3054
3055	alloc_start = round_down(offset, blocksize);
3056	alloc_end = round_up(offset + len, blocksize);
3057	cur_offset = alloc_start;
3058
3059	/* Make sure we aren't being given some crap mode */
3060	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3061		     FALLOC_FL_ZERO_RANGE))
3062		return -EOPNOTSUPP;
3063
3064	if (mode & FALLOC_FL_PUNCH_HOLE)
3065		return btrfs_punch_hole(file, offset, len);
3066
3067	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3068
3069	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3070		ret = inode_newsize_ok(inode, offset + len);
3071		if (ret)
3072			goto out;
3073	}
3074
3075	ret = file_modified(file);
3076	if (ret)
3077		goto out;
3078
3079	/*
3080	 * TODO: Move these two operations after we have checked
3081	 * accurate reserved space, or fallocate can still fail but
3082	 * with the page truncated or the size expanded.
3083	 *
3084	 * But that's a minor problem and won't do much harm BTW.
3085	 */
3086	if (alloc_start > inode->i_size) {
3087		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3088					alloc_start);
3089		if (ret)
3090			goto out;
3091	} else if (offset + len > inode->i_size) {
3092		/*
3093		 * If we are fallocating from the end of the file onward we
3094		 * need to zero out the end of the block if i_size lands in the
3095		 * middle of a block.
3096		 */
3097		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3098		if (ret)
3099			goto out;
3100	}
3101
3102	/*
3103	 * We have locked the inode at the VFS level (in exclusive mode) and we
3104	 * have locked the i_mmap_lock (in exclusive mode). Now before
3105	 * locking the file range, flush all delalloc in the range and wait for
3106	 * all ordered extents in the range to complete. After this we can lock
3107	 * the file range and, due to the previous locking we did, we know there
3108	 * can't be more delalloc or ordered extents in the range.
3109	 */
3110	ret = btrfs_wait_ordered_range(inode, alloc_start,
3111				       alloc_end - alloc_start);
3112	if (ret)
3113		goto out;
3114
3115	if (mode & FALLOC_FL_ZERO_RANGE) {
3116		ret = btrfs_zero_range(inode, offset, len, mode);
3117		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3118		return ret;
3119	}
3120
3121	locked_end = alloc_end - 1;
3122	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3123		    &cached_state);
3124
3125	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3126
3127	/* First, check if we exceed the qgroup limit */
3128	while (cur_offset < alloc_end) {
3129		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3130				      alloc_end - cur_offset);
3131		if (IS_ERR(em)) {
3132			ret = PTR_ERR(em);
3133			break;
3134		}
3135		last_byte = min(extent_map_end(em), alloc_end);
3136		actual_end = min_t(u64, extent_map_end(em), offset + len);
3137		last_byte = ALIGN(last_byte, blocksize);
3138		if (em->block_start == EXTENT_MAP_HOLE ||
3139		    (cur_offset >= inode->i_size &&
3140		     !(em->flags & EXTENT_FLAG_PREALLOC))) {
3141			const u64 range_len = last_byte - cur_offset;
3142
3143			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3144			if (ret < 0) {
3145				free_extent_map(em);
3146				break;
3147			}
3148			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3149					&data_reserved, cur_offset, range_len);
3150			if (ret < 0) {
3151				free_extent_map(em);
3152				break;
3153			}
3154			qgroup_reserved += range_len;
3155			data_space_needed += range_len;
3156		}
3157		free_extent_map(em);
3158		cur_offset = last_byte;
3159	}
3160
3161	if (!ret && data_space_needed > 0) {
3162		/*
3163		 * We are safe to reserve space here as we can't have delalloc
3164		 * in the range, see above.
3165		 */
3166		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3167						      data_space_needed);
3168		if (!ret)
3169			data_space_reserved = data_space_needed;
3170	}
3171
3172	/*
3173	 * If ret is still 0, it means we're OK to fallocate.
3174	 * Otherwise just clean up the list and exit.
3175	 */
3176	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3177		if (!ret) {
3178			ret = btrfs_prealloc_file_range(inode, mode,
3179					range->start,
3180					range->len, i_blocksize(inode),
3181					offset + len, &alloc_hint);
3182			/*
3183			 * btrfs_prealloc_file_range() releases space even
3184			 * if it returns an error.
3185			 */
3186			data_space_reserved -= range->len;
3187			qgroup_reserved -= range->len;
3188		} else if (data_space_reserved > 0) {
3189			btrfs_free_reserved_data_space(BTRFS_I(inode),
3190					       data_reserved, range->start,
3191					       range->len);
3192			data_space_reserved -= range->len;
3193			qgroup_reserved -= range->len;
3194		} else if (qgroup_reserved > 0) {
3195			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3196					       range->start, range->len, NULL);
3197			qgroup_reserved -= range->len;
3198		}
3199		list_del(&range->list);
3200		kfree(range);
3201	}
3202	if (ret < 0)
3203		goto out_unlock;
3204
3205	/*
3206	 * We didn't need to allocate any more space, but we still extended the
3207	 * size of the file so we need to update i_size and the inode item.
3208	 */
3209	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3210out_unlock:
3211	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3212		      &cached_state);
3213out:
3214	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3215	extent_changeset_free(data_reserved);
3216	return ret;
3217}
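/*
 * Illustrative userspace sketch (not part of this file): plain
 * preallocation (mode 0) extends i_size when the range goes past EOF, while
 * FALLOC_FL_KEEP_SIZE preallocates beyond EOF without changing i_size:
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// file grows to 1 MiB
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20);
 */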
3218
3219/*
3220 * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3221 * that has unflushed and/or flushing delalloc. There might be other adjacent
3222 * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3223 * looping while it gets adjacent subranges, merging them together.
3224 */
3225static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3226				   struct extent_state **cached_state,
3227				   bool *search_io_tree,
3228				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3229{
3230	u64 len = end + 1 - start;
3231	u64 delalloc_len = 0;
3232	struct btrfs_ordered_extent *oe;
3233	u64 oe_start;
3234	u64 oe_end;
3235
3236	/*
3237	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3238	 * means we have delalloc (dirty pages) for which writeback has not
3239	 * started yet.
3240	 */
3241	if (*search_io_tree) {
3242		spin_lock(&inode->lock);
3243		if (inode->delalloc_bytes > 0) {
3244			spin_unlock(&inode->lock);
3245			*delalloc_start_ret = start;
3246			delalloc_len = count_range_bits(&inode->io_tree,
3247							delalloc_start_ret, end,
3248							len, EXTENT_DELALLOC, 1,
3249							cached_state);
3250		} else {
3251			spin_unlock(&inode->lock);
3252		}
3253	}
3254
3255	if (delalloc_len > 0) {
3256		/*
3257		 * If delalloc was found then *delalloc_start_ret has a sector size
3258		 * aligned value (rounded down).
3259		 */
3260		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3261
3262		if (*delalloc_start_ret == start) {
3263			/* Delalloc for the whole range, nothing more to do. */
3264			if (*delalloc_end_ret == end)
3265				return true;
3266			/* Else trim our search range for ordered extents. */
3267			start = *delalloc_end_ret + 1;
3268			len = end + 1 - start;
3269		}
3270	} else {
3271		/* No delalloc, future calls don't need to search again. */
3272		*search_io_tree = false;
3273	}
3274
3275	/*
3276	 * Now also check if there's any ordered extent in the range.
3277	 * We do this because:
3278	 *
3279	 * 1) When delalloc is flushed, the file range is locked, we clear the
3280	 *    EXTENT_DELALLOC bit from the io tree and create an extent map and
3281	 *    an ordered extent for the write. So we might just have been called
3282	 *    after delalloc is flushed and before the ordered extent completes
3283	 *    and inserts the new file extent item in the subvolume's btree;
3284	 *
3285	 * 2) We may have an ordered extent created by flushing delalloc for a
3286	 *    subrange that starts before the subrange we found marked with
3287	 *    EXTENT_DELALLOC in the io tree.
3288	 *
3289	 * We could also use the extent map tree to find such delalloc that is
3290	 * being flushed, but using the ordered extents tree is more efficient
3291	 * because it's usually much smaller as ordered extents are removed from
3292	 * the tree once they complete. With the extent maps, we may have them
3293	 * in the extent map tree for a very long time, and they were either
3294	 * created by previous writes or loaded by read operations.
3295	 */
3296	oe = btrfs_lookup_first_ordered_range(inode, start, len);
3297	if (!oe)
3298		return (delalloc_len > 0);
3299
3300	/* The ordered extent may span beyond our search range. */
3301	oe_start = max(oe->file_offset, start);
3302	oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3303
3304	btrfs_put_ordered_extent(oe);
3305
3306	/* Don't have unflushed delalloc, return the ordered extent range. */
3307	if (delalloc_len == 0) {
3308		*delalloc_start_ret = oe_start;
3309		*delalloc_end_ret = oe_end;
3310		return true;
3311	}
3312
3313	/*
3314	 * We have both unflushed delalloc (io_tree) and an ordered extent.
3315	 * If the ranges are adjacent, return a combined range, otherwise
3316	 * return the leftmost range.
3317	 */
3318	if (oe_start < *delalloc_start_ret) {
3319		if (oe_end < *delalloc_start_ret)
3320			*delalloc_end_ret = oe_end;
3321		*delalloc_start_ret = oe_start;
3322	} else if (*delalloc_end_ret + 1 == oe_start) {
3323		*delalloc_end_ret = oe_end;
3324	}
3325
3326	return true;
3327}
3328
3329/*
3330 * Check if there's delalloc in a given range.
3331 *
3332 * @inode:               The inode.
3333 * @start:               The start offset of the range. It does not need to be
3334 *                       sector size aligned.
3335 * @end:                 The end offset (inclusive value) of the search range.
3336 *                       It does not need to be sector size aligned.
3337 * @cached_state:        Extent state record used for speeding up delalloc
3338 *                       searches in the inode's io_tree. Can be NULL.
3339 * @delalloc_start_ret:  Output argument, set to the start offset of the
3340 *                       subrange found with delalloc (may not be sector size
3341 *                       aligned).
3342 * @delalloc_end_ret:    Output argument, set to the end offset (inclusive value)
3343 *                       of the subrange found with delalloc.
3344 *
3345 * Returns true if a subrange with delalloc is found within the given range, and
3346 * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3347 * end offsets of the subrange.
3348 */
3349bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3350				  struct extent_state **cached_state,
3351				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3352{
3353	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3354	u64 prev_delalloc_end = 0;
3355	bool search_io_tree = true;
3356	bool ret = false;
3357
3358	while (cur_offset <= end) {
3359		u64 delalloc_start;
3360		u64 delalloc_end;
3361		bool delalloc;
3362
3363		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3364						  cached_state, &search_io_tree,
3365						  &delalloc_start,
3366						  &delalloc_end);
3367		if (!delalloc)
3368			break;
3369
3370		if (prev_delalloc_end == 0) {
3371			/* First subrange found. */
3372			*delalloc_start_ret = max(delalloc_start, start);
3373			*delalloc_end_ret = delalloc_end;
3374			ret = true;
3375		} else if (delalloc_start == prev_delalloc_end + 1) {
3376			/* Subrange adjacent to the previous one, merge them. */
3377			*delalloc_end_ret = delalloc_end;
3378		} else {
3379			/* Subrange not adjacent to the previous one, exit. */
3380			break;
3381		}
3382
3383		prev_delalloc_end = delalloc_end;
3384		cur_offset = delalloc_end + 1;
3385		cond_resched();
3386	}
3387
3388	return ret;
3389}
3390
3391/*
3392 * Check if there's a hole or delalloc range in a range representing a hole (or
3393 * prealloc extent) found in the inode's subvolume btree.
3394 *
3395 * @inode:      The inode.
3396 * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3397 * @start:      Start offset of the hole region. It does not need to be sector
3398 *              size aligned.
3399 * @end:        End offset (inclusive value) of the hole region. It does not
3400 *              need to be sector size aligned.
3401 * @start_ret:  Return parameter, used to set the start of the subrange in the
3402 *              hole that matches the search criteria (seek mode), if such
3403 *              subrange is found (return value of the function is true).
3404 *              The value returned here may not be sector size aligned.
3405 *
3406 * Returns true if a subrange matching the given seek mode is found, and if one
3407 * is found, it updates @start_ret with the start of the subrange.
3408 */
3409static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3410					struct extent_state **cached_state,
3411					u64 start, u64 end, u64 *start_ret)
3412{
3413	u64 delalloc_start;
3414	u64 delalloc_end;
3415	bool delalloc;
3416
3417	delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3418						&delalloc_start, &delalloc_end);
3419	if (delalloc && whence == SEEK_DATA) {
3420		*start_ret = delalloc_start;
3421		return true;
3422	}
3423
3424	if (delalloc && whence == SEEK_HOLE) {
3425		/*
3426		 * We found delalloc but it starts after our start offset. So we
3427		 * have a hole between our start offset and the delalloc start.
3428		 */
3429		if (start < delalloc_start) {
3430			*start_ret = start;
3431			return true;
3432		}
3433		/*
3434		 * Delalloc range starts at our start offset.
3435		 * If the delalloc range's length is smaller than our range,
3436		 * then it means we have a hole that starts where the delalloc
3437		 * subrange ends.
3438		 */
3439		if (delalloc_end < end) {
3440			*start_ret = delalloc_end + 1;
3441			return true;
3442		}
3443
3444		/* There's delalloc for the whole range. */
3445		return false;
3446	}
3447
3448	if (!delalloc && whence == SEEK_HOLE) {
3449		*start_ret = start;
3450		return true;
3451	}
3452
3453	/*
3454	 * No delalloc in the range and we are seeking for data. The caller has
3455	 * to iterate to the next extent item in the subvolume btree.
3456	 */
3457	return false;
3458}
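/*
 * Editorial note, not part of the original file: a concrete instance of the
 * cases above, assuming the hole region is [0, 16383] and it contains a
 * single delalloc subrange at [4096, 8191]:
 *
 *   SEEK_DATA -> returns true, *start_ret = 4096 (start of the delalloc)
 *   SEEK_HOLE -> returns true, *start_ret = 0    (hole before the delalloc)
 *
 * With no delalloc at all, SEEK_HOLE returns true with *start_ret = 0 while
 * SEEK_DATA returns false and the caller moves to the next extent item. With
 * delalloc covering the whole region, SEEK_HOLE returns false instead.
 */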
3459
3460static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3461{
3462	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3463	struct btrfs_file_private *private = file->private_data;
3464	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3465	struct extent_state *cached_state = NULL;
3466	struct extent_state **delalloc_cached_state;
3467	const loff_t i_size = i_size_read(&inode->vfs_inode);
3468	const u64 ino = btrfs_ino(inode);
3469	struct btrfs_root *root = inode->root;
3470	struct btrfs_path *path;
3471	struct btrfs_key key;
3472	u64 last_extent_end;
3473	u64 lockstart;
3474	u64 lockend;
3475	u64 start;
3476	int ret;
3477	bool found = false;
3478
3479	if (i_size == 0 || offset >= i_size)
3480		return -ENXIO;
3481
3482	/*
3483	 * Quick path. If the inode has no prealloc extents and its number of
3484	 * bytes used matches its i_size, then it cannot have holes.
3485	 */
3486	if (whence == SEEK_HOLE &&
3487	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3488	    inode_get_bytes(&inode->vfs_inode) == i_size)
3489		return i_size;
3490
3491	if (!private) {
3492		private = kzalloc(sizeof(*private), GFP_KERNEL);
3493		/*
3494		 * No worries if memory allocation failed.
3495		 * The private structure is used only for speeding up multiple
3496		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3497		 * so everything will still be correct.
3498		 */
3499		file->private_data = private;
3500	}
3501
3502	if (private)
3503		delalloc_cached_state = &private->llseek_cached_state;
3504	else
3505		delalloc_cached_state = NULL;
3506
3507	/*
3508	 * offset can be negative; in that case we start finding DATA/HOLE from
3509	 * the very start of the file.
3510	 */
3511	start = max_t(loff_t, 0, offset);
3512
3513	lockstart = round_down(start, fs_info->sectorsize);
3514	lockend = round_up(i_size, fs_info->sectorsize);
3515	if (lockend <= lockstart)
3516		lockend = lockstart + fs_info->sectorsize;
3517	lockend--;
3518
3519	path = btrfs_alloc_path();
3520	if (!path)
3521		return -ENOMEM;
3522	path->reada = READA_FORWARD;
3523
3524	key.objectid = ino;
3525	key.type = BTRFS_EXTENT_DATA_KEY;
3526	key.offset = start;
3527
3528	last_extent_end = lockstart;
3529
3530	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3531
3532	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3533	if (ret < 0) {
3534		goto out;
3535	} else if (ret > 0 && path->slots[0] > 0) {
3536		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3537		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3538			path->slots[0]--;
3539	}
3540
3541	while (start < i_size) {
3542		struct extent_buffer *leaf = path->nodes[0];
3543		struct btrfs_file_extent_item *extent;
3544		u64 extent_end;
3545		u8 type;
3546
3547		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3548			ret = btrfs_next_leaf(root, path);
3549			if (ret < 0)
3550				goto out;
3551			else if (ret > 0)
3552				break;
3553
3554			leaf = path->nodes[0];
3555		}
3556
3557		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3558		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3559			break;
3560
3561		extent_end = btrfs_file_extent_end(path);
3562
3563		/*
3564		 * In the first iteration we may have a slot that points to an
3565		 * extent that ends before our start offset, so skip it.
3566		 */
3567		if (extent_end <= start) {
3568			path->slots[0]++;
3569			continue;
3570		}
3571
3572		/* We have an implicit hole, NO_HOLES feature is likely set. */
3573		if (last_extent_end < key.offset) {
3574			u64 search_start = last_extent_end;
3575			u64 found_start;
3576
3577			/*
3578			 * First iteration, @start matches @offset and it's
3579			 * within the hole.
3580			 */
3581			if (start == offset)
3582				search_start = offset;
3583
3584			found = find_desired_extent_in_hole(inode, whence,
3585							    delalloc_cached_state,
3586							    search_start,
3587							    key.offset - 1,
3588							    &found_start);
3589			if (found) {
3590				start = found_start;
3591				break;
3592			}
3593			/*
3594			 * Didn't find data or a hole (due to delalloc) in the
3595			 * implicit hole range, so we need to analyze the extent.
3596			 */
3597		}
3598
3599		extent = btrfs_item_ptr(leaf, path->slots[0],
3600					struct btrfs_file_extent_item);
3601		type = btrfs_file_extent_type(leaf, extent);
3602
3603		/*
3604		 * Can't access the extent's disk_bytenr field if this is an
3605		 * inline extent, since that offset is where the extent
3606		 * data starts.
3607		 */
3608		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3609		    (type == BTRFS_FILE_EXTENT_REG &&
3610		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3611			/*
3612			 * Explicit hole or prealloc extent, search for delalloc.
3613			 * A prealloc extent is treated like a hole.
3614			 */
3615			u64 search_start = key.offset;
3616			u64 found_start;
3617
3618			/*
3619			 * First iteration, @start matches @offset and it's
3620			 * within the hole.
3621			 */
3622			if (start == offset)
3623				search_start = offset;
3624
3625			found = find_desired_extent_in_hole(inode, whence,
3626							    delalloc_cached_state,
3627							    search_start,
3628							    extent_end - 1,
3629							    &found_start);
3630			if (found) {
3631				start = found_start;
3632				break;
3633			}
3634			/*
3635			 * Didn't find data or a hole (due to delalloc) in the
3636			 * hole or prealloc range, so we need to analyze the next
3637			 * extent item.
3638			 */
3639		} else {
3640			/*
3641			 * Found a regular or inline extent.
3642			 * If we are seeking for data, adjust the start offset
3643			 * and stop, we're done.
3644			 */
3645			if (whence == SEEK_DATA) {
3646				start = max_t(u64, key.offset, offset);
3647				found = true;
3648				break;
3649			}
3650			/*
3651			 * Else, we are seeking for a hole, check the next file
3652			 * extent item.
3653			 */
3654		}
3655
3656		start = extent_end;
3657		last_extent_end = extent_end;
3658		path->slots[0]++;
3659		if (fatal_signal_pending(current)) {
3660			ret = -EINTR;
3661			goto out;
3662		}
3663		cond_resched();
3664	}
3665
3666	/* We have an implicit hole from the last extent found up to i_size. */
3667	if (!found && start < i_size) {
3668		found = find_desired_extent_in_hole(inode, whence,
3669						    delalloc_cached_state, start,
3670						    i_size - 1, &start);
3671		if (!found)
3672			start = i_size;
3673	}
3674
3675out:
3676	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3677	btrfs_free_path(path);
3678
3679	if (ret < 0)
3680		return ret;
3681
3682	if (whence == SEEK_DATA && start >= i_size)
3683		return -ENXIO;
3684
3685	return min_t(loff_t, start, i_size);
3686}
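/*
 * Editorial sketch, not part of the original file: how the semantics
 * implemented above surface to user space through lseek(2). Hypothetical
 * userspace snippet, assuming fd is an open file on btrfs:
 */
#if 0
	off_t data = lseek(fd, 0, SEEK_DATA);	  /* first data byte >= 0 */
	off_t hole = lseek(fd, data, SEEK_HOLE);  /* first hole byte >= data */

	/*
	 * A return of -1 with errno == ENXIO means there is no data beyond
	 * the given offset, matching the i_size checks above. The offsets
	 * returned are capped at i_size and need not be sector size aligned.
	 */
#endif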
3687
3688static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3689{
3690	struct inode *inode = file->f_mapping->host;
3691
3692	switch (whence) {
3693	default:
3694		return generic_file_llseek(file, offset, whence);
3695	case SEEK_DATA:
3696	case SEEK_HOLE:
3697		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3698		offset = find_desired_extent(file, offset, whence);
3699		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3700		break;
3701	}
3702
3703	if (offset < 0)
3704		return offset;
3705
3706	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3707}
3708
3709static int btrfs_file_open(struct inode *inode, struct file *filp)
3710{
3711	int ret;
3712
3713	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
3714		        FMODE_CAN_ODIRECT;
3715
3716	ret = fsverity_file_open(inode, filp);
3717	if (ret)
3718		return ret;
3719	return generic_file_open(inode, filp);
3720}
3721
3722static int check_direct_read(struct btrfs_fs_info *fs_info,
3723			     const struct iov_iter *iter, loff_t offset)
3724{
3725	int ret;
3726	int i, seg;
3727
3728	ret = check_direct_IO(fs_info, iter, offset);
3729	if (ret < 0)
3730		return ret;
3731
3732	if (!iter_is_iovec(iter))
3733		return 0;
3734
3735	for (seg = 0; seg < iter->nr_segs; seg++) {
3736		for (i = seg + 1; i < iter->nr_segs; i++) {
3737			const struct iovec *iov1 = iter_iov(iter) + seg;
3738			const struct iovec *iov2 = iter_iov(iter) + i;
3739
3740			if (iov1->iov_base == iov2->iov_base)
3741				return -EINVAL;
3742		}
3743	}
3744	return 0;
3745}
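/*
 * Editorial note, not part of the original file: the quadratic scan above
 * only rejects iovec arrays where two segments share the same base address,
 * for example:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf, .iov_len = 512 },
 *		{ .iov_base = buf, .iov_len = 512 },	<- same base, -EINVAL
 *	};
 *
 * Segments that overlap without having identical iov_base values are not
 * caught here.
 */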
3746
3747static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3748{
3749	struct inode *inode = file_inode(iocb->ki_filp);
3750	size_t prev_left = 0;
3751	ssize_t read = 0;
3752	ssize_t ret;
3753
3754	if (fsverity_active(inode))
3755		return 0;
3756
3757	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3758		return 0;
3759
3760	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3761again:
3762	/*
3763	 * This is similar to what we do for direct IO writes, see the comment
3764	 * at btrfs_direct_write(), but we also disable page faults in addition
3765	 * to disabling them only at the iov_iter level. This is because when
3766	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
3767	 * which can still trigger page fault-ins despite having set ->nofault
3768	 * to true on our 'to' iov_iter.
3769	 *
3770	 * The difference to direct IO writes is that we deadlock when trying
3771	 * to lock the extent range in the inode's tree during the page reads
3772	 * triggered by the fault in (while for writes it is due to waiting for
3773	 * our own ordered extent). This is because for direct IO reads,
3774	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
3775	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
3776	 */
3777	pagefault_disable();
3778	to->nofault = true;
3779	ret = btrfs_dio_read(iocb, to, read);
3780	to->nofault = false;
3781	pagefault_enable();
3782
3783	/* No increment (+=) because iomap returns a cumulative value. */
3784	if (ret > 0)
3785		read = ret;
3786
3787	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3788		const size_t left = iov_iter_count(to);
3789
3790		if (left == prev_left) {
3791			/*
3792			 * We didn't make any progress since the last attempt,
3793			 * fall back to a buffered read for the remainder of the
3794			 * range. This is just to avoid any possibility of looping
3795			 * for too long.
3796			 */
3797			ret = read;
3798		} else {
3799			/*
3800			 * We made some progress since the last retry or this is
3801			 * the first time we are retrying. Fault in as many pages
3802			 * as possible and retry.
3803			 */
3804			fault_in_iov_iter_writeable(to, left);
3805			prev_left = left;
3806			goto again;
3807		}
3808	}
3809	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3810	return ret < 0 ? ret : read;
3811}
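/*
 * Editorial sketch, not part of the original file: the retry flow above for
 * a hypothetical 1 MiB direct read whose destination buffer is not yet
 * faulted in:
 *
 *   pass 1: btrfs_dio_read() returns -EFAULT, 1 MiB still left ->
 *           fault_in_iov_iter_writeable(), prev_left = 1 MiB, retry
 *   pass 2: returns 1 MiB (cumulative), nothing left -> return 1 MiB
 *
 * If a pass makes no progress (left == prev_left), the bytes read so far are
 * returned and btrfs_file_read_iter() below completes the remainder with a
 * buffered read.
 */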
3812
3813static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3814{
3815	ssize_t ret = 0;
3816
3817	if (iocb->ki_flags & IOCB_DIRECT) {
3818		ret = btrfs_direct_read(iocb, to);
3819		if (ret < 0 || !iov_iter_count(to) ||
3820		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3821			return ret;
3822	}
3823
3824	return filemap_read(iocb, to, ret);
3825}
3826
3827const struct file_operations btrfs_file_operations = {
3828	.llseek		= btrfs_file_llseek,
3829	.read_iter      = btrfs_file_read_iter,
3830	.splice_read	= filemap_splice_read,
3831	.write_iter	= btrfs_file_write_iter,
3832	.splice_write	= iter_file_splice_write,
3833	.mmap		= btrfs_file_mmap,
3834	.open		= btrfs_file_open,
3835	.release	= btrfs_release_file,
3836	.get_unmapped_area = thp_get_unmapped_area,
3837	.fsync		= btrfs_sync_file,
3838	.fallocate	= btrfs_fallocate,
3839	.unlocked_ioctl	= btrfs_ioctl,
3840#ifdef CONFIG_COMPAT
3841	.compat_ioctl	= btrfs_compat_ioctl,
3842#endif
3843	.remap_file_range = btrfs_remap_file_range,
3844};
3845
3846int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3847{
3848	int ret;
3849
3850	/*
3851	 * So with compression we will find and lock a dirty page and clear the
3852	 * first one as dirty, set up an async extent, and immediately return
3853	 * with the entire range locked but with nobody actually marked with
3854	 * writeback.  So we can't just filemap_write_and_wait_range() and
3855	 * expect it to work since it will just kick off a thread to do the
3856	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3857	 * since it will wait on the page lock, which won't be unlocked until
3858	 * after the pages have been marked as writeback and so we're good to go
3859	 * from there.  We have to do this otherwise we'll miss the ordered
3860	 * extents and that results in badness.  Please Josef, do not think you
3861	 * know better and pull this out at some point in the future, it is
3862	 * right and you are wrong.
3863	 */
3864	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3865	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3866			     &BTRFS_I(inode)->runtime_flags))
3867		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3868
3869	return ret;
3870}
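/*
 * Editorial note, not part of the original file: the sequence the comment
 * above describes, for writeback over a range with compression queued:
 *
 *   1st filemap_fdatawrite_range(): locks the dirty pages, queues async
 *       compression and returns before writeback is marked on them.
 *   2nd filemap_fdatawrite_range(): blocks on those page locks, which are
 *       only released once the async path has marked the pages writeback.
 *
 * The second call is made only if BTRFS_INODE_HAS_ASYNC_EXTENT was set,
 * i.e. async (compressed) extents may exist for this inode.
 */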
v3.5.6
 
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/fs.h>
  20#include <linux/pagemap.h>
  21#include <linux/highmem.h>
  22#include <linux/time.h>
  23#include <linux/init.h>
  24#include <linux/string.h>
  25#include <linux/backing-dev.h>
  26#include <linux/mpage.h>
  27#include <linux/falloc.h>
  28#include <linux/swap.h>
  29#include <linux/writeback.h>
  30#include <linux/statfs.h>
  31#include <linux/compat.h>
  32#include <linux/slab.h>
 
 
 
 
 
  33#include "ctree.h"
  34#include "disk-io.h"
  35#include "transaction.h"
  36#include "btrfs_inode.h"
  37#include "ioctl.h"
  38#include "print-tree.h"
  39#include "tree-log.h"
  40#include "locking.h"
  41#include "compat.h"
  42
  43/*
  44 * when auto defrag is enabled we
  45 * queue up these defrag structs to remember which
  46 * inodes need defragging passes
  47 */
  48struct inode_defrag {
  49	struct rb_node rb_node;
  50	/* objectid */
  51	u64 ino;
  52	/*
  53	 * transid where the defrag was added, we search for
  54	 * extents newer than this
  55	 */
  56	u64 transid;
  57
  58	/* root objectid */
  59	u64 root;
  60
  61	/* last offset we were able to defrag */
  62	u64 last_offset;
  63
  64	/* if we've wrapped around back to zero once already */
  65	int cycled;
  66};
  67
  68static int __compare_inode_defrag(struct inode_defrag *defrag1,
  69				  struct inode_defrag *defrag2)
  70{
  71	if (defrag1->root > defrag2->root)
  72		return 1;
  73	else if (defrag1->root < defrag2->root)
  74		return -1;
  75	else if (defrag1->ino > defrag2->ino)
  76		return 1;
  77	else if (defrag1->ino < defrag2->ino)
  78		return -1;
  79	else
  80		return 0;
  81}
  82
  83/* pop a record for an inode into the defrag tree.  The lock
  84 * must be held already
  85 *
  86 * If you're inserting a record for an older transid than an
  87 * existing record, the transid already in the tree is lowered
  88 *
  89 * If an existing record is found the defrag item you
  90 * pass in is freed
  91 */
  92static void __btrfs_add_inode_defrag(struct inode *inode,
  93				    struct inode_defrag *defrag)
  94{
  95	struct btrfs_root *root = BTRFS_I(inode)->root;
  96	struct inode_defrag *entry;
  97	struct rb_node **p;
  98	struct rb_node *parent = NULL;
  99	int ret;
 100
 101	p = &root->fs_info->defrag_inodes.rb_node;
 102	while (*p) {
 103		parent = *p;
 104		entry = rb_entry(parent, struct inode_defrag, rb_node);
 105
 106		ret = __compare_inode_defrag(defrag, entry);
 107		if (ret < 0)
 108			p = &parent->rb_left;
 109		else if (ret > 0)
 110			p = &parent->rb_right;
 111		else {
 112			/* if we're reinserting an entry for
 113			 * an old defrag run, make sure to
 114			 * lower the transid of our existing record
 115			 */
 116			if (defrag->transid < entry->transid)
 117				entry->transid = defrag->transid;
 118			if (defrag->last_offset > entry->last_offset)
 119				entry->last_offset = defrag->last_offset;
 120			goto exists;
 121		}
 122	}
 123	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 124	rb_link_node(&defrag->rb_node, parent, p);
 125	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
 126	return;
 127
 128exists:
 129	kfree(defrag);
 130	return;
 131
 132}
 133
 134/*
 135 * insert a defrag record for this inode if auto defrag is
 136 * enabled
 137 */
 138int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 139			   struct inode *inode)
 140{
 141	struct btrfs_root *root = BTRFS_I(inode)->root;
 142	struct inode_defrag *defrag;
 143	u64 transid;
 144
 145	if (!btrfs_test_opt(root, AUTO_DEFRAG))
 146		return 0;
 147
 148	if (btrfs_fs_closing(root->fs_info))
 149		return 0;
 150
 151	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 152		return 0;
 153
 154	if (trans)
 155		transid = trans->transid;
 156	else
 157		transid = BTRFS_I(inode)->root->last_trans;
 158
 159	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
 160	if (!defrag)
 161		return -ENOMEM;
 162
 163	defrag->ino = btrfs_ino(inode);
 164	defrag->transid = transid;
 165	defrag->root = root->root_key.objectid;
 166
 167	spin_lock(&root->fs_info->defrag_inodes_lock);
 168	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 169		__btrfs_add_inode_defrag(inode, defrag);
 170	else
 171		kfree(defrag);
 172	spin_unlock(&root->fs_info->defrag_inodes_lock);
 173	return 0;
 174}
 175
 176/*
 177 * must be called with the defrag_inodes lock held
 178 */
 179struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
 180					     u64 root, u64 ino,
 181					     struct rb_node **next)
 182{
 183	struct inode_defrag *entry = NULL;
 184	struct inode_defrag tmp;
 185	struct rb_node *p;
 186	struct rb_node *parent = NULL;
 187	int ret;
 188
 189	tmp.ino = ino;
 190	tmp.root = root;
 191
 192	p = info->defrag_inodes.rb_node;
 193	while (p) {
 194		parent = p;
 195		entry = rb_entry(parent, struct inode_defrag, rb_node);
 196
 197		ret = __compare_inode_defrag(&tmp, entry);
 198		if (ret < 0)
 199			p = parent->rb_left;
 200		else if (ret > 0)
 201			p = parent->rb_right;
 202		else
 203			return entry;
 204	}
 205
 206	if (next) {
 207		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 208			parent = rb_next(parent);
 209			entry = rb_entry(parent, struct inode_defrag, rb_node);
 210		}
 211		*next = parent;
 212	}
 213	return NULL;
 214}
 215
 216/*
 217 * run through the list of inodes in the FS that need
 218 * defragging
 219 */
 220int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 221{
 222	struct inode_defrag *defrag;
 223	struct btrfs_root *inode_root;
 224	struct inode *inode;
 225	struct rb_node *n;
 226	struct btrfs_key key;
 227	struct btrfs_ioctl_defrag_range_args range;
 228	u64 first_ino = 0;
 229	u64 root_objectid = 0;
 230	int num_defrag;
 231	int defrag_batch = 1024;
 232
 233	memset(&range, 0, sizeof(range));
 234	range.len = (u64)-1;
 235
 236	atomic_inc(&fs_info->defrag_running);
 237	spin_lock(&fs_info->defrag_inodes_lock);
 238	while(1) {
 239		n = NULL;
 240
 241		/* find an inode to defrag */
 242		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
 243						 first_ino, &n);
 244		if (!defrag) {
 245			if (n) {
 246				defrag = rb_entry(n, struct inode_defrag,
 247						  rb_node);
 248			} else if (root_objectid || first_ino) {
 249				root_objectid = 0;
 250				first_ino = 0;
 251				continue;
 252			} else {
 253				break;
 254			}
 255		}
 256
 257		/* remove it from the rbtree */
 258		first_ino = defrag->ino + 1;
 259		root_objectid = defrag->root;
 260		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 261
 262		if (btrfs_fs_closing(fs_info))
 263			goto next_free;
 264
 265		spin_unlock(&fs_info->defrag_inodes_lock);
 266
 267		/* get the inode */
 268		key.objectid = defrag->root;
 269		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
 270		key.offset = (u64)-1;
 271		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
 272		if (IS_ERR(inode_root))
 273			goto next;
 274
 275		key.objectid = defrag->ino;
 276		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 277		key.offset = 0;
 278
 279		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
 280		if (IS_ERR(inode))
 281			goto next;
 282
 283		/* do a chunk of defrag */
 284		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 285		range.start = defrag->last_offset;
 286		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 287					       defrag_batch);
 288		/*
 289		 * if we filled the whole defrag batch, there
 290		 * must be more work to do.  Queue this defrag
 291		 * again
 292		 */
 293		if (num_defrag == defrag_batch) {
 294			defrag->last_offset = range.start;
 295			__btrfs_add_inode_defrag(inode, defrag);
 296			/*
 297			 * we don't want to kfree defrag, we added it back to
 298			 * the rbtree
 299			 */
 300			defrag = NULL;
 301		} else if (defrag->last_offset && !defrag->cycled) {
 302			/*
 303			 * we didn't fill our defrag batch, but
 304			 * we didn't start at zero.  Make sure we loop
 305			 * around to the start of the file.
 306			 */
 307			defrag->last_offset = 0;
 308			defrag->cycled = 1;
 309			__btrfs_add_inode_defrag(inode, defrag);
 310			defrag = NULL;
 311		}
 312
 313		iput(inode);
 314next:
 315		spin_lock(&fs_info->defrag_inodes_lock);
 316next_free:
 317		kfree(defrag);
 318	}
 319	spin_unlock(&fs_info->defrag_inodes_lock);
 320
 321	atomic_dec(&fs_info->defrag_running);
 322
 323	/*
 324	 * during unmount, we use the transaction_wait queue to
 325	 * wait for the defragger to stop
 326	 */
 327	wake_up(&fs_info->transaction_wait);
 328	return 0;
 329}
 330
 331/* simple helper to fault in pages and copy.  This should go away
 332 * and be replaced with calls into generic code.
 333 */
 334static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 335					 size_t write_bytes,
 336					 struct page **prepared_pages,
 337					 struct iov_iter *i)
 338{
 339	size_t copied = 0;
 340	size_t total_copied = 0;
 341	int pg = 0;
 342	int offset = pos & (PAGE_CACHE_SIZE - 1);
 343
 344	while (write_bytes > 0) {
 345		size_t count = min_t(size_t,
 346				     PAGE_CACHE_SIZE - offset, write_bytes);
 347		struct page *page = prepared_pages[pg];
 348		/*
 349		 * Copy data from userspace to the current page
 350		 *
 351		 * Disable pagefault to avoid recursive lock since
 352		 * the pages are already locked
 353		 */
 354		pagefault_disable();
 355		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 356		pagefault_enable();
 357
 358		/* Flush processor's dcache for this page */
 359		flush_dcache_page(page);
 360
 361		/*
 362		 * if we get a partial write, we can end up with
 363		 * partially up to date pages.  These add
 364		 * a lot of complexity, so make sure they don't
 365		 * happen by forcing this copy to be retried.
 366		 *
 367		 * The rest of the btrfs_file_write code will fall
 368		 * back to page at a time copies after we return 0.
 369		 */
 370		if (!PageUptodate(page) && copied < count)
 371			copied = 0;
 
 
 
 
 
 
 372
 373		iov_iter_advance(i, copied);
 374		write_bytes -= copied;
 375		total_copied += copied;
 376
 377		/* Return to btrfs_file_aio_write to fault page */
 378		if (unlikely(copied == 0))
 379			break;
 380
 381		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
 382			offset += copied;
 383		} else {
 384			pg++;
 385			offset = 0;
 386		}
 387	}
 388	return total_copied;
 389}
 390
 391/*
 392 * unlocks pages after btrfs_file_write is done with them
 393 */
 394void btrfs_drop_pages(struct page **pages, size_t num_pages)
 
 
 395{
 396	size_t i;
 
 
 
 
 397	for (i = 0; i < num_pages; i++) {
 398		/* page checked is some magic around finding pages that
 399		 * have been modified without going through btrfs_set_page_dirty
 400		 * clear it here
 
 
 401		 */
 402		ClearPageChecked(pages[i]);
 
 403		unlock_page(pages[i]);
 404		mark_page_accessed(pages[i]);
 405		page_cache_release(pages[i]);
 406	}
 407}
 408
 409/*
 410 * after copy_from_user, pages need to be dirtied and we need to make
 411 * sure holes are created between the current EOF and the start of
 412 * any next extents (if required).
 413 *
 414 * this also makes the decision about creating an inline extent vs
 415 * doing real data extents, marking pages dirty and delalloc as required.
 416 */
 417int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 418		      struct page **pages, size_t num_pages,
 419		      loff_t pos, size_t write_bytes,
 420		      struct extent_state **cached)
 421{
 
 422	int err = 0;
 423	int i;
 424	u64 num_bytes;
 425	u64 start_pos;
 426	u64 end_of_last_block;
 427	u64 end_pos = pos + write_bytes;
 428	loff_t isize = i_size_read(inode);
 
 429
 430	start_pos = pos & ~((u64)root->sectorsize - 1);
 431	num_bytes = (write_bytes + pos - start_pos +
 432		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
 
 
 
 
 
 
 433
 434	end_of_last_block = start_pos + num_bytes - 1;
 
 
 
 
 
 
 
 
 
 435	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 436					cached);
 437	if (err)
 438		return err;
 439
 440	for (i = 0; i < num_pages; i++) {
 441		struct page *p = pages[i];
 442		SetPageUptodate(p);
 443		ClearPageChecked(p);
 444		set_page_dirty(p);
 
 
 
 
 445	}
 446
 447	/*
 448	 * we've only changed i_size in ram, and we haven't updated
 449	 * the disk i_size.  There is no need to log the inode
 450	 * at this time.
 451	 */
 452	if (end_pos > isize)
 453		i_size_write(inode, end_pos);
 454	return 0;
 455}
 456
 457/*
 458 * this drops all the extents in the cache that intersect the range
 459 * [start, end].  Existing extents are split as required.
 460 */
 461int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 462			    int skip_pinned)
 463{
 464	struct extent_map *em;
 465	struct extent_map *split = NULL;
 466	struct extent_map *split2 = NULL;
 467	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 468	u64 len = end - start + 1;
 469	int ret;
 470	int testend = 1;
 471	unsigned long flags;
 472	int compressed = 0;
 473
 474	WARN_ON(end < start);
 475	if (end == (u64)-1) {
 476		len = (u64)-1;
 477		testend = 0;
 478	}
 479	while (1) {
 480		if (!split)
 481			split = alloc_extent_map();
 482		if (!split2)
 483			split2 = alloc_extent_map();
 484		BUG_ON(!split || !split2); /* -ENOMEM */
 485
 486		write_lock(&em_tree->lock);
 487		em = lookup_extent_mapping(em_tree, start, len);
 488		if (!em) {
 489			write_unlock(&em_tree->lock);
 490			break;
 491		}
 492		flags = em->flags;
 493		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 494			if (testend && em->start + em->len >= start + len) {
 495				free_extent_map(em);
 496				write_unlock(&em_tree->lock);
 497				break;
 498			}
 499			start = em->start + em->len;
 500			if (testend)
 501				len = start + len - (em->start + em->len);
 502			free_extent_map(em);
 503			write_unlock(&em_tree->lock);
 504			continue;
 505		}
 506		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 507		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 508		remove_extent_mapping(em_tree, em);
 509
 510		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
 511		    em->start < start) {
 512			split->start = em->start;
 513			split->len = start - em->start;
 514			split->orig_start = em->orig_start;
 515			split->block_start = em->block_start;
 516
 517			if (compressed)
 518				split->block_len = em->block_len;
 519			else
 520				split->block_len = split->len;
 521
 522			split->bdev = em->bdev;
 523			split->flags = flags;
 524			split->compress_type = em->compress_type;
 525			ret = add_extent_mapping(em_tree, split);
 526			BUG_ON(ret); /* Logic error */
 527			free_extent_map(split);
 528			split = split2;
 529			split2 = NULL;
 530		}
 531		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
 532		    testend && em->start + em->len > start + len) {
 533			u64 diff = start + len - em->start;
 534
 535			split->start = start + len;
 536			split->len = em->start + em->len - (start + len);
 537			split->bdev = em->bdev;
 538			split->flags = flags;
 539			split->compress_type = em->compress_type;
 540
 541			if (compressed) {
 542				split->block_len = em->block_len;
 543				split->block_start = em->block_start;
 544				split->orig_start = em->orig_start;
 545			} else {
 546				split->block_len = split->len;
 547				split->block_start = em->block_start + diff;
 548				split->orig_start = split->start;
 549			}
 550
 551			ret = add_extent_mapping(em_tree, split);
 552			BUG_ON(ret); /* Logic error */
 553			free_extent_map(split);
 554			split = NULL;
 555		}
 556		write_unlock(&em_tree->lock);
 557
 558		/* once for us */
 559		free_extent_map(em);
 560		/* once for the tree*/
 561		free_extent_map(em);
 562	}
 563	if (split)
 564		free_extent_map(split);
 565	if (split2)
 566		free_extent_map(split2);
 567	return 0;
 568}
 569
 570/*
 571 * this is very complex, but the basic idea is to drop all extents
 572 * in the range start - end.  hint_block is filled in with a block number
 573 * that would be a good hint to the block allocator for this file.
 574 *
 575 * If an extent intersects the range but is not entirely inside the range
 576 * it is either truncated or split.  Anything entirely inside the range
 577 * is deleted from the tree.
 
 
 
 
 
 
 578 */
 579int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 580		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
 
 581{
 582	struct btrfs_root *root = BTRFS_I(inode)->root;
 583	struct extent_buffer *leaf;
 584	struct btrfs_file_extent_item *fi;
 585	struct btrfs_path *path;
 586	struct btrfs_key key;
 587	struct btrfs_key new_key;
 588	u64 ino = btrfs_ino(inode);
 589	u64 search_start = start;
 590	u64 disk_bytenr = 0;
 591	u64 num_bytes = 0;
 592	u64 extent_offset = 0;
 593	u64 extent_end = 0;
 
 594	int del_nr = 0;
 595	int del_slot = 0;
 596	int extent_type;
 597	int recow;
 598	int ret;
 599	int modify_tree = -1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 600
 601	if (drop_cache)
 602		btrfs_drop_extent_cache(inode, start, end - 1, 0);
 603
 604	path = btrfs_alloc_path();
 605	if (!path)
 606		return -ENOMEM;
 607
 608	if (start >= BTRFS_I(inode)->disk_i_size)
 609		modify_tree = 0;
 610
 
 611	while (1) {
 612		recow = 0;
 613		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 614					       search_start, modify_tree);
 615		if (ret < 0)
 616			break;
 617		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 618			leaf = path->nodes[0];
 619			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 620			if (key.objectid == ino &&
 621			    key.type == BTRFS_EXTENT_DATA_KEY)
 622				path->slots[0]--;
 623		}
 624		ret = 0;
 625next_slot:
 626		leaf = path->nodes[0];
 627		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 628			BUG_ON(del_nr > 0);
 629			ret = btrfs_next_leaf(root, path);
 630			if (ret < 0)
 631				break;
 632			if (ret > 0) {
 633				ret = 0;
 634				break;
 635			}
 636			leaf = path->nodes[0];
 637			recow = 1;
 638		}
 639
 640		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 641		if (key.objectid > ino ||
 642		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 
 
 
 
 
 
 
 
 643			break;
 644
 645		fi = btrfs_item_ptr(leaf, path->slots[0],
 646				    struct btrfs_file_extent_item);
 647		extent_type = btrfs_file_extent_type(leaf, fi);
 648
 649		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 650		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 651			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 652			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 653			extent_offset = btrfs_file_extent_offset(leaf, fi);
 654			extent_end = key.offset +
 655				btrfs_file_extent_num_bytes(leaf, fi);
 656		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 657			extent_end = key.offset +
 658				btrfs_file_extent_inline_len(leaf, fi);
 659		} else {
 660			WARN_ON(1);
 661			extent_end = search_start;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 662		}
 663
 664		if (extent_end <= search_start) {
 665			path->slots[0]++;
 666			goto next_slot;
 667		}
 668
 669		search_start = max(key.offset, start);
 
 670		if (recow || !modify_tree) {
 671			modify_tree = -1;
 672			btrfs_release_path(path);
 673			continue;
 674		}
 675
 676		/*
 677		 *     | - range to drop - |
 678		 *  | -------- extent -------- |
 679		 */
 680		if (start > key.offset && end < extent_end) {
 681			BUG_ON(del_nr > 0);
 682			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
 
 
 
 683
 684			memcpy(&new_key, &key, sizeof(new_key));
 685			new_key.offset = start;
 686			ret = btrfs_duplicate_item(trans, root, path,
 687						   &new_key);
 688			if (ret == -EAGAIN) {
 689				btrfs_release_path(path);
 690				continue;
 691			}
 692			if (ret < 0)
 693				break;
 694
 695			leaf = path->nodes[0];
 696			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 697					    struct btrfs_file_extent_item);
 698			btrfs_set_file_extent_num_bytes(leaf, fi,
 699							start - key.offset);
 700
 701			fi = btrfs_item_ptr(leaf, path->slots[0],
 702					    struct btrfs_file_extent_item);
 703
 704			extent_offset += start - key.offset;
 705			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 706			btrfs_set_file_extent_num_bytes(leaf, fi,
 707							extent_end - start);
 708			btrfs_mark_buffer_dirty(leaf);
 709
 710			if (disk_bytenr > 0) {
 711				ret = btrfs_inc_extent_ref(trans, root,
 
 712						disk_bytenr, num_bytes, 0,
 
 
 713						root->root_key.objectid,
 714						new_key.objectid,
 715						start - extent_offset, 0);
 716				BUG_ON(ret); /* -ENOMEM */
 717				*hint_byte = disk_bytenr;
 
 
 
 
 718			}
 719			key.offset = start;
 720		}
 721		/*
 
 
 
 
 
 
 722		 *  | ---- range to drop ----- |
 723		 *      | -------- extent -------- |
 724		 */
 725		if (start <= key.offset && end < extent_end) {
 726			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
 
 
 
 727
 728			memcpy(&new_key, &key, sizeof(new_key));
 729			new_key.offset = end;
 730			btrfs_set_item_key_safe(trans, root, path, &new_key);
 731
 732			extent_offset += end - key.offset;
 733			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 734			btrfs_set_file_extent_num_bytes(leaf, fi,
 735							extent_end - end);
 736			btrfs_mark_buffer_dirty(leaf);
 737			if (disk_bytenr > 0) {
 738				inode_sub_bytes(inode, end - key.offset);
 739				*hint_byte = disk_bytenr;
 740			}
 741			break;
 742		}
 743
 744		search_start = extent_end;
 745		/*
 746		 *       | ---- range to drop ----- |
 747		 *  | -------- extent -------- |
 748		 */
 749		if (start > key.offset && end >= extent_end) {
 750			BUG_ON(del_nr > 0);
 751			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
 
 
 
 752
 753			btrfs_set_file_extent_num_bytes(leaf, fi,
 754							start - key.offset);
 755			btrfs_mark_buffer_dirty(leaf);
 756			if (disk_bytenr > 0) {
 757				inode_sub_bytes(inode, extent_end - start);
 758				*hint_byte = disk_bytenr;
 759			}
 760			if (end == extent_end)
 761				break;
 762
 763			path->slots[0]++;
 764			goto next_slot;
 765		}
 766
 767		/*
 768		 *  | ---- range to drop ----- |
 769		 *    | ------ extent ------ |
 770		 */
 771		if (start <= key.offset && end >= extent_end) {
 
 772			if (del_nr == 0) {
 773				del_slot = path->slots[0];
 774				del_nr = 1;
 775			} else {
 776				BUG_ON(del_slot + del_nr != path->slots[0]);
 777				del_nr++;
 778			}
 779
 780			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 781				inode_sub_bytes(inode,
 782						extent_end - key.offset);
 783				extent_end = ALIGN(extent_end,
 784						   root->sectorsize);
 785			} else if (disk_bytenr > 0) {
 786				ret = btrfs_free_extent(trans, root,
 
 787						disk_bytenr, num_bytes, 0,
 
 
 788						root->root_key.objectid,
 789						key.objectid, key.offset -
 790						extent_offset, 0);
 791				BUG_ON(ret); /* -ENOMEM */
 792				inode_sub_bytes(inode,
 793						extent_end - key.offset);
 794				*hint_byte = disk_bytenr;
 
 
 
 795			}
 796
 797			if (end == extent_end)
 798				break;
 799
 800			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
 801				path->slots[0]++;
 802				goto next_slot;
 803			}
 804
 805			ret = btrfs_del_items(trans, root, path, del_slot,
 806					      del_nr);
 807			if (ret) {
 808				btrfs_abort_transaction(trans, root, ret);
 809				goto out;
 810			}
 811
 812			del_nr = 0;
 813			del_slot = 0;
 814
 815			btrfs_release_path(path);
 816			continue;
 817		}
 818
 819		BUG_ON(1);
 820	}
 821
 822	if (!ret && del_nr > 0) {
 
 
 
 
 
 
 
 823		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
 824		if (ret)
 825			btrfs_abort_transaction(trans, root, ret);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 826	}
 827
 
 
 
 
 828out:
 829	btrfs_free_path(path);
 
 830	return ret;
 831}
 832
 833static int extent_mergeable(struct extent_buffer *leaf, int slot,
 834			    u64 objectid, u64 bytenr, u64 orig_offset,
 835			    u64 *start, u64 *end)
 836{
 837	struct btrfs_file_extent_item *fi;
 838	struct btrfs_key key;
 839	u64 extent_end;
 840
 841	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
 842		return 0;
 843
 844	btrfs_item_key_to_cpu(leaf, &key, slot);
 845	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
 846		return 0;
 847
 848	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
 849	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
 850	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
 851	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
 852	    btrfs_file_extent_compression(leaf, fi) ||
 853	    btrfs_file_extent_encryption(leaf, fi) ||
 854	    btrfs_file_extent_other_encoding(leaf, fi))
 855		return 0;
 856
 857	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
 858	if ((*start && *start != key.offset) || (*end && *end != extent_end))
 859		return 0;
 860
 861	*start = key.offset;
 862	*end = extent_end;
 863	return 1;
 864}
 865
 866/*
 867 * Mark extent in the range start - end as written.
 868 *
 869 * This changes extent type from 'pre-allocated' to 'regular'. If only
 870 * part of extent is marked as written, the extent will be split into
 871 * two or three.
 872 */
 873int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 874			      struct inode *inode, u64 start, u64 end)
 875{
 876	struct btrfs_root *root = BTRFS_I(inode)->root;
 877	struct extent_buffer *leaf;
 878	struct btrfs_path *path;
 879	struct btrfs_file_extent_item *fi;
 
 880	struct btrfs_key key;
 881	struct btrfs_key new_key;
 882	u64 bytenr;
 883	u64 num_bytes;
 884	u64 extent_end;
 885	u64 orig_offset;
 886	u64 other_start;
 887	u64 other_end;
 888	u64 split;
 889	int del_nr = 0;
 890	int del_slot = 0;
 891	int recow;
 892	int ret;
 893	u64 ino = btrfs_ino(inode);
 894
 895	btrfs_drop_extent_cache(inode, start, end - 1, 0);
 896
 897	path = btrfs_alloc_path();
 898	if (!path)
 899		return -ENOMEM;
 900again:
 901	recow = 0;
 902	split = start;
 903	key.objectid = ino;
 904	key.type = BTRFS_EXTENT_DATA_KEY;
 905	key.offset = split;
 906
 907	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 908	if (ret < 0)
 909		goto out;
 910	if (ret > 0 && path->slots[0] > 0)
 911		path->slots[0]--;
 912
 913	leaf = path->nodes[0];
 914	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 915	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
 
 
 
 
 
 916	fi = btrfs_item_ptr(leaf, path->slots[0],
 917			    struct btrfs_file_extent_item);
 918	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
 919	       BTRFS_FILE_EXTENT_PREALLOC);
 
 
 
 920	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
 921	BUG_ON(key.offset > start || extent_end < end);
 
 
 
 
 922
 923	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 924	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 925	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
 926	memcpy(&new_key, &key, sizeof(new_key));
 927
 928	if (start == key.offset && end < extent_end) {
 929		other_start = 0;
 930		other_end = start;
 931		if (extent_mergeable(leaf, path->slots[0] - 1,
 932				     ino, bytenr, orig_offset,
 933				     &other_start, &other_end)) {
 934			new_key.offset = end;
 935			btrfs_set_item_key_safe(trans, root, path, &new_key);
 936			fi = btrfs_item_ptr(leaf, path->slots[0],
 937					    struct btrfs_file_extent_item);
 
 
 938			btrfs_set_file_extent_num_bytes(leaf, fi,
 939							extent_end - end);
 940			btrfs_set_file_extent_offset(leaf, fi,
 941						     end - orig_offset);
 942			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 943					    struct btrfs_file_extent_item);
 
 
 944			btrfs_set_file_extent_num_bytes(leaf, fi,
 945							end - other_start);
 946			btrfs_mark_buffer_dirty(leaf);
 947			goto out;
 948		}
 949	}
 950
 951	if (start > key.offset && end == extent_end) {
 952		other_start = end;
 953		other_end = 0;
 954		if (extent_mergeable(leaf, path->slots[0] + 1,
 955				     ino, bytenr, orig_offset,
 956				     &other_start, &other_end)) {
 957			fi = btrfs_item_ptr(leaf, path->slots[0],
 958					    struct btrfs_file_extent_item);
 959			btrfs_set_file_extent_num_bytes(leaf, fi,
 960							start - key.offset);
 
 
 961			path->slots[0]++;
 962			new_key.offset = start;
 963			btrfs_set_item_key_safe(trans, root, path, &new_key);
 964
 965			fi = btrfs_item_ptr(leaf, path->slots[0],
 966					    struct btrfs_file_extent_item);
 
 
 967			btrfs_set_file_extent_num_bytes(leaf, fi,
 968							other_end - start);
 969			btrfs_set_file_extent_offset(leaf, fi,
 970						     start - orig_offset);
 971			btrfs_mark_buffer_dirty(leaf);
 972			goto out;
 973		}
 974	}
 975
 976	while (start > key.offset || end < extent_end) {
 977		if (key.offset == start)
 978			split = end;
 979
 980		new_key.offset = split;
 981		ret = btrfs_duplicate_item(trans, root, path, &new_key);
 982		if (ret == -EAGAIN) {
 983			btrfs_release_path(path);
 984			goto again;
 985		}
 986		if (ret < 0) {
 987			btrfs_abort_transaction(trans, root, ret);
 988			goto out;
 989		}
 990
 991		leaf = path->nodes[0];
 992		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 993				    struct btrfs_file_extent_item);
 
 994		btrfs_set_file_extent_num_bytes(leaf, fi,
 995						split - key.offset);
 996
 997		fi = btrfs_item_ptr(leaf, path->slots[0],
 998				    struct btrfs_file_extent_item);
 999
 
1000		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1001		btrfs_set_file_extent_num_bytes(leaf, fi,
1002						extent_end - split);
1003		btrfs_mark_buffer_dirty(leaf);
1004
1005		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
1006					   root->root_key.objectid,
1007					   ino, orig_offset, 0);
1008		BUG_ON(ret); /* -ENOMEM */
 
 
 
 
 
1009
1010		if (split == start) {
1011			key.offset = start;
1012		} else {
1013			BUG_ON(start != key.offset);
 
 
 
 
1014			path->slots[0]--;
1015			extent_end = end;
1016		}
1017		recow = 1;
1018	}
1019
1020	other_start = end;
1021	other_end = 0;
 
 
 
 
1022	if (extent_mergeable(leaf, path->slots[0] + 1,
1023			     ino, bytenr, orig_offset,
1024			     &other_start, &other_end)) {
1025		if (recow) {
1026			btrfs_release_path(path);
1027			goto again;
1028		}
1029		extent_end = other_end;
1030		del_slot = path->slots[0] + 1;
1031		del_nr++;
1032		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1033					0, root->root_key.objectid,
1034					ino, orig_offset, 0);
1035		BUG_ON(ret); /* -ENOMEM */
 
1036	}
1037	other_start = 0;
1038	other_end = start;
1039	if (extent_mergeable(leaf, path->slots[0] - 1,
1040			     ino, bytenr, orig_offset,
1041			     &other_start, &other_end)) {
1042		if (recow) {
1043			btrfs_release_path(path);
1044			goto again;
1045		}
1046		key.offset = other_start;
1047		del_slot = path->slots[0];
1048		del_nr++;
1049		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1050					0, root->root_key.objectid,
1051					ino, orig_offset, 0);
1052		BUG_ON(ret); /* -ENOMEM */
 
1053	}
1054	if (del_nr == 0) {
1055		fi = btrfs_item_ptr(leaf, path->slots[0],
1056			   struct btrfs_file_extent_item);
1057		btrfs_set_file_extent_type(leaf, fi,
1058					   BTRFS_FILE_EXTENT_REG);
1059		btrfs_mark_buffer_dirty(leaf);
 
1060	} else {
1061		fi = btrfs_item_ptr(leaf, del_slot - 1,
1062			   struct btrfs_file_extent_item);
1063		btrfs_set_file_extent_type(leaf, fi,
1064					   BTRFS_FILE_EXTENT_REG);
 
1065		btrfs_set_file_extent_num_bytes(leaf, fi,
1066						extent_end - key.offset);
1067		btrfs_mark_buffer_dirty(leaf);
1068
1069		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1070		if (ret < 0) {
1071			btrfs_abort_transaction(trans, root, ret);
1072			goto out;
1073		}
1074	}
1075out:
1076	btrfs_free_path(path);
1077	return 0;
1078}
1079
1080/*
1081 * on error we return an unlocked page and the error value
1082 * on success we return a locked page and 0
1083 */
1084static int prepare_uptodate_page(struct page *page, u64 pos,
 
1085				 bool force_uptodate)
1086{
 
1087	int ret = 0;
1088
1089	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
1090	    !PageUptodate(page)) {
1091		ret = btrfs_readpage(NULL, page);
1092		if (ret)
1093			return ret;
1094		lock_page(page);
1095		if (!PageUptodate(page)) {
1096			unlock_page(page);
1097			return -EIO;
1098		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1099	}
1100	return 0;
1101}
1102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1103/*
1104 * this gets pages into the page cache and locks them down, it also properly
1105 * waits for data=ordered extents to finish before allowing the pages to be
1106 * modified.
1107 */
1108static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
1109			 struct page **pages, size_t num_pages,
1110			 loff_t pos, unsigned long first_index,
1111			 size_t write_bytes, bool force_uptodate)
1112{
1113	struct extent_state *cached_state = NULL;
1114	int i;
1115	unsigned long index = pos >> PAGE_CACHE_SHIFT;
1116	struct inode *inode = fdentry(file)->d_inode;
1117	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1118	int err = 0;
1119	int faili = 0;
1120	u64 start_pos;
1121	u64 last_pos;
1122
1123	start_pos = pos & ~((u64)root->sectorsize - 1);
1124	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
1125
 
1126again:
1127	for (i = 0; i < num_pages; i++) {
1128		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1129					       mask | __GFP_WRITE);
1130		if (!pages[i]) {
1131			faili = i - 1;
1132			err = -ENOMEM;
 
 
 
 
 
 
 
 
 
1133			goto fail;
1134		}
1135
1136		if (i == 0)
1137			err = prepare_uptodate_page(pages[i], pos,
1138						    force_uptodate);
1139		if (i == num_pages - 1)
1140			err = prepare_uptodate_page(pages[i],
1141						    pos + write_bytes, false);
1142		if (err) {
1143			page_cache_release(pages[i]);
 
 
 
 
1144			faili = i - 1;
1145			goto fail;
1146		}
1147		wait_on_page_writeback(pages[i]);
1148	}
1149	err = 0;
1150	if (start_pos < inode->i_size) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1151		struct btrfs_ordered_extent *ordered;
1152		lock_extent_bits(&BTRFS_I(inode)->io_tree,
1153				 start_pos, last_pos - 1, 0, &cached_state);
1154		ordered = btrfs_lookup_first_ordered_extent(inode,
1155							    last_pos - 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1156		if (ordered &&
1157		    ordered->file_offset + ordered->len > start_pos &&
1158		    ordered->file_offset < last_pos) {
1159			btrfs_put_ordered_extent(ordered);
1160			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1161					     start_pos, last_pos - 1,
1162					     &cached_state, GFP_NOFS);
1163			for (i = 0; i < num_pages; i++) {
1164				unlock_page(pages[i]);
1165				page_cache_release(pages[i]);
1166			}
1167			btrfs_wait_ordered_range(inode, start_pos,
1168						 last_pos - start_pos);
1169			goto again;
1170		}
1171		if (ordered)
1172			btrfs_put_ordered_extent(ordered);
1173
1174		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1175				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1176				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
1177				  GFP_NOFS);
1178		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1179				     start_pos, last_pos - 1, &cached_state,
1180				     GFP_NOFS);
1181	}
1182	for (i = 0; i < num_pages; i++) {
1183		if (clear_page_dirty_for_io(pages[i]))
1184			account_page_redirty(pages[i]);
1185		set_page_extent_mapped(pages[i]);
 
 
1186		WARN_ON(!PageLocked(pages[i]));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1187	}
1188	return 0;
1189fail:
1190	while (faili >= 0) {
1191		unlock_page(pages[faili]);
1192		page_cache_release(pages[faili]);
1193		faili--;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1194	}
1195	return err;
1196
 
1197}
1198
1199static noinline ssize_t __btrfs_buffered_write(struct file *file,
1200					       struct iov_iter *i,
1201					       loff_t pos)
1202{
1203	struct inode *inode = fdentry(file)->d_inode;
1204	struct btrfs_root *root = BTRFS_I(inode)->root;
 
 
1205	struct page **pages = NULL;
1206	unsigned long first_index;
 
 
 
1207	size_t num_written = 0;
1208	int nrptrs;
1209	int ret = 0;
 
1210	bool force_page_uptodate = false;
 
 
 
 
 
 
 
 
 
 
 
1211
1212	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1213		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1214		     (sizeof(struct page *)));
 
 
 
 
 
 
 
 
1215	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1216	nrptrs = max(nrptrs, 8);
1217	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1218	if (!pages)
1219		return -ENOMEM;
1220
1221	first_index = pos >> PAGE_CACHE_SHIFT;
1222
1223	while (iov_iter_count(i) > 0) {
1224		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
 
 
1225		size_t write_bytes = min(iov_iter_count(i),
1226					 nrptrs * (size_t)PAGE_CACHE_SIZE -
1227					 offset);
1228		size_t num_pages = (write_bytes + offset +
1229				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1230		size_t dirty_pages;
1231		size_t copied;
1232
1233		WARN_ON(num_pages > nrptrs);
 
1234
1235		/*
1236		 * Fault pages before locking them in prepare_pages
1237		 * to avoid recursive lock
1238		 */
1239		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1240			ret = -EFAULT;
1241			break;
1242		}
1243
1244		ret = btrfs_delalloc_reserve_space(inode,
1245					num_pages << PAGE_CACHE_SHIFT);
1246		if (ret)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1247			break;
 
1248
1249		/*
1250		 * This is going to setup the pages array with the number of
1251		 * pages we want, so we don't really need to worry about the
1252		 * contents of pages from loop to loop
1253		 */
1254		ret = prepare_pages(root, file, pages, num_pages,
1255				    pos, first_index, write_bytes,
1256				    force_page_uptodate);
1257		if (ret) {
1258			btrfs_delalloc_release_space(inode,
1259					num_pages << PAGE_CACHE_SHIFT);
1260			break;
1261		}
1262
1263		copied = btrfs_copy_from_user(pos, num_pages,
1264					   write_bytes, pages, i);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1265
1266		/*
1267		 * if we have trouble faulting in the pages, fall
1268		 * back to one page at a time
1269		 */
1270		if (copied < write_bytes)
1271			nrptrs = 1;
1272
1273		if (copied == 0) {
1274			force_page_uptodate = true;
 
1275			dirty_pages = 0;
1276		} else {
1277			force_page_uptodate = false;
1278			dirty_pages = (copied + offset +
1279				       PAGE_CACHE_SIZE - 1) >>
1280				       PAGE_CACHE_SHIFT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1281		}
1282
 
 
 
 
 
 
 
1283		/*
1284		 * If we had a short copy we need to release the excess delaloc
1285		 * bytes we reserved.  We need to increment outstanding_extents
1286		 * because btrfs_delalloc_release_space will decrement it, but
1287		 * we still have an outstanding extent for the chunk we actually
1288		 * managed to copy.
1289		 */
1290		if (num_pages > dirty_pages) {
1291			if (copied > 0) {
1292				spin_lock(&BTRFS_I(inode)->lock);
1293				BTRFS_I(inode)->outstanding_extents++;
1294				spin_unlock(&BTRFS_I(inode)->lock);
1295			}
1296			btrfs_delalloc_release_space(inode,
1297					(num_pages - dirty_pages) <<
1298					PAGE_CACHE_SHIFT);
 
1299		}
1300
1301		if (copied > 0) {
1302			ret = btrfs_dirty_pages(root, inode, pages,
1303						dirty_pages, pos, copied,
1304						NULL);
1305			if (ret) {
1306				btrfs_delalloc_release_space(inode,
1307					dirty_pages << PAGE_CACHE_SHIFT);
1308				btrfs_drop_pages(pages, num_pages);
1309				break;
1310			}
1311		}
1312
1313		btrfs_drop_pages(pages, num_pages);
1314
1315		cond_resched();
1316
1317		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1318						   dirty_pages);
1319		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1320			btrfs_btree_balance_dirty(root, 1);
1321
1322		pos += copied;
1323		num_written += copied;
1324	}
1325
1326	kfree(pages);
1327
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1328	return num_written ? num_written : ret;
1329}
1330
1331static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1332				    const struct iovec *iov,
1333				    unsigned long nr_segs, loff_t pos,
1334				    loff_t *ppos, size_t count, size_t ocount)
 
 
 
 
 
 
 
 
 
 
 
1335{
1336	struct file *file = iocb->ki_filp;
1337	struct iov_iter i;
1338	ssize_t written;
 
 
1339	ssize_t written_buffered;
 
1340	loff_t endbyte;
1341	int err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1342
1343	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1344					    count, ocount);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1345
1346	if (written < 0 || written == count)
1347		return written;
 
1348
1349	pos += written;
1350	count -= written;
1351	iov_iter_init(&i, iov, nr_segs, count, written);
1352	written_buffered = __btrfs_buffered_write(file, &i, pos);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1353	if (written_buffered < 0) {
1354		err = written_buffered;
1355		goto out;
1356	}
 
 
 
 
1357	endbyte = pos + written_buffered - 1;
1358	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
 
 
 
1359	if (err)
1360		goto out;
1361	written += written_buffered;
1362	*ppos = pos + written_buffered;
1363	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1364				 endbyte >> PAGE_CACHE_SHIFT);
1365out:
1366	return written ? written : err;
1367}
1368
1369static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1370				    const struct iovec *iov,
1371				    unsigned long nr_segs, loff_t pos)
1372{
1373	struct file *file = iocb->ki_filp;
1374	struct inode *inode = fdentry(file)->d_inode;
1375	struct btrfs_root *root = BTRFS_I(inode)->root;
1376	loff_t *ppos = &iocb->ki_pos;
1377	u64 start_pos;
1378	ssize_t num_written = 0;
1379	ssize_t err = 0;
1380	size_t count, ocount;
1381
1382	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1383
1384	mutex_lock(&inode->i_mutex);
1385
1386	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1387	if (err) {
1388		mutex_unlock(&inode->i_mutex);
1389		goto out;
1390	}
1391	count = ocount;
1392
1393	current->backing_dev_info = inode->i_mapping->backing_dev_info;
1394	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1395	if (err) {
1396		mutex_unlock(&inode->i_mutex);
1397		goto out;
1398	}
1399
1400	if (count == 0) {
1401		mutex_unlock(&inode->i_mutex);
1402		goto out;
1403	}
1404
1405	err = file_remove_suid(file);
1406	if (err) {
1407		mutex_unlock(&inode->i_mutex);
1408		goto out;
1409	}
1410
1411	/*
1412	 * If BTRFS flips read-only due to some impossible error
1413	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), we have
1414	 * to stop this write operation to ensure FS consistency, even
1415	 * though we have opened the file as writable.
1416	 */
1417	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1418		mutex_unlock(&inode->i_mutex);
1419		err = -EROFS;
1420		goto out;
1421	}
1422
1423	err = file_update_time(file);
1424	if (err) {
1425		mutex_unlock(&inode->i_mutex);
1426		goto out;
1427	}
1428
1429	start_pos = round_down(pos, root->sectorsize);
1430	if (start_pos > i_size_read(inode)) {
1431		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1432		if (err) {
1433			mutex_unlock(&inode->i_mutex);
1434			goto out;
1435		}
1436	}
1437
1438	if (unlikely(file->f_flags & O_DIRECT)) {
1439		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1440						   pos, ppos, count, ocount);
1441	} else {
1442		struct iov_iter i;
1443
1444		iov_iter_init(&i, iov, nr_segs, count, num_written);
1445
1446		num_written = __btrfs_buffered_write(file, &i, pos);
1447		if (num_written > 0)
1448			*ppos = pos + num_written;
1449	}
1450
1451	mutex_unlock(&inode->i_mutex);
1452
1453	/*
1454	 * we want to make sure fsync finds this change
1455	 * but we haven't joined a transaction running right now.
1456	 *
1457	 * Later on, someone is sure to update the inode and get the
1458	 * real transid recorded.
1459	 *
1460	 * We set last_trans now to the fs_info generation + 1,
1461	 * this will either be one more than the running transaction
1462	 * or the generation used for the next transaction if there isn't
1463	 * one running right now.
1464	 */
1465	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
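	/*
	 * Worked example (illustrative numbers): if the running
	 * transaction has generation 42, last_trans becomes 43 while
	 * last_trans_committed can be at most 42, so the bail-out check
	 * in btrfs_sync_file() fails and fsync knows it still has
	 * something to log.
	 */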
1466	if (num_written > 0 || num_written == -EIOCBQUEUED) {
1467		err = generic_write_sync(file, pos, num_written);
1468		if (err < 0 && num_written > 0)
1469			num_written = err;
1470	}
1471out:
1472	current->backing_dev_info = NULL;
1473	return num_written ? num_written : err;
1474}
1475
1476int btrfs_release_file(struct inode *inode, struct file *filp)
1477{
1478	/*
1479	 * ordered_data_close is set by setattr when we are about to truncate
1480	 * a file from a non-zero size to a zero size.  This tries to
1481	 * flush down new bytes that may have been written if the
1482	 * application was using truncate to replace a file in place.
1483	 */
1484	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1485			       &BTRFS_I(inode)->runtime_flags)) {
1486		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1487		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1488			filemap_flush(inode->i_mapping);
1489	}
1490	if (filp->private_data)
1491		btrfs_ioctl_trans_end(filp);
1492	return 0;
1493}
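
/*
 * Illustrative userspace pattern (hypothetical names) that the flush
 * above is defending against:
 *
 *	fd = open("config", O_WRONLY | O_TRUNC);
 *	write(fd, new_contents, new_len);
 *	close(fd);
 *
 * Starting writeback on close makes it less likely that a crash
 * leaves the file truncated but not yet rewritten.
 */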
1494
1495/*
1496 * fsync call for both files and directories.  Whenever possible, this
1497 * logs the inode into the tree log instead of forcing a full commit.
1498 *
1499 * It needs to call filemap_fdatawait so that all the ordered extent
1500 * updates in the metadata btree are up to date for copying to the log.
1501 *
1502 * It drops the inode mutex before doing the tree log commit.  This is an
1503 * important optimization for directories because holding the mutex prevents
1504 * new operations on the dir while we write to disk.
1505 */
1506int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1507{
1508	struct dentry *dentry = file->f_path.dentry;
1509	struct inode *inode = dentry->d_inode;
1510	struct btrfs_root *root = BTRFS_I(inode)->root;
1511	int ret = 0;
1512	struct btrfs_trans_handle *trans;
1513
1514	trace_btrfs_sync_file(file, datasync);
1515
1516	mutex_lock(&inode->i_mutex);
1517
1518	/*
1519	 * we wait first, since the writeback may change the inode; also,
1520	 * btrfs_wait_ordered_range does a filemap_write_and_wait_range,
1521	 * which is why we don't do it above like other file systems.
1522	 */
1523	root->log_batch++;
1524	btrfs_wait_ordered_range(inode, start, end);
1525	root->log_batch++;
1526
1527	/*
1528	 * check the transaction that last modified this inode
1529	 * and see if it's already been committed
1530	 */
1531	if (!BTRFS_I(inode)->last_trans) {
1532		mutex_unlock(&inode->i_mutex);
1533		goto out;
1534	}
1535
1536	/*
1537	 * if the last transaction that changed this file was before
1538	 * the current transaction, we can bail out now without any
1539	 * syncing
1540	 */
1541	smp_mb();
1542	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1543	    BTRFS_I(inode)->last_trans <=
1544	    root->fs_info->last_trans_committed) {
1545		BTRFS_I(inode)->last_trans = 0;
1546		mutex_unlock(&inode->i_mutex);
1547		goto out;
1548	}
1549
1550	/*
1551	 * ok, we haven't committed the transaction yet; let's do a commit
1552	 */
1553	if (file->private_data)
1554		btrfs_ioctl_trans_end(file);
1555
1556	trans = btrfs_start_transaction(root, 0);
1557	if (IS_ERR(trans)) {
1558		ret = PTR_ERR(trans);
1559		mutex_unlock(&inode->i_mutex);
1560		goto out;
1561	}
1562
1563	ret = btrfs_log_dentry_safe(trans, root, dentry);
1564	if (ret < 0) {
1565		mutex_unlock(&inode->i_mutex);
1566		goto out;
1567	}
1568
1569	/* we've logged all the items and now have a consistent
1570	 * version of the file in the log.  It is possible that
1571	 * someone will come in and modify the file, but that's
1572	 * fine because the log is consistent on disk, and we
1573	 * have references to all of the file's extents.
1574	 *
1575	 * It is possible that someone will come in and log the
1576	 * file again, but that will end up using the synchronization
1577	 * inside btrfs_sync_log to keep things safe.
1578	 */
1579	mutex_unlock(&inode->i_mutex);
1580
1581	if (ret != BTRFS_NO_LOG_SYNC) {
1582		if (ret > 0) {
1583			ret = btrfs_commit_transaction(trans, root);
1584		} else {
1585			ret = btrfs_sync_log(trans, root);
1586			if (ret == 0)
1587				ret = btrfs_end_transaction(trans, root);
1588			else
1589				ret = btrfs_commit_transaction(trans, root);
1590		}
1591	} else {
1592		ret = btrfs_end_transaction(trans, root);
1593	}
1594out:
1595	return ret > 0 ? -EIO : ret;
1596}
1597
1598static const struct vm_operations_struct btrfs_file_vm_ops = {
1599	.fault		= filemap_fault,
1600	.page_mkwrite	= btrfs_page_mkwrite,
1601};
1602
1603static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1604{
1605	struct address_space *mapping = filp->f_mapping;
1606
1607	if (!mapping->a_ops->readpage)
1608		return -ENOEXEC;
1609
1610	file_accessed(filp);
1611	vma->vm_ops = &btrfs_file_vm_ops;
1612	vma->vm_flags |= VM_CAN_NONLINEAR;
1613
1614	return 0;
1615}
1616
1617static long btrfs_fallocate(struct file *file, int mode,
1618			    loff_t offset, loff_t len)
1619{
1620	struct inode *inode = file->f_path.dentry->d_inode;
1621	struct extent_state *cached_state = NULL;
1622	u64 cur_offset;
1623	u64 last_byte;
1624	u64 alloc_start;
1625	u64 alloc_end;
1626	u64 alloc_hint = 0;
1627	u64 locked_end;
1628	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1629	struct extent_map *em;
1630	int ret;
1631
1632	alloc_start = offset & ~mask;
1633	alloc_end = (offset + len + mask) & ~mask;
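	/*
	 * Example of the rounding above, assuming a 4096 byte sectorsize
	 * (mask == 0xfff): offset == 5000, len == 3000 gives
	 * alloc_start == 4096 and alloc_end == 8192, the smallest
	 * sector-aligned range covering [offset, offset + len).
	 */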
1634
1635	/* We only support the FALLOC_FL_KEEP_SIZE mode */
1636	if (mode & ~FALLOC_FL_KEEP_SIZE)
1637		return -EOPNOTSUPP;
1638
1639	/*
1640	 * Make sure we have enough space before we do the
1641	 * allocation.
1642	 */
1643	ret = btrfs_check_data_free_space(inode, len);
1644	if (ret)
1645		return ret;
1646
1647	/*
1648	 * wait for ordered IO before we have any locks.  We'll loop again
1649	 * below with the locks held.
1650	 */
1651	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
1652
1653	mutex_lock(&inode->i_mutex);
1654	ret = inode_newsize_ok(inode, alloc_end);
1655	if (ret)
1656		goto out;
1657
1658	if (alloc_start > inode->i_size) {
1659		ret = btrfs_cont_expand(inode, i_size_read(inode),
1660					alloc_start);
1661		if (ret)
1662			goto out;
1663	}
1664
1665	locked_end = alloc_end - 1;
1666	while (1) {
1667		struct btrfs_ordered_extent *ordered;
1668
1669		/* the extent lock is ordered inside the running
1670		 * transaction
1671		 */
1672		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
1673				 locked_end, 0, &cached_state);
1674		ordered = btrfs_lookup_first_ordered_extent(inode,
1675							    alloc_end - 1);
1676		if (ordered &&
1677		    ordered->file_offset + ordered->len > alloc_start &&
1678		    ordered->file_offset < alloc_end) {
1679			btrfs_put_ordered_extent(ordered);
1680			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1681					     alloc_start, locked_end,
1682					     &cached_state, GFP_NOFS);
1683			/*
1684			 * we can't wait on the range with the transaction
1685			 * running or with the extent lock held
1686			 */
1687			btrfs_wait_ordered_range(inode, alloc_start,
1688						 alloc_end - alloc_start);
1689		} else {
1690			if (ordered)
1691				btrfs_put_ordered_extent(ordered);
1692			break;
1693		}
1694	}
1695
1696	cur_offset = alloc_start;
1697	while (1) {
1698		u64 actual_end;
1699
1700		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1701				      alloc_end - cur_offset, 0);
1702		if (IS_ERR_OR_NULL(em)) {
1703			if (!em)
1704				ret = -ENOMEM;
1705			else
1706				ret = PTR_ERR(em);
1707			break;
1708		}
1709		last_byte = min(extent_map_end(em), alloc_end);
1710		actual_end = min_t(u64, extent_map_end(em), offset + len);
1711		last_byte = (last_byte + mask) & ~mask;
1712
1713		if (em->block_start == EXTENT_MAP_HOLE ||
1714		    (cur_offset >= inode->i_size &&
1715		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
1716			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
1717							last_byte - cur_offset,
1718							1 << inode->i_blkbits,
1719							offset + len,
1720							&alloc_hint);
1721
1722			if (ret < 0) {
1723				free_extent_map(em);
1724				break;
1725			}
1726		} else if (actual_end > inode->i_size &&
1727			   !(mode & FALLOC_FL_KEEP_SIZE)) {
1728			/*
1729			 * We didn't need to allocate any more space, but we
1730			 * still extended the size of the file so we need to
1731			 * update i_size.
1732			 */
1733			inode->i_ctime = CURRENT_TIME;
1734			i_size_write(inode, actual_end);
1735			btrfs_ordered_update_i_size(inode, actual_end, NULL);
1736		}
1737		free_extent_map(em);
1738
1739		cur_offset = last_byte;
1740		if (cur_offset >= alloc_end) {
1741			ret = 0;
1742			break;
1743		}
1744	}
1745	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
1746			     &cached_state, GFP_NOFS);
1747out:
1748	mutex_unlock(&inode->i_mutex);
1749	/* Let go of our reservation. */
1750	btrfs_free_reserved_data_space(inode, len);
1751	return ret;
1752}
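
/*
 * Illustrative userspace use (hypothetical fd): preallocating 1MiB at
 * the current end of file,
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, file_size, 1 << 20);
 *
 * leaves i_size unchanged, while mode 0 lets the actual_end handling
 * in the loop above grow i_size.  Any other flag is rejected with
 * -EOPNOTSUPP.
 */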
1753
1754static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
1755{
1756	struct btrfs_root *root = BTRFS_I(inode)->root;
1757	struct extent_map *em;
1758	struct extent_state *cached_state = NULL;
1759	u64 lockstart = *offset;
1760	u64 lockend = i_size_read(inode);
1761	u64 start = *offset;
1762	u64 orig_start = *offset;
1763	u64 len = i_size_read(inode);
1764	u64 last_end = 0;
1765	int ret = 0;
1766
1767	lockend = max_t(u64, root->sectorsize, lockend);
1768	if (lockend <= lockstart)
1769		lockend = lockstart + root->sectorsize;
1770
1771	len = lockend - lockstart + 1;
1772
1773	len = max_t(u64, len, root->sectorsize);
1774	if (inode->i_size == 0)
1775		return -ENXIO;
1776
1777	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
1778			 &cached_state);
1779
1780	/*
1781	 * Delalloc is such a pain.  If we have a hole and we have pending
1782	 * delalloc for a portion of the hole we will get back a hole that
1783	 * exists for the entire range since it hasn't actually been written
1784	 * yet.  So to take care of this case we need to look for an extent just
1785	 * before the position we want in case there is outstanding delalloc
1786	 * going on here.
1787	 */
1788	if (origin == SEEK_HOLE && start != 0) {
1789		if (start <= root->sectorsize)
1790			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
1791						     root->sectorsize, 0);
1792		else
1793			em = btrfs_get_extent_fiemap(inode, NULL, 0,
1794						     start - root->sectorsize,
1795						     root->sectorsize, 0);
1796		if (IS_ERR(em)) {
1797			ret = PTR_ERR(em);
1798			goto out;
1799		}
1800		last_end = em->start + em->len;
1801		if (em->block_start == EXTENT_MAP_DELALLOC)
1802			last_end = min_t(u64, last_end, inode->i_size);
1803		free_extent_map(em);
1804	}
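	/*
	 * Example of the lookback (illustrative numbers, 4K sectors):
	 * with delalloc still pending over [0, 8191] and a SEEK_HOLE
	 * from 4096, the plain lookup would report one big hole, but
	 * probing the sector just before start lets last_end record how
	 * far the pending delalloc really extends.
	 */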
1805
1806	while (1) {
1807		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
1808		if (IS_ERR(em)) {
1809			ret = PTR_ERR(em);
1810			break;
1811		}
1812
1813		if (em->block_start == EXTENT_MAP_HOLE) {
1814			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1815				if (last_end <= orig_start) {
1816					free_extent_map(em);
1817					ret = -ENXIO;
1818					break;
1819				}
1820			}
1821
1822			if (origin == SEEK_HOLE) {
1823				*offset = start;
1824				free_extent_map(em);
1825				break;
1826			}
1827		} else {
1828			if (origin == SEEK_DATA) {
1829				if (em->block_start == EXTENT_MAP_DELALLOC) {
1830					if (start >= inode->i_size) {
1831						free_extent_map(em);
1832						ret = -ENXIO;
1833						break;
1834					}
1835				}
1836
1837				*offset = start;
1838				free_extent_map(em);
1839				break;
1840			}
1841		}
1842
1843		start = em->start + em->len;
1844		last_end = em->start + em->len;
1845
1846		if (em->block_start == EXTENT_MAP_DELALLOC)
1847			last_end = min_t(u64, last_end, inode->i_size);
1848
1849		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1850			free_extent_map(em);
1851			ret = -ENXIO;
1852			break;
1853		}
1854		free_extent_map(em);
1855		cond_resched();
1856	}
1857	if (!ret)
1858		*offset = min(*offset, inode->i_size);
1859out:
1860	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1861			     &cached_state, GFP_NOFS);
1862	return ret;
1863}
1864
1865static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
1866{
1867	struct inode *inode = file->f_mapping->host;
1868	int ret;
1869
1870	mutex_lock(&inode->i_mutex);
1871	switch (origin) {
1872	case SEEK_END:
1873	case SEEK_CUR:
1874		offset = generic_file_llseek(file, offset, origin);
1875		goto out;
1876	case SEEK_DATA:
1877	case SEEK_HOLE:
1878		if (offset >= i_size_read(inode)) {
1879			mutex_unlock(&inode->i_mutex);
1880			return -ENXIO;
1881		}
1882
1883		ret = find_desired_extent(inode, &offset, origin);
1884		if (ret) {
1885			mutex_unlock(&inode->i_mutex);
1886			return ret;
1887		}
1888	}
1889
1890	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
1891		offset = -EINVAL;
1892		goto out;
1893	}
1894	if (offset > inode->i_sb->s_maxbytes) {
1895		offset = -EINVAL;
1896		goto out;
1897	}
1898
1899	/* Special lock needed here? */
1900	if (offset != file->f_pos) {
1901		file->f_pos = offset;
1902		file->f_version = 0;
1903	}
1904out:
1905	mutex_unlock(&inode->i_mutex);
1906	return offset;
1907}
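
/*
 * Illustrative userspace sketch (hypothetical fd) of walking a sparse
 * file with the SEEK_DATA/SEEK_HOLE support above:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * everything in [data, hole) is worth reading, and repeating from
 * "hole" skips the holes entirely; -ENXIO surfaces as lseek() failing
 * with errno == ENXIO once the offset passes i_size.
 */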
1908
1909const struct file_operations btrfs_file_operations = {
1910	.llseek		= btrfs_file_llseek,
1911	.read		= do_sync_read,
1912	.write		= do_sync_write,
1913	.aio_read       = generic_file_aio_read,
1914	.splice_read	= generic_file_splice_read,
1915	.aio_write	= btrfs_file_aio_write,
1916	.mmap		= btrfs_file_mmap,
1917	.open		= generic_file_open,
1918	.release	= btrfs_release_file,
1919	.fsync		= btrfs_sync_file,
1920	.fallocate	= btrfs_fallocate,
1921	.unlocked_ioctl	= btrfs_ioctl,
1922#ifdef CONFIG_COMPAT
1923	.compat_ioctl	= btrfs_ioctl,
1924#endif
1925};