   1#include <linux/bitops.h>
   2#include <linux/slab.h>
   3#include <linux/bio.h>
   4#include <linux/mm.h>
   5#include <linux/pagemap.h>
   6#include <linux/page-flags.h>
   7#include <linux/spinlock.h>
   8#include <linux/blkdev.h>
   9#include <linux/swap.h>
  10#include <linux/writeback.h>
  11#include <linux/pagevec.h>
  12#include <linux/prefetch.h>
  13#include <linux/cleancache.h>
  14#include "extent_io.h"
  15#include "extent_map.h"
  16#include "ctree.h"
  17#include "btrfs_inode.h"
  18#include "volumes.h"
  19#include "check-integrity.h"
  20#include "locking.h"
  21#include "rcu-string.h"
  22#include "backref.h"
  23
  24static struct kmem_cache *extent_state_cache;
  25static struct kmem_cache *extent_buffer_cache;
  26static struct bio_set *btrfs_bioset;
  27
  28static inline bool extent_state_in_tree(const struct extent_state *state)
  29{
  30	return !RB_EMPTY_NODE(&state->rb_node);
  31}
  32
  33#ifdef CONFIG_BTRFS_DEBUG
  34static LIST_HEAD(buffers);
  35static LIST_HEAD(states);
  36
  37static DEFINE_SPINLOCK(leak_lock);
  38
  39static inline
  40void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
  41{
  42	unsigned long flags;
  43
  44	spin_lock_irqsave(&leak_lock, flags);
  45	list_add(new, head);
  46	spin_unlock_irqrestore(&leak_lock, flags);
  47}
  48
  49static inline
  50void btrfs_leak_debug_del(struct list_head *entry)
  51{
  52	unsigned long flags;
  53
  54	spin_lock_irqsave(&leak_lock, flags);
  55	list_del(entry);
  56	spin_unlock_irqrestore(&leak_lock, flags);
  57}
  58
  59static inline
  60void btrfs_leak_debug_check(void)
  61{
  62	struct extent_state *state;
  63	struct extent_buffer *eb;
  64
  65	while (!list_empty(&states)) {
  66		state = list_entry(states.next, struct extent_state, leak_list);
  67		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
  68		       state->start, state->end, state->state,
  69		       extent_state_in_tree(state),
  70		       atomic_read(&state->refs));
  71		list_del(&state->leak_list);
  72		kmem_cache_free(extent_state_cache, state);
  73	}
  74
  75	while (!list_empty(&buffers)) {
  76		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
  77		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
  78		       "refs %d\n",
  79		       eb->start, eb->len, atomic_read(&eb->refs));
  80		list_del(&eb->leak_list);
  81		kmem_cache_free(extent_buffer_cache, eb);
  82	}
  83}
  84
  85#define btrfs_debug_check_extent_io_range(tree, start, end)		\
  86	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
  87static inline void __btrfs_debug_check_extent_io_range(const char *caller,
  88		struct extent_io_tree *tree, u64 start, u64 end)
  89{
  90	struct inode *inode;
  91	u64 isize;
  92
  93	if (!tree->mapping)
  94		return;
  95
  96	inode = tree->mapping->host;
  97	isize = i_size_read(inode);
  98	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
  99		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
 100		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
 101				caller, btrfs_ino(inode), isize, start, end);
 102	}
 103}
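/*
 * Note: ranges in this tree are inclusive byte ranges, so a well-formed
 * 'end' normally sits one byte before an alignment boundary and is
 * therefore odd (4095 for the first 4KiB page, for example).  An even
 * 'end' that is not i_size - 1 is flagged above as a likely caller bug.
 */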
 104#else
 105#define btrfs_leak_debug_add(new, head)	do {} while (0)
 106#define btrfs_leak_debug_del(entry)	do {} while (0)
 107#define btrfs_leak_debug_check()	do {} while (0)
 108#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 109#endif
 110
 111#define BUFFER_LRU_MAX 64
 112
 113struct tree_entry {
 114	u64 start;
 115	u64 end;
 116	struct rb_node rb_node;
 117};
 118
 119struct extent_page_data {
 120	struct bio *bio;
 121	struct extent_io_tree *tree;
 122	get_extent_t *get_extent;
 123	unsigned long bio_flags;
 124
 125	/* tells writepage not to lock the state bits for this range
 126	 * it still does the unlocking
 127	 */
 128	unsigned int extent_locked:1;
 129
 130	/* tells the submit_bio code to use a WRITE_SYNC */
 131	unsigned int sync_io:1;
 132};
 133
 134static void add_extent_changeset(struct extent_state *state, unsigned bits,
 135				 struct extent_changeset *changeset,
 136				 int set)
 137{
 138	int ret;
 139
 140	if (!changeset)
 141		return;
 142	if (set && (state->state & bits) == bits)
 143		return;
 144	if (!set && (state->state & bits) == 0)
 145		return;
 146	changeset->bytes_changed += state->end - state->start + 1;
 147	ret = ulist_add(changeset->range_changed, state->start, state->end,
 148			GFP_ATOMIC);
 149	/* ENOMEM */
 150	BUG_ON(ret < 0);
 151}
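/*
 * Note: the changeset only accounts bits that actually flip -- setting
 * bits that are already set, or clearing bits that are already clear, is
 * skipped by the early returns above.  bytes_changed accumulates the
 * size of each affected state and range_changed records its
 * [start, end] offsets in a ulist, so callers can tell exactly which
 * bytes were modified.
 */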
 152
 153static noinline void flush_write_bio(void *data);
 154static inline struct btrfs_fs_info *
 155tree_fs_info(struct extent_io_tree *tree)
 156{
 157	if (!tree->mapping)
 158		return NULL;
 159	return btrfs_sb(tree->mapping->host->i_sb);
 160}
 161
 162int __init extent_io_init(void)
 163{
 164	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 165			sizeof(struct extent_state), 0,
 166			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 167	if (!extent_state_cache)
 168		return -ENOMEM;
 169
 170	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 171			sizeof(struct extent_buffer), 0,
 172			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 173	if (!extent_buffer_cache)
 174		goto free_state_cache;
 175
 176	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
 177				     offsetof(struct btrfs_io_bio, bio));
 178	if (!btrfs_bioset)
 179		goto free_buffer_cache;
 180
 181	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
 182		goto free_bioset;
 183
 184	return 0;
 185
 186free_bioset:
 187	bioset_free(btrfs_bioset);
 188	btrfs_bioset = NULL;
 189
 190free_buffer_cache:
 191	kmem_cache_destroy(extent_buffer_cache);
 192	extent_buffer_cache = NULL;
 193
 194free_state_cache:
 195	kmem_cache_destroy(extent_state_cache);
 196	extent_state_cache = NULL;
 197	return -ENOMEM;
 198}
 199
 200void extent_io_exit(void)
 201{
 202	btrfs_leak_debug_check();
 203
 204	/*
 205	 * Make sure all delayed rcu free are flushed before we
 206	 * destroy caches.
 207	 */
 208	rcu_barrier();
 209	kmem_cache_destroy(extent_state_cache);
 210	kmem_cache_destroy(extent_buffer_cache);
 211	if (btrfs_bioset)
 212		bioset_free(btrfs_bioset);
 213}
 214
 215void extent_io_tree_init(struct extent_io_tree *tree,
 216			 struct address_space *mapping)
 217{
 218	tree->state = RB_ROOT;
 219	tree->ops = NULL;
 220	tree->dirty_bytes = 0;
 221	spin_lock_init(&tree->lock);
 222	tree->mapping = mapping;
 223}
 224
 225static struct extent_state *alloc_extent_state(gfp_t mask)
 226{
 227	struct extent_state *state;
 228
 229	state = kmem_cache_alloc(extent_state_cache, mask);
 230	if (!state)
 231		return state;
 232	state->state = 0;
 233	state->failrec = NULL;
 234	RB_CLEAR_NODE(&state->rb_node);
 235	btrfs_leak_debug_add(&state->leak_list, &states);
 236	atomic_set(&state->refs, 1);
 237	init_waitqueue_head(&state->wq);
 238	trace_alloc_extent_state(state, mask, _RET_IP_);
 239	return state;
 240}
 241
 242void free_extent_state(struct extent_state *state)
 243{
 244	if (!state)
 245		return;
 246	if (atomic_dec_and_test(&state->refs)) {
 247		WARN_ON(extent_state_in_tree(state));
 248		btrfs_leak_debug_del(&state->leak_list);
 249		trace_free_extent_state(state, _RET_IP_);
 250		kmem_cache_free(extent_state_cache, state);
 251	}
 252}
 253
 254static struct rb_node *tree_insert(struct rb_root *root,
 255				   struct rb_node *search_start,
 256				   u64 offset,
 257				   struct rb_node *node,
 258				   struct rb_node ***p_in,
 259				   struct rb_node **parent_in)
 260{
 261	struct rb_node **p;
 262	struct rb_node *parent = NULL;
 263	struct tree_entry *entry;
 264
 265	if (p_in && parent_in) {
 266		p = *p_in;
 267		parent = *parent_in;
 268		goto do_insert;
 269	}
 270
 271	p = search_start ? &search_start : &root->rb_node;
 272	while (*p) {
 273		parent = *p;
 274		entry = rb_entry(parent, struct tree_entry, rb_node);
 275
 276		if (offset < entry->start)
 277			p = &(*p)->rb_left;
 278		else if (offset > entry->end)
 279			p = &(*p)->rb_right;
 280		else
 281			return parent;
 282	}
 283
 284do_insert:
 285	rb_link_node(node, parent, p);
 286	rb_insert_color(node, root);
 287	return NULL;
 288}
 289
 290static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 291				      struct rb_node **prev_ret,
 292				      struct rb_node **next_ret,
 293				      struct rb_node ***p_ret,
 294				      struct rb_node **parent_ret)
 295{
 296	struct rb_root *root = &tree->state;
 297	struct rb_node **n = &root->rb_node;
 298	struct rb_node *prev = NULL;
 299	struct rb_node *orig_prev = NULL;
 300	struct tree_entry *entry;
 301	struct tree_entry *prev_entry = NULL;
 302
 303	while (*n) {
 304		prev = *n;
 305		entry = rb_entry(prev, struct tree_entry, rb_node);
 306		prev_entry = entry;
 307
 308		if (offset < entry->start)
 309			n = &(*n)->rb_left;
 310		else if (offset > entry->end)
 311			n = &(*n)->rb_right;
 312		else
 313			return *n;
 314	}
 315
 316	if (p_ret)
 317		*p_ret = n;
 318	if (parent_ret)
 319		*parent_ret = prev;
 320
 321	if (prev_ret) {
 322		orig_prev = prev;
 323		while (prev && offset > prev_entry->end) {
 324			prev = rb_next(prev);
 325			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 326		}
 327		*prev_ret = prev;
 328		prev = orig_prev;
 329	}
 330
 331	if (next_ret) {
 332		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 333		while (prev && offset < prev_entry->start) {
 334			prev = rb_prev(prev);
 335			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 336		}
 337		*next_ret = prev;
 338	}
 339	return NULL;
 340}
 341
 342static inline struct rb_node *
 343tree_search_for_insert(struct extent_io_tree *tree,
 344		       u64 offset,
 345		       struct rb_node ***p_ret,
 346		       struct rb_node **parent_ret)
 347{
 348	struct rb_node *prev = NULL;
 349	struct rb_node *ret;
 350
 351	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 352	if (!ret)
 353		return prev;
 354	return ret;
 355}
 356
 357static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 358					  u64 offset)
 359{
 360	return tree_search_for_insert(tree, offset, NULL, NULL);
 361}
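/*
 * Note: tree_search() returns the extent state containing 'offset', or,
 * if none does, the first state that starts after it, or NULL when
 * 'offset' lies beyond the last state.  This is why callers describe it
 * as finding "the extents that end after our range starts".
 */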
 362
 363static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 364		     struct extent_state *other)
 365{
 366	if (tree->ops && tree->ops->merge_extent_hook)
 367		tree->ops->merge_extent_hook(tree->mapping->host, new,
 368					     other);
 369}
 370
 371/*
 372 * utility function to look for merge candidates inside a given range.
 373 * Any extents with matching state are merged together into a single
 374 * extent in the tree.  Extents with EXTENT_IO in their state field
 375 * are not merged because the end_io handlers need to be able to do
 376 * operations on them without sleeping (or doing allocations/splits).
 377 *
 378 * This should be called with the tree lock held.
 379 */
 380static void merge_state(struct extent_io_tree *tree,
 381		        struct extent_state *state)
 382{
 383	struct extent_state *other;
 384	struct rb_node *other_node;
 385
 386	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 387		return;
 388
 389	other_node = rb_prev(&state->rb_node);
 390	if (other_node) {
 391		other = rb_entry(other_node, struct extent_state, rb_node);
 392		if (other->end == state->start - 1 &&
 393		    other->state == state->state) {
 394			merge_cb(tree, state, other);
 395			state->start = other->start;
 396			rb_erase(&other->rb_node, &tree->state);
 397			RB_CLEAR_NODE(&other->rb_node);
 398			free_extent_state(other);
 399		}
 400	}
 401	other_node = rb_next(&state->rb_node);
 402	if (other_node) {
 403		other = rb_entry(other_node, struct extent_state, rb_node);
 404		if (other->start == state->end + 1 &&
 405		    other->state == state->state) {
 406			merge_cb(tree, state, other);
 407			state->end = other->end;
 408			rb_erase(&other->rb_node, &tree->state);
 409			RB_CLEAR_NODE(&other->rb_node);
 410			free_extent_state(other);
 411		}
 412	}
 413}
 414
 415static void set_state_cb(struct extent_io_tree *tree,
 416			 struct extent_state *state, unsigned *bits)
 417{
 418	if (tree->ops && tree->ops->set_bit_hook)
 419		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 420}
 421
 422static void clear_state_cb(struct extent_io_tree *tree,
 423			   struct extent_state *state, unsigned *bits)
 424{
 425	if (tree->ops && tree->ops->clear_bit_hook)
 426		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 427}
 428
 429static void set_state_bits(struct extent_io_tree *tree,
 430			   struct extent_state *state, unsigned *bits,
 431			   struct extent_changeset *changeset);
 432
 433/*
 434 * insert an extent_state struct into the tree.  'bits' are set on the
 435 * struct before it is inserted.
 436 *
 437 * This may return -EEXIST if the extent is already there, in which case the
 438 * state struct is freed.
 439 *
 440 * The tree lock is not taken internally.  This is a utility function and
 441 * probably isn't what you want to call (see set/clear_extent_bit).
 442 */
 443static int insert_state(struct extent_io_tree *tree,
 444			struct extent_state *state, u64 start, u64 end,
 445			struct rb_node ***p,
 446			struct rb_node **parent,
 447			unsigned *bits, struct extent_changeset *changeset)
 448{
 449	struct rb_node *node;
 450
 451	if (end < start)
 452		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 453		       end, start);
 454	state->start = start;
 455	state->end = end;
 456
 457	set_state_bits(tree, state, bits, changeset);
 458
 459	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
 460	if (node) {
 461		struct extent_state *found;
 462		found = rb_entry(node, struct extent_state, rb_node);
 463		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 464		       "%llu %llu\n",
 465		       found->start, found->end, start, end);
 466		return -EEXIST;
 467	}
 468	merge_state(tree, state);
 469	return 0;
 470}
 471
 472static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 473		     u64 split)
 474{
 475	if (tree->ops && tree->ops->split_extent_hook)
 476		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
 477}
 478
 479/*
 480 * split a given extent state struct in two, inserting the preallocated
 481 * struct 'prealloc' as the newly created second half.  'split' indicates an
 482 * offset inside 'orig' where it should be split.
 483 *
 484 * Before calling,
 485 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 486 * are two extent state structs in the tree:
 487 * prealloc: [orig->start, split - 1]
 488 * orig: [ split, orig->end ]
 489 *
 490 * The tree locks are not taken by this function. They need to be held
 491 * by the caller.
 492 */
 493static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 494		       struct extent_state *prealloc, u64 split)
 495{
 496	struct rb_node *node;
 497
 498	split_cb(tree, orig, split);
 499
 500	prealloc->start = orig->start;
 501	prealloc->end = split - 1;
 502	prealloc->state = orig->state;
 503	orig->start = split;
 504
 505	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
 506			   &prealloc->rb_node, NULL, NULL);
 507	if (node) {
 508		free_extent_state(prealloc);
 509		return -EEXIST;
 510	}
 511	return 0;
 512}
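/*
 * Worked example: splitting an extent state that covers [0, 8191] at
 * offset 4096 leaves 'prealloc' covering [0, 4095] and 'orig' covering
 * [4096, 8191]; both carry the same state bits, since prealloc->state is
 * copied from orig.
 */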
 513
 514static struct extent_state *next_state(struct extent_state *state)
 515{
 516	struct rb_node *next = rb_next(&state->rb_node);
 517	if (next)
 518		return rb_entry(next, struct extent_state, rb_node);
 519	else
 520		return NULL;
 521}
 522
 523/*
 524 * utility function to clear some bits in an extent state struct.
 525 * it will optionally wake up any one waiting on this state (wake == 1).
 526 *
 527 * If no bits are set on the state struct after clearing things, the
 528 * struct is freed and removed from the tree
 529 */
 530static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 531					    struct extent_state *state,
 532					    unsigned *bits, int wake,
 533					    struct extent_changeset *changeset)
 534{
 535	struct extent_state *next;
 536	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 537
 538	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 539		u64 range = state->end - state->start + 1;
 540		WARN_ON(range > tree->dirty_bytes);
 541		tree->dirty_bytes -= range;
 542	}
 543	clear_state_cb(tree, state, bits);
 544	add_extent_changeset(state, bits_to_clear, changeset, 0);
 545	state->state &= ~bits_to_clear;
 546	if (wake)
 547		wake_up(&state->wq);
 548	if (state->state == 0) {
 549		next = next_state(state);
 550		if (extent_state_in_tree(state)) {
 551			rb_erase(&state->rb_node, &tree->state);
 552			RB_CLEAR_NODE(&state->rb_node);
 553			free_extent_state(state);
 554		} else {
 555			WARN_ON(1);
 556		}
 557	} else {
 558		merge_state(tree, state);
 559		next = next_state(state);
 560	}
 561	return next;
 562}
 563
 564static struct extent_state *
 565alloc_extent_state_atomic(struct extent_state *prealloc)
 566{
 567	if (!prealloc)
 568		prealloc = alloc_extent_state(GFP_ATOMIC);
 569
 570	return prealloc;
 571}
 572
 573static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 574{
 575	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
 576		    "Extent tree was modified by another "
 577		    "thread while locked.");
 578}
 579
 580/*
 581 * clear some bits on a range in the tree.  This may require splitting
 582 * or inserting elements in the tree, so the gfp mask is used to
 583 * indicate which allocations or sleeping are allowed.
 584 *
 585 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 586 * the given range from the tree regardless of state (ie for truncate).
 587 *
 588 * the range [start, end] is inclusive.
 589 *
 590 * This takes the tree lock, and returns 0 on success and < 0 on error.
 591 */
 592static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 593			      unsigned bits, int wake, int delete,
 594			      struct extent_state **cached_state,
 595			      gfp_t mask, struct extent_changeset *changeset)
 596{
 597	struct extent_state *state;
 598	struct extent_state *cached;
 599	struct extent_state *prealloc = NULL;
 600	struct rb_node *node;
 601	u64 last_end;
 602	int err;
 603	int clear = 0;
 604
 605	btrfs_debug_check_extent_io_range(tree, start, end);
 606
 607	if (bits & EXTENT_DELALLOC)
 608		bits |= EXTENT_NORESERVE;
 609
 610	if (delete)
 611		bits |= ~EXTENT_CTLBITS;
 612	bits |= EXTENT_FIRST_DELALLOC;
 613
 614	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 615		clear = 1;
 616again:
 617	if (!prealloc && gfpflags_allow_blocking(mask)) {
 618		/*
 619		 * Don't care for allocation failure here because we might end
 620		 * up not needing the pre-allocated extent state at all, which
 621		 * is the case if we only have in the tree extent states that
  622		 * cover our input range and don't cover any other range.
 623		 * If we end up needing a new extent state we allocate it later.
 624		 */
 625		prealloc = alloc_extent_state(mask);
 626	}
 627
 628	spin_lock(&tree->lock);
 629	if (cached_state) {
 630		cached = *cached_state;
 631
 632		if (clear) {
 633			*cached_state = NULL;
 634			cached_state = NULL;
 635		}
 636
 637		if (cached && extent_state_in_tree(cached) &&
 638		    cached->start <= start && cached->end > start) {
 639			if (clear)
 640				atomic_dec(&cached->refs);
 641			state = cached;
 642			goto hit_next;
 643		}
 644		if (clear)
 645			free_extent_state(cached);
 646	}
 647	/*
 648	 * this search will find the extents that end after
 649	 * our range starts
 650	 */
 651	node = tree_search(tree, start);
 652	if (!node)
 653		goto out;
 654	state = rb_entry(node, struct extent_state, rb_node);
 655hit_next:
 656	if (state->start > end)
 657		goto out;
 658	WARN_ON(state->end < start);
 659	last_end = state->end;
 660
 661	/* the state doesn't have the wanted bits, go ahead */
 662	if (!(state->state & bits)) {
 663		state = next_state(state);
 664		goto next;
 665	}
 666
 667	/*
 668	 *     | ---- desired range ---- |
 669	 *  | state | or
 670	 *  | ------------- state -------------- |
 671	 *
 672	 * We need to split the extent we found, and may flip
 673	 * bits on second half.
 674	 *
 675	 * If the extent we found extends past our range, we
 676	 * just split and search again.  It'll get split again
 677	 * the next time though.
 678	 *
 679	 * If the extent we found is inside our range, we clear
 680	 * the desired bit on it.
 681	 */
 682
 683	if (state->start < start) {
 684		prealloc = alloc_extent_state_atomic(prealloc);
 685		BUG_ON(!prealloc);
 686		err = split_state(tree, state, prealloc, start);
 687		if (err)
 688			extent_io_tree_panic(tree, err);
 689
 690		prealloc = NULL;
 691		if (err)
 692			goto out;
 693		if (state->end <= end) {
 694			state = clear_state_bit(tree, state, &bits, wake,
 695						changeset);
 696			goto next;
 697		}
 698		goto search_again;
 699	}
 700	/*
 701	 * | ---- desired range ---- |
 702	 *                        | state |
 703	 * We need to split the extent, and clear the bit
 704	 * on the first half
 705	 */
 706	if (state->start <= end && state->end > end) {
 707		prealloc = alloc_extent_state_atomic(prealloc);
 708		BUG_ON(!prealloc);
 709		err = split_state(tree, state, prealloc, end + 1);
 710		if (err)
 711			extent_io_tree_panic(tree, err);
 712
 713		if (wake)
 714			wake_up(&state->wq);
 715
 716		clear_state_bit(tree, prealloc, &bits, wake, changeset);
 717
 718		prealloc = NULL;
 719		goto out;
 720	}
 721
 722	state = clear_state_bit(tree, state, &bits, wake, changeset);
 723next:
 724	if (last_end == (u64)-1)
 725		goto out;
 726	start = last_end + 1;
 727	if (start <= end && state && !need_resched())
 728		goto hit_next;
 729	goto search_again;
 730
 731out:
 732	spin_unlock(&tree->lock);
 733	if (prealloc)
 734		free_extent_state(prealloc);
 735
 736	return 0;
 737
 738search_again:
 739	if (start > end)
 740		goto out;
 741	spin_unlock(&tree->lock);
 742	if (gfpflags_allow_blocking(mask))
 743		cond_resched();
 744	goto again;
 745}
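/*
 * Hypothetical caller sketch (the call below is illustrative only, not
 * copied from a real call site): dropping the dirty bit on an inclusive
 * byte range via the clear_extent_bit() wrapper defined later in this
 * file, with a mask that allows sleeping:
 *
 *	clear_extent_bit(tree, start, start + len - 1, EXTENT_DIRTY,
 *			 1, 0, NULL, GFP_NOFS);
 *
 * wake == 1 kicks any waiters on the affected states and delete == 0
 * leaves the other bits in place.
 */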
 746
 747static void wait_on_state(struct extent_io_tree *tree,
 748			  struct extent_state *state)
 749		__releases(tree->lock)
 750		__acquires(tree->lock)
 751{
 752	DEFINE_WAIT(wait);
 753	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 754	spin_unlock(&tree->lock);
 755	schedule();
 756	spin_lock(&tree->lock);
 757	finish_wait(&state->wq, &wait);
 758}
 759
 760/*
 761 * waits for one or more bits to clear on a range in the state tree.
 762 * The range [start, end] is inclusive.
 763 * The tree lock is taken by this function
 764 */
 765static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 766			    unsigned long bits)
 767{
 768	struct extent_state *state;
 769	struct rb_node *node;
 770
 771	btrfs_debug_check_extent_io_range(tree, start, end);
 772
 773	spin_lock(&tree->lock);
 774again:
 775	while (1) {
 776		/*
 777		 * this search will find all the extents that end after
 778		 * our range starts
 779		 */
 780		node = tree_search(tree, start);
 781process_node:
 782		if (!node)
 783			break;
 784
 785		state = rb_entry(node, struct extent_state, rb_node);
 786
 787		if (state->start > end)
 788			goto out;
 789
 790		if (state->state & bits) {
 791			start = state->start;
 792			atomic_inc(&state->refs);
 793			wait_on_state(tree, state);
 794			free_extent_state(state);
 795			goto again;
 796		}
 797		start = state->end + 1;
 798
 799		if (start > end)
 800			break;
 801
 802		if (!cond_resched_lock(&tree->lock)) {
 803			node = rb_next(node);
 804			goto process_node;
 805		}
 806	}
 807out:
 808	spin_unlock(&tree->lock);
 809}
 810
 811static void set_state_bits(struct extent_io_tree *tree,
 812			   struct extent_state *state,
 813			   unsigned *bits, struct extent_changeset *changeset)
 814{
 815	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 816
 817	set_state_cb(tree, state, bits);
 818	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 819		u64 range = state->end - state->start + 1;
 820		tree->dirty_bytes += range;
 821	}
 822	add_extent_changeset(state, bits_to_set, changeset, 1);
 823	state->state |= bits_to_set;
 824}
 825
 826static void cache_state_if_flags(struct extent_state *state,
 827				 struct extent_state **cached_ptr,
 828				 unsigned flags)
 829{
 830	if (cached_ptr && !(*cached_ptr)) {
 831		if (!flags || (state->state & flags)) {
 832			*cached_ptr = state;
 833			atomic_inc(&state->refs);
 834		}
 835	}
 836}
 837
 838static void cache_state(struct extent_state *state,
 839			struct extent_state **cached_ptr)
 840{
 841	return cache_state_if_flags(state, cached_ptr,
 842				    EXTENT_IOBITS | EXTENT_BOUNDARY);
 843}
 844
 845/*
 846 * set some bits on a range in the tree.  This may require allocations or
 847 * sleeping, so the gfp mask is used to indicate what is allowed.
 848 *
 849 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 850 * part of the range already has the desired bits set.  The start of the
 851 * existing range is returned in failed_start in this case.
 852 *
  853 * [start, end] is inclusive.  This takes the tree lock.
 854 */
 855
 856static int __must_check
 857__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 858		 unsigned bits, unsigned exclusive_bits,
 859		 u64 *failed_start, struct extent_state **cached_state,
 860		 gfp_t mask, struct extent_changeset *changeset)
 861{
 862	struct extent_state *state;
 863	struct extent_state *prealloc = NULL;
 864	struct rb_node *node;
 865	struct rb_node **p;
 866	struct rb_node *parent;
 867	int err = 0;
 868	u64 last_start;
 869	u64 last_end;
 870
 871	btrfs_debug_check_extent_io_range(tree, start, end);
 872
 873	bits |= EXTENT_FIRST_DELALLOC;
 874again:
 875	if (!prealloc && gfpflags_allow_blocking(mask)) {
 876		prealloc = alloc_extent_state(mask);
 877		BUG_ON(!prealloc);
 878	}
 879
 880	spin_lock(&tree->lock);
 881	if (cached_state && *cached_state) {
 882		state = *cached_state;
 883		if (state->start <= start && state->end > start &&
 884		    extent_state_in_tree(state)) {
 885			node = &state->rb_node;
 886			goto hit_next;
 887		}
 888	}
 889	/*
 890	 * this search will find all the extents that end after
 891	 * our range starts.
 892	 */
 893	node = tree_search_for_insert(tree, start, &p, &parent);
 894	if (!node) {
 895		prealloc = alloc_extent_state_atomic(prealloc);
 896		BUG_ON(!prealloc);
 897		err = insert_state(tree, prealloc, start, end,
 898				   &p, &parent, &bits, changeset);
 899		if (err)
 900			extent_io_tree_panic(tree, err);
 901
 902		cache_state(prealloc, cached_state);
 903		prealloc = NULL;
 904		goto out;
 905	}
 906	state = rb_entry(node, struct extent_state, rb_node);
 907hit_next:
 908	last_start = state->start;
 909	last_end = state->end;
 910
 911	/*
 912	 * | ---- desired range ---- |
 913	 * | state |
 914	 *
 915	 * Just lock what we found and keep going
 916	 */
 917	if (state->start == start && state->end <= end) {
 918		if (state->state & exclusive_bits) {
 919			*failed_start = state->start;
 920			err = -EEXIST;
 921			goto out;
 922		}
 923
 924		set_state_bits(tree, state, &bits, changeset);
 925		cache_state(state, cached_state);
 926		merge_state(tree, state);
 927		if (last_end == (u64)-1)
 928			goto out;
 929		start = last_end + 1;
 930		state = next_state(state);
 931		if (start < end && state && state->start == start &&
 932		    !need_resched())
 933			goto hit_next;
 934		goto search_again;
 935	}
 936
 937	/*
 938	 *     | ---- desired range ---- |
 939	 * | state |
 940	 *   or
 941	 * | ------------- state -------------- |
 942	 *
 943	 * We need to split the extent we found, and may flip bits on
 944	 * second half.
 945	 *
 946	 * If the extent we found extends past our
 947	 * range, we just split and search again.  It'll get split
 948	 * again the next time though.
 949	 *
 950	 * If the extent we found is inside our range, we set the
 951	 * desired bit on it.
 952	 */
 953	if (state->start < start) {
 954		if (state->state & exclusive_bits) {
 955			*failed_start = start;
 956			err = -EEXIST;
 957			goto out;
 958		}
 959
 960		prealloc = alloc_extent_state_atomic(prealloc);
 961		BUG_ON(!prealloc);
 962		err = split_state(tree, state, prealloc, start);
 963		if (err)
 964			extent_io_tree_panic(tree, err);
 965
 966		prealloc = NULL;
 967		if (err)
 968			goto out;
 969		if (state->end <= end) {
 970			set_state_bits(tree, state, &bits, changeset);
 971			cache_state(state, cached_state);
 972			merge_state(tree, state);
 973			if (last_end == (u64)-1)
 974				goto out;
 975			start = last_end + 1;
 976			state = next_state(state);
 977			if (start < end && state && state->start == start &&
 978			    !need_resched())
 979				goto hit_next;
 980		}
 981		goto search_again;
 982	}
 983	/*
 984	 * | ---- desired range ---- |
 985	 *     | state | or               | state |
 986	 *
 987	 * There's a hole, we need to insert something in it and
 988	 * ignore the extent we found.
 989	 */
 990	if (state->start > start) {
 991		u64 this_end;
 992		if (end < last_start)
 993			this_end = end;
 994		else
 995			this_end = last_start - 1;
 996
 997		prealloc = alloc_extent_state_atomic(prealloc);
 998		BUG_ON(!prealloc);
 999
1000		/*
1001		 * Avoid to free 'prealloc' if it can be merged with
1002		 * the later extent.
1003		 */
1004		err = insert_state(tree, prealloc, start, this_end,
1005				   NULL, NULL, &bits, changeset);
1006		if (err)
1007			extent_io_tree_panic(tree, err);
1008
1009		cache_state(prealloc, cached_state);
1010		prealloc = NULL;
1011		start = this_end + 1;
1012		goto search_again;
1013	}
1014	/*
1015	 * | ---- desired range ---- |
1016	 *                        | state |
1017	 * We need to split the extent, and set the bit
1018	 * on the first half
1019	 */
1020	if (state->start <= end && state->end > end) {
1021		if (state->state & exclusive_bits) {
1022			*failed_start = start;
1023			err = -EEXIST;
1024			goto out;
1025		}
1026
1027		prealloc = alloc_extent_state_atomic(prealloc);
1028		BUG_ON(!prealloc);
1029		err = split_state(tree, state, prealloc, end + 1);
1030		if (err)
1031			extent_io_tree_panic(tree, err);
1032
1033		set_state_bits(tree, prealloc, &bits, changeset);
1034		cache_state(prealloc, cached_state);
1035		merge_state(tree, prealloc);
1036		prealloc = NULL;
1037		goto out;
1038	}
1039
1040	goto search_again;
1041
1042out:
1043	spin_unlock(&tree->lock);
1044	if (prealloc)
1045		free_extent_state(prealloc);
1046
1047	return err;
1048
1049search_again:
1050	if (start > end)
1051		goto out;
1052	spin_unlock(&tree->lock);
1053	if (gfpflags_allow_blocking(mask))
1054		cond_resched();
1055	goto again;
1056}
1057
1058int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1059		   unsigned bits, u64 * failed_start,
1060		   struct extent_state **cached_state, gfp_t mask)
1061{
1062	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1063				cached_state, mask, NULL);
1064}
1065
1066
1067/**
1068 * convert_extent_bit - convert all bits in a given range from one bit to
1069 * 			another
1070 * @tree:	the io tree to search
1071 * @start:	the start offset in bytes
1072 * @end:	the end offset in bytes (inclusive)
1073 * @bits:	the bits to set in this range
1074 * @clear_bits:	the bits to clear in this range
1075 * @cached_state:	state that we're going to cache
1076 * @mask:	the allocation mask
1077 *
1078 * This will go through and set bits for the given range.  If any states exist
1079 * already in this range they are set with the given bit and cleared of the
1080 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1081 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1082 * boundary bits like LOCK.
1083 */
1084int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1085		       unsigned bits, unsigned clear_bits,
1086		       struct extent_state **cached_state, gfp_t mask)
1087{
1088	struct extent_state *state;
1089	struct extent_state *prealloc = NULL;
1090	struct rb_node *node;
1091	struct rb_node **p;
1092	struct rb_node *parent;
1093	int err = 0;
1094	u64 last_start;
1095	u64 last_end;
1096	bool first_iteration = true;
1097
1098	btrfs_debug_check_extent_io_range(tree, start, end);
1099
1100again:
1101	if (!prealloc && gfpflags_allow_blocking(mask)) {
1102		/*
1103		 * Best effort, don't worry if extent state allocation fails
1104		 * here for the first iteration. We might have a cached state
1105		 * that matches exactly the target range, in which case no
1106		 * extent state allocations are needed. We'll only know this
1107		 * after locking the tree.
1108		 */
1109		prealloc = alloc_extent_state(mask);
1110		if (!prealloc && !first_iteration)
1111			return -ENOMEM;
1112	}
1113
1114	spin_lock(&tree->lock);
1115	if (cached_state && *cached_state) {
1116		state = *cached_state;
1117		if (state->start <= start && state->end > start &&
1118		    extent_state_in_tree(state)) {
1119			node = &state->rb_node;
1120			goto hit_next;
1121		}
1122	}
1123
1124	/*
1125	 * this search will find all the extents that end after
1126	 * our range starts.
1127	 */
1128	node = tree_search_for_insert(tree, start, &p, &parent);
1129	if (!node) {
1130		prealloc = alloc_extent_state_atomic(prealloc);
1131		if (!prealloc) {
1132			err = -ENOMEM;
1133			goto out;
1134		}
1135		err = insert_state(tree, prealloc, start, end,
1136				   &p, &parent, &bits, NULL);
1137		if (err)
1138			extent_io_tree_panic(tree, err);
1139		cache_state(prealloc, cached_state);
1140		prealloc = NULL;
1141		goto out;
1142	}
1143	state = rb_entry(node, struct extent_state, rb_node);
1144hit_next:
1145	last_start = state->start;
1146	last_end = state->end;
1147
1148	/*
1149	 * | ---- desired range ---- |
1150	 * | state |
1151	 *
1152	 * Just lock what we found and keep going
1153	 */
1154	if (state->start == start && state->end <= end) {
1155		set_state_bits(tree, state, &bits, NULL);
1156		cache_state(state, cached_state);
1157		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1158		if (last_end == (u64)-1)
1159			goto out;
1160		start = last_end + 1;
1161		if (start < end && state && state->start == start &&
1162		    !need_resched())
1163			goto hit_next;
1164		goto search_again;
1165	}
1166
1167	/*
1168	 *     | ---- desired range ---- |
1169	 * | state |
1170	 *   or
1171	 * | ------------- state -------------- |
1172	 *
1173	 * We need to split the extent we found, and may flip bits on
1174	 * second half.
1175	 *
1176	 * If the extent we found extends past our
1177	 * range, we just split and search again.  It'll get split
1178	 * again the next time though.
1179	 *
1180	 * If the extent we found is inside our range, we set the
1181	 * desired bit on it.
1182	 */
1183	if (state->start < start) {
1184		prealloc = alloc_extent_state_atomic(prealloc);
1185		if (!prealloc) {
1186			err = -ENOMEM;
1187			goto out;
1188		}
1189		err = split_state(tree, state, prealloc, start);
1190		if (err)
1191			extent_io_tree_panic(tree, err);
1192		prealloc = NULL;
1193		if (err)
1194			goto out;
1195		if (state->end <= end) {
1196			set_state_bits(tree, state, &bits, NULL);
1197			cache_state(state, cached_state);
1198			state = clear_state_bit(tree, state, &clear_bits, 0,
1199						NULL);
1200			if (last_end == (u64)-1)
1201				goto out;
1202			start = last_end + 1;
1203			if (start < end && state && state->start == start &&
1204			    !need_resched())
1205				goto hit_next;
1206		}
1207		goto search_again;
1208	}
1209	/*
1210	 * | ---- desired range ---- |
1211	 *     | state | or               | state |
1212	 *
1213	 * There's a hole, we need to insert something in it and
1214	 * ignore the extent we found.
1215	 */
1216	if (state->start > start) {
1217		u64 this_end;
1218		if (end < last_start)
1219			this_end = end;
1220		else
1221			this_end = last_start - 1;
1222
1223		prealloc = alloc_extent_state_atomic(prealloc);
1224		if (!prealloc) {
1225			err = -ENOMEM;
1226			goto out;
1227		}
1228
1229		/*
1230		 * Avoid to free 'prealloc' if it can be merged with
1231		 * the later extent.
1232		 */
1233		err = insert_state(tree, prealloc, start, this_end,
1234				   NULL, NULL, &bits, NULL);
1235		if (err)
1236			extent_io_tree_panic(tree, err);
1237		cache_state(prealloc, cached_state);
1238		prealloc = NULL;
1239		start = this_end + 1;
1240		goto search_again;
1241	}
1242	/*
1243	 * | ---- desired range ---- |
1244	 *                        | state |
1245	 * We need to split the extent, and set the bit
1246	 * on the first half
1247	 */
1248	if (state->start <= end && state->end > end) {
1249		prealloc = alloc_extent_state_atomic(prealloc);
1250		if (!prealloc) {
1251			err = -ENOMEM;
1252			goto out;
1253		}
1254
1255		err = split_state(tree, state, prealloc, end + 1);
1256		if (err)
1257			extent_io_tree_panic(tree, err);
1258
1259		set_state_bits(tree, prealloc, &bits, NULL);
1260		cache_state(prealloc, cached_state);
1261		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1262		prealloc = NULL;
1263		goto out;
1264	}
1265
1266	goto search_again;
1267
1268out:
1269	spin_unlock(&tree->lock);
1270	if (prealloc)
1271		free_extent_state(prealloc);
1272
1273	return err;
1274
1275search_again:
1276	if (start > end)
1277		goto out;
1278	spin_unlock(&tree->lock);
1279	if (gfpflags_allow_blocking(mask))
1280		cond_resched();
1281	first_iteration = false;
1282	goto again;
1283}
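/*
 * Hypothetical caller sketch (illustrative only, not copied from a real
 * call site): converting a delalloc range to dirty in one pass, as the
 * comment above suggests, would look like
 *
 *	convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *			   EXTENT_DELALLOC, &cached_state, GFP_NOFS);
 *
 * which sets EXTENT_DIRTY and clears EXTENT_DELALLOC on every state in
 * [start, end], splitting states at the range boundaries as needed.
 */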
1284
1285/* wrappers around set/clear extent bit */
1286int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1287			   unsigned bits, gfp_t mask,
1288			   struct extent_changeset *changeset)
1289{
1290	/*
1291	 * We don't support EXTENT_LOCKED yet, as current changeset will
1292	 * record any bits changed, so for EXTENT_LOCKED case, it will
1293	 * either fail with -EEXIST or changeset will record the whole
1294	 * range.
1295	 */
1296	BUG_ON(bits & EXTENT_LOCKED);
1297
1298	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
1299				changeset);
1300}
1301
1302int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1303		     unsigned bits, int wake, int delete,
1304		     struct extent_state **cached, gfp_t mask)
1305{
1306	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1307				  cached, mask, NULL);
1308}
1309
1310int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1311			     unsigned bits, gfp_t mask,
1312			     struct extent_changeset *changeset)
1313{
1314	/*
1315	 * Don't support EXTENT_LOCKED case, same reason as
1316	 * set_record_extent_bits().
1317	 */
1318	BUG_ON(bits & EXTENT_LOCKED);
1319
1320	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
1321				  changeset);
1322}
1323
1324/*
 1325 * either insert or lock the state struct between start and end.  Use mask
 1326 * to tell us if waiting is desired.
1327 */
1328int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1329		     struct extent_state **cached_state)
1330{
1331	int err;
1332	u64 failed_start;
1333
1334	while (1) {
1335		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1336				       EXTENT_LOCKED, &failed_start,
1337				       cached_state, GFP_NOFS, NULL);
1338		if (err == -EEXIST) {
1339			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1340			start = failed_start;
1341		} else
1342			break;
1343		WARN_ON(start > end);
1344	}
1345	return err;
1346}
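/*
 * Note: this behaves like a blocking range lock.  __set_extent_bit() is
 * asked to set EXTENT_LOCKED with EXTENT_LOCKED as the exclusive bit; if
 * part of the range is already locked it fails with -EEXIST and reports
 * the start of the conflicting state in failed_start, so we wait for
 * that range to be unlocked and retry from there.
 */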
1347
1348int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1349{
1350	int err;
1351	u64 failed_start;
1352
1353	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1354			       &failed_start, NULL, GFP_NOFS, NULL);
1355	if (err == -EEXIST) {
1356		if (failed_start > start)
1357			clear_extent_bit(tree, start, failed_start - 1,
1358					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1359		return 0;
1360	}
1361	return 1;
1362}
1363
1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1365{
1366	unsigned long index = start >> PAGE_SHIFT;
1367	unsigned long end_index = end >> PAGE_SHIFT;
1368	struct page *page;
1369
1370	while (index <= end_index) {
1371		page = find_get_page(inode->i_mapping, index);
1372		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1373		clear_page_dirty_for_io(page);
1374		put_page(page);
1375		index++;
1376	}
1377}
1378
1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{
1381	unsigned long index = start >> PAGE_SHIFT;
1382	unsigned long end_index = end >> PAGE_SHIFT;
1383	struct page *page;
1384
1385	while (index <= end_index) {
1386		page = find_get_page(inode->i_mapping, index);
1387		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388		__set_page_dirty_nobuffers(page);
1389		account_page_redirty(page);
1390		put_page(page);
1391		index++;
1392	}
1393}
1394
1395/*
1396 * helper function to set both pages and extents in the tree writeback
1397 */
1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1399{
1400	unsigned long index = start >> PAGE_SHIFT;
1401	unsigned long end_index = end >> PAGE_SHIFT;
1402	struct page *page;
1403
1404	while (index <= end_index) {
1405		page = find_get_page(tree->mapping, index);
1406		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1407		set_page_writeback(page);
1408		put_page(page);
1409		index++;
1410	}
1411}
1412
1413/* find the first state struct with 'bits' set after 'start', and
 1414 * return it.  tree->lock must be held.  NULL will be returned if
1415 * nothing was found after 'start'
1416 */
1417static struct extent_state *
1418find_first_extent_bit_state(struct extent_io_tree *tree,
1419			    u64 start, unsigned bits)
1420{
1421	struct rb_node *node;
1422	struct extent_state *state;
1423
1424	/*
1425	 * this search will find all the extents that end after
1426	 * our range starts.
1427	 */
1428	node = tree_search(tree, start);
1429	if (!node)
1430		goto out;
1431
1432	while (1) {
1433		state = rb_entry(node, struct extent_state, rb_node);
1434		if (state->end >= start && (state->state & bits))
1435			return state;
1436
1437		node = rb_next(node);
1438		if (!node)
1439			break;
1440	}
1441out:
1442	return NULL;
1443}
1444
1445/*
1446 * find the first offset in the io tree with 'bits' set. zero is
1447 * returned if we find something, and *start_ret and *end_ret are
1448 * set to reflect the state struct that was found.
1449 *
1450 * If nothing was found, 1 is returned. If found something, return 0.
1451 */
1452int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1453			  u64 *start_ret, u64 *end_ret, unsigned bits,
1454			  struct extent_state **cached_state)
1455{
1456	struct extent_state *state;
1457	struct rb_node *n;
1458	int ret = 1;
1459
1460	spin_lock(&tree->lock);
1461	if (cached_state && *cached_state) {
1462		state = *cached_state;
1463		if (state->end == start - 1 && extent_state_in_tree(state)) {
1464			n = rb_next(&state->rb_node);
1465			while (n) {
1466				state = rb_entry(n, struct extent_state,
1467						 rb_node);
1468				if (state->state & bits)
1469					goto got_it;
1470				n = rb_next(n);
1471			}
1472			free_extent_state(*cached_state);
1473			*cached_state = NULL;
1474			goto out;
1475		}
1476		free_extent_state(*cached_state);
1477		*cached_state = NULL;
1478	}
1479
1480	state = find_first_extent_bit_state(tree, start, bits);
1481got_it:
1482	if (state) {
1483		cache_state_if_flags(state, cached_state, 0);
1484		*start_ret = state->start;
1485		*end_ret = state->end;
1486		ret = 0;
1487	}
1488out:
1489	spin_unlock(&tree->lock);
1490	return ret;
1491}
1492
1493/*
1494 * find a contiguous range of bytes in the file marked as delalloc, not
1495 * more than 'max_bytes'.  start and end are used to return the range,
1496 *
1497 * 1 is returned if we find something, 0 if nothing was in the tree
1498 */
1499static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1500					u64 *start, u64 *end, u64 max_bytes,
1501					struct extent_state **cached_state)
1502{
1503	struct rb_node *node;
1504	struct extent_state *state;
1505	u64 cur_start = *start;
1506	u64 found = 0;
1507	u64 total_bytes = 0;
1508
1509	spin_lock(&tree->lock);
1510
1511	/*
1512	 * this search will find all the extents that end after
1513	 * our range starts.
1514	 */
1515	node = tree_search(tree, cur_start);
1516	if (!node) {
1517		if (!found)
1518			*end = (u64)-1;
1519		goto out;
1520	}
1521
1522	while (1) {
1523		state = rb_entry(node, struct extent_state, rb_node);
1524		if (found && (state->start != cur_start ||
1525			      (state->state & EXTENT_BOUNDARY))) {
1526			goto out;
1527		}
1528		if (!(state->state & EXTENT_DELALLOC)) {
1529			if (!found)
1530				*end = state->end;
1531			goto out;
1532		}
1533		if (!found) {
1534			*start = state->start;
1535			*cached_state = state;
1536			atomic_inc(&state->refs);
1537		}
1538		found++;
1539		*end = state->end;
1540		cur_start = state->end + 1;
1541		node = rb_next(node);
1542		total_bytes += state->end - state->start + 1;
1543		if (total_bytes >= max_bytes)
1544			break;
1545		if (!node)
1546			break;
1547	}
1548out:
1549	spin_unlock(&tree->lock);
1550	return found;
1551}
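/*
 * Note: despite the "1 is returned" wording above, the return value is
 * the number of contiguous delalloc extent states accumulated into
 * [*start, *end]; callers only check it for zero vs. non-zero.  The walk
 * stops at the first gap, at an EXTENT_BOUNDARY state, or once max_bytes
 * worth of delalloc has been found.
 */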
1552
1553static noinline void __unlock_for_delalloc(struct inode *inode,
1554					   struct page *locked_page,
1555					   u64 start, u64 end)
1556{
1557	int ret;
1558	struct page *pages[16];
1559	unsigned long index = start >> PAGE_SHIFT;
1560	unsigned long end_index = end >> PAGE_SHIFT;
1561	unsigned long nr_pages = end_index - index + 1;
1562	int i;
1563
1564	if (index == locked_page->index && end_index == index)
1565		return;
1566
1567	while (nr_pages > 0) {
1568		ret = find_get_pages_contig(inode->i_mapping, index,
1569				     min_t(unsigned long, nr_pages,
1570				     ARRAY_SIZE(pages)), pages);
1571		for (i = 0; i < ret; i++) {
1572			if (pages[i] != locked_page)
1573				unlock_page(pages[i]);
1574			put_page(pages[i]);
1575		}
1576		nr_pages -= ret;
1577		index += ret;
1578		cond_resched();
1579	}
1580}
1581
1582static noinline int lock_delalloc_pages(struct inode *inode,
1583					struct page *locked_page,
1584					u64 delalloc_start,
1585					u64 delalloc_end)
1586{
1587	unsigned long index = delalloc_start >> PAGE_SHIFT;
1588	unsigned long start_index = index;
1589	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1590	unsigned long pages_locked = 0;
1591	struct page *pages[16];
1592	unsigned long nrpages;
1593	int ret;
1594	int i;
1595
1596	/* the caller is responsible for locking the start index */
1597	if (index == locked_page->index && index == end_index)
1598		return 0;
1599
1600	/* skip the page at the start index */
1601	nrpages = end_index - index + 1;
1602	while (nrpages > 0) {
1603		ret = find_get_pages_contig(inode->i_mapping, index,
1604				     min_t(unsigned long,
1605				     nrpages, ARRAY_SIZE(pages)), pages);
1606		if (ret == 0) {
1607			ret = -EAGAIN;
1608			goto done;
1609		}
1610		/* now we have an array of pages, lock them all */
1611		for (i = 0; i < ret; i++) {
1612			/*
1613			 * the caller is taking responsibility for
1614			 * locked_page
1615			 */
1616			if (pages[i] != locked_page) {
1617				lock_page(pages[i]);
1618				if (!PageDirty(pages[i]) ||
1619				    pages[i]->mapping != inode->i_mapping) {
1620					ret = -EAGAIN;
1621					unlock_page(pages[i]);
1622					put_page(pages[i]);
1623					goto done;
1624				}
1625			}
1626			put_page(pages[i]);
1627			pages_locked++;
1628		}
1629		nrpages -= ret;
1630		index += ret;
1631		cond_resched();
1632	}
1633	ret = 0;
1634done:
1635	if (ret && pages_locked) {
1636		__unlock_for_delalloc(inode, locked_page,
1637			      delalloc_start,
1638			      ((u64)(start_index + pages_locked - 1)) <<
1639			      PAGE_SHIFT);
1640	}
1641	return ret;
1642}
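/*
 * Note: this returns 0 once every page in the range other than
 * locked_page (which the caller already holds) has been locked.  If a
 * page has vanished from the mapping or is no longer dirty, it returns
 * -EAGAIN after unlocking the pages it had locked so far.
 */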
1643
1644/*
1645 * find a contiguous range of bytes in the file marked as delalloc, not
1646 * more than 'max_bytes'.  start and end are used to return the range,
1647 *
1648 * 1 is returned if we find something, 0 if nothing was in the tree
1649 */
1650STATIC u64 find_lock_delalloc_range(struct inode *inode,
1651				    struct extent_io_tree *tree,
1652				    struct page *locked_page, u64 *start,
1653				    u64 *end, u64 max_bytes)
1654{
1655	u64 delalloc_start;
1656	u64 delalloc_end;
1657	u64 found;
1658	struct extent_state *cached_state = NULL;
1659	int ret;
1660	int loops = 0;
1661
1662again:
1663	/* step one, find a bunch of delalloc bytes starting at start */
1664	delalloc_start = *start;
1665	delalloc_end = 0;
1666	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1667				    max_bytes, &cached_state);
1668	if (!found || delalloc_end <= *start) {
1669		*start = delalloc_start;
1670		*end = delalloc_end;
1671		free_extent_state(cached_state);
1672		return 0;
1673	}
1674
1675	/*
1676	 * start comes from the offset of locked_page.  We have to lock
1677	 * pages in order, so we can't process delalloc bytes before
1678	 * locked_page
1679	 */
1680	if (delalloc_start < *start)
1681		delalloc_start = *start;
1682
1683	/*
1684	 * make sure to limit the number of pages we try to lock down
1685	 */
1686	if (delalloc_end + 1 - delalloc_start > max_bytes)
1687		delalloc_end = delalloc_start + max_bytes - 1;
1688
1689	/* step two, lock all the pages after the page that has start */
1690	ret = lock_delalloc_pages(inode, locked_page,
1691				  delalloc_start, delalloc_end);
1692	if (ret == -EAGAIN) {
1693		/* some of the pages are gone, lets avoid looping by
1694		 * shortening the size of the delalloc range we're searching
1695		 */
1696		free_extent_state(cached_state);
1697		cached_state = NULL;
1698		if (!loops) {
1699			max_bytes = PAGE_SIZE;
1700			loops = 1;
1701			goto again;
1702		} else {
1703			found = 0;
1704			goto out_failed;
1705		}
1706	}
1707	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1708
1709	/* step three, lock the state bits for the whole range */
1710	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1711
1712	/* then test to make sure it is all still delalloc */
1713	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1714			     EXTENT_DELALLOC, 1, cached_state);
1715	if (!ret) {
1716		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1717				     &cached_state, GFP_NOFS);
1718		__unlock_for_delalloc(inode, locked_page,
1719			      delalloc_start, delalloc_end);
1720		cond_resched();
1721		goto again;
1722	}
1723	free_extent_state(cached_state);
1724	*start = delalloc_start;
1725	*end = delalloc_end;
1726out_failed:
1727	return found;
1728}
1729
1730void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1731				 struct page *locked_page,
1732				 unsigned clear_bits,
1733				 unsigned long page_ops)
1734{
1735	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1736	int ret;
1737	struct page *pages[16];
1738	unsigned long index = start >> PAGE_SHIFT;
1739	unsigned long end_index = end >> PAGE_SHIFT;
1740	unsigned long nr_pages = end_index - index + 1;
1741	int i;
1742
1743	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1744	if (page_ops == 0)
1745		return;
1746
1747	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1748		mapping_set_error(inode->i_mapping, -EIO);
1749
1750	while (nr_pages > 0) {
1751		ret = find_get_pages_contig(inode->i_mapping, index,
1752				     min_t(unsigned long,
1753				     nr_pages, ARRAY_SIZE(pages)), pages);
1754		for (i = 0; i < ret; i++) {
1755
1756			if (page_ops & PAGE_SET_PRIVATE2)
1757				SetPagePrivate2(pages[i]);
1758
1759			if (pages[i] == locked_page) {
1760				put_page(pages[i]);
1761				continue;
1762			}
1763			if (page_ops & PAGE_CLEAR_DIRTY)
1764				clear_page_dirty_for_io(pages[i]);
1765			if (page_ops & PAGE_SET_WRITEBACK)
1766				set_page_writeback(pages[i]);
1767			if (page_ops & PAGE_SET_ERROR)
1768				SetPageError(pages[i]);
1769			if (page_ops & PAGE_END_WRITEBACK)
1770				end_page_writeback(pages[i]);
1771			if (page_ops & PAGE_UNLOCK)
1772				unlock_page(pages[i]);
1773			put_page(pages[i]);
1774		}
1775		nr_pages -= ret;
1776		index += ret;
1777		cond_resched();
1778	}
1779}
1780
1781/*
1782 * count the number of bytes in the tree that have a given bit(s)
1783 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1784 * cached.  The total number found is returned.
1785 */
1786u64 count_range_bits(struct extent_io_tree *tree,
1787		     u64 *start, u64 search_end, u64 max_bytes,
1788		     unsigned bits, int contig)
1789{
1790	struct rb_node *node;
1791	struct extent_state *state;
1792	u64 cur_start = *start;
1793	u64 total_bytes = 0;
1794	u64 last = 0;
1795	int found = 0;
1796
1797	if (WARN_ON(search_end <= cur_start))
1798		return 0;
1799
1800	spin_lock(&tree->lock);
1801	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1802		total_bytes = tree->dirty_bytes;
1803		goto out;
1804	}
1805	/*
1806	 * this search will find all the extents that end after
1807	 * our range starts.
1808	 */
1809	node = tree_search(tree, cur_start);
1810	if (!node)
1811		goto out;
1812
1813	while (1) {
1814		state = rb_entry(node, struct extent_state, rb_node);
1815		if (state->start > search_end)
1816			break;
1817		if (contig && found && state->start > last + 1)
1818			break;
1819		if (state->end >= cur_start && (state->state & bits) == bits) {
1820			total_bytes += min(search_end, state->end) + 1 -
1821				       max(cur_start, state->start);
1822			if (total_bytes >= max_bytes)
1823				break;
1824			if (!found) {
1825				*start = max(cur_start, state->start);
1826				found = 1;
1827			}
1828			last = state->end;
1829		} else if (contig && found) {
1830			break;
1831		}
1832		node = rb_next(node);
1833		if (!node)
1834			break;
1835	}
1836out:
1837	spin_unlock(&tree->lock);
1838	return total_bytes;
1839}
1840
1841/*
 1842 * set the failure record for a given byte offset in the tree.  If there isn't
1843 * an extent_state there already, this does nothing.
1844 */
1845static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1846		struct io_failure_record *failrec)
1847{
1848	struct rb_node *node;
1849	struct extent_state *state;
1850	int ret = 0;
1851
1852	spin_lock(&tree->lock);
1853	/*
1854	 * this search will find all the extents that end after
1855	 * our range starts.
1856	 */
1857	node = tree_search(tree, start);
1858	if (!node) {
1859		ret = -ENOENT;
1860		goto out;
1861	}
1862	state = rb_entry(node, struct extent_state, rb_node);
1863	if (state->start != start) {
1864		ret = -ENOENT;
1865		goto out;
1866	}
1867	state->failrec = failrec;
1868out:
1869	spin_unlock(&tree->lock);
1870	return ret;
1871}
1872
1873static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1874		struct io_failure_record **failrec)
1875{
1876	struct rb_node *node;
1877	struct extent_state *state;
1878	int ret = 0;
1879
1880	spin_lock(&tree->lock);
1881	/*
1882	 * this search will find all the extents that end after
1883	 * our range starts.
1884	 */
1885	node = tree_search(tree, start);
1886	if (!node) {
1887		ret = -ENOENT;
1888		goto out;
1889	}
1890	state = rb_entry(node, struct extent_state, rb_node);
1891	if (state->start != start) {
1892		ret = -ENOENT;
1893		goto out;
1894	}
1895	*failrec = state->failrec;
1896out:
1897	spin_unlock(&tree->lock);
1898	return ret;
1899}
1900
1901/*
1902 * searches a range in the state tree for a given mask.
1903 * If 'filled' == 1, this returns 1 only if every extent in the tree
1904 * has the bits set.  Otherwise, 1 is returned if any bit in the
1905 * range is found set.
1906 */
1907int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1908		   unsigned bits, int filled, struct extent_state *cached)
1909{
1910	struct extent_state *state = NULL;
1911	struct rb_node *node;
1912	int bitset = 0;
1913
1914	spin_lock(&tree->lock);
1915	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1916	    cached->end > start)
1917		node = &cached->rb_node;
1918	else
1919		node = tree_search(tree, start);
1920	while (node && start <= end) {
1921		state = rb_entry(node, struct extent_state, rb_node);
1922
1923		if (filled && state->start > start) {
1924			bitset = 0;
1925			break;
1926		}
1927
1928		if (state->start > end)
1929			break;
1930
1931		if (state->state & bits) {
1932			bitset = 1;
1933			if (!filled)
1934				break;
1935		} else if (filled) {
1936			bitset = 0;
1937			break;
1938		}
1939
1940		if (state->end == (u64)-1)
1941			break;
1942
1943		start = state->end + 1;
1944		if (start > end)
1945			break;
1946		node = rb_next(node);
1947		if (!node) {
1948			if (filled)
1949				bitset = 0;
1950			break;
1951		}
1952	}
1953	spin_unlock(&tree->lock);
1954	return bitset;
1955}
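/*
 * Worked example (editorial illustration, hypothetical tree contents): if
 * only [0, 4095] carries EXTENT_UPTODATE, then
 * test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 1, NULL) returns 0, since
 * filled == 1 requires every state in the range to carry the bit, while the
 * same call with filled == 0 returns 1 as soon as the first state with the
 * bit is found.
 */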
1956
1957/*
1958 * helper function to set a given page up to date if all the
1959 * extents in the tree for that page are up to date
1960 */
1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1962{
1963	u64 start = page_offset(page);
1964	u64 end = start + PAGE_SIZE - 1;
1965	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1966		SetPageUptodate(page);
1967}
1968
1969int free_io_failure(struct inode *inode, struct io_failure_record *rec)
1970{
1971	int ret;
1972	int err = 0;
1973	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1974
1975	set_state_failrec(failure_tree, rec->start, NULL);
1976	ret = clear_extent_bits(failure_tree, rec->start,
1977				rec->start + rec->len - 1,
1978				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1979	if (ret)
1980		err = ret;
1981
1982	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1983				rec->start + rec->len - 1,
1984				EXTENT_DAMAGED, GFP_NOFS);
1985	if (ret && !err)
1986		err = ret;
1987
1988	kfree(rec);
1989	return err;
1990}
1991
1992/*
1993 * this bypasses the standard btrfs submit functions deliberately, as
1994 * the standard behavior is to write all copies in a raid setup. here we only
1995 * want to write the one bad copy. so we do the mapping for ourselves and issue
1996 * submit_bio directly.
1997 * to avoid any synchronization issues, wait for the data after writing, which
1998 * actually prevents the read that triggered the error from finishing.
1999 * currently, there can be no more than two copies of every data bit. thus,
2000 * exactly one rewrite is required.
2001 */
2002int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2003		      struct page *page, unsigned int pg_offset, int mirror_num)
2004{
2005	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2006	struct bio *bio;
2007	struct btrfs_device *dev;
2008	u64 map_length = 0;
2009	u64 sector;
2010	struct btrfs_bio *bbio = NULL;
2011	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2012	int ret;
2013
2014	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2015	BUG_ON(!mirror_num);
2016
2017	/* we can't repair anything in raid56 yet */
2018	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2019		return 0;
2020
2021	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2022	if (!bio)
2023		return -EIO;
2024	bio->bi_iter.bi_size = 0;
2025	map_length = length;
2026
2027	ret = btrfs_map_block(fs_info, WRITE, logical,
2028			      &map_length, &bbio, mirror_num);
2029	if (ret) {
2030		bio_put(bio);
2031		return -EIO;
2032	}
2033	BUG_ON(mirror_num != bbio->mirror_num);
2034	sector = bbio->stripes[mirror_num-1].physical >> 9;
2035	bio->bi_iter.bi_sector = sector;
2036	dev = bbio->stripes[mirror_num-1].dev;
2037	btrfs_put_bbio(bbio);
2038	if (!dev || !dev->bdev || !dev->writeable) {
2039		bio_put(bio);
2040		return -EIO;
2041	}
2042	bio->bi_bdev = dev->bdev;
2043	bio_add_page(bio, page, length, pg_offset);
2044
2045	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2046		/* try to remap that extent elsewhere? */
2047		bio_put(bio);
2048		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2049		return -EIO;
2050	}
2051
2052	btrfs_info_rl_in_rcu(fs_info,
2053		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2054				  btrfs_ino(inode), start,
2055				  rcu_str_deref(dev->name), sector);
2056	bio_put(bio);
2057	return 0;
2058}
2059
2060int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2061			 int mirror_num)
2062{
2063	u64 start = eb->start;
2064	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2065	int ret = 0;
2066
2067	if (root->fs_info->sb->s_flags & MS_RDONLY)
2068		return -EROFS;
2069
2070	for (i = 0; i < num_pages; i++) {
2071		struct page *p = eb->pages[i];
2072
2073		ret = repair_io_failure(root->fs_info->btree_inode, start,
2074					PAGE_SIZE, start, p,
2075					start - page_offset(p), mirror_num);
2076		if (ret)
2077			break;
2078		start += PAGE_SIZE;
2079	}
2080
2081	return ret;
2082}
2083
2084/*
2085 * each time an IO finishes, we do a fast check in the IO failure tree
2086 * to see if we need to process or clean up an io_failure_record
2087 */
2088int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2089		     unsigned int pg_offset)
2090{
2091	u64 private;
2092	struct io_failure_record *failrec;
2093	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2094	struct extent_state *state;
2095	int num_copies;
2096	int ret;
2097
2098	private = 0;
2099	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2100				(u64)-1, 1, EXTENT_DIRTY, 0);
2101	if (!ret)
2102		return 0;
2103
2104	ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start,
2105			&failrec);
2106	if (ret)
2107		return 0;
2108
2109	BUG_ON(!failrec->this_mirror);
2110
2111	if (failrec->in_validation) {
2112		/* there was no real error, just free the record */
2113		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2114			 failrec->start);
2115		goto out;
2116	}
2117	if (fs_info->sb->s_flags & MS_RDONLY)
2118		goto out;
2119
2120	spin_lock(&BTRFS_I(inode)->io_tree.lock);
2121	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2122					    failrec->start,
2123					    EXTENT_LOCKED);
2124	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2125
2126	if (state && state->start <= failrec->start &&
2127	    state->end >= failrec->start + failrec->len - 1) {
2128		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2129					      failrec->len);
2130		if (num_copies > 1)  {
2131			repair_io_failure(inode, start, failrec->len,
2132					  failrec->logical, page,
2133					  pg_offset, failrec->failed_mirror);
2134		}
2135	}
2136
2137out:
2138	free_io_failure(inode, failrec);
2139
2140	return 0;
2141}
2142
2143/*
2144 * Can be called when
2145 * - the extent lock is held
2146 * - we are under an ordered extent
2147 * - the inode is being freed
2148 */
2149void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2150{
2151	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2152	struct io_failure_record *failrec;
2153	struct extent_state *state, *next;
2154
2155	if (RB_EMPTY_ROOT(&failure_tree->state))
2156		return;
2157
2158	spin_lock(&failure_tree->lock);
2159	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2160	while (state) {
2161		if (state->start > end)
2162			break;
2163
2164		ASSERT(state->end <= end);
2165
2166		next = next_state(state);
2167
2168		failrec = state->failrec;
2169		free_extent_state(state);
2170		kfree(failrec);
2171
2172		state = next;
2173	}
2174	spin_unlock(&failure_tree->lock);
2175}
2176
2177int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2178		struct io_failure_record **failrec_ret)
2179{
2180	struct io_failure_record *failrec;
2181	struct extent_map *em;
2182	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2183	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2184	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2185	int ret;
2186	u64 logical;
2187
2188	ret = get_state_failrec(failure_tree, start, &failrec);
2189	if (ret) {
2190		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2191		if (!failrec)
2192			return -ENOMEM;
2193
2194		failrec->start = start;
2195		failrec->len = end - start + 1;
2196		failrec->this_mirror = 0;
2197		failrec->bio_flags = 0;
2198		failrec->in_validation = 0;
2199
2200		read_lock(&em_tree->lock);
2201		em = lookup_extent_mapping(em_tree, start, failrec->len);
2202		if (!em) {
2203			read_unlock(&em_tree->lock);
2204			kfree(failrec);
2205			return -EIO;
2206		}
2207
2208		if (em->start > start || em->start + em->len <= start) {
2209			free_extent_map(em);
2210			em = NULL;
2211		}
2212		read_unlock(&em_tree->lock);
2213		if (!em) {
2214			kfree(failrec);
2215			return -EIO;
2216		}
2217
2218		logical = start - em->start;
2219		logical = em->block_start + logical;
2220		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2221			logical = em->block_start;
2222			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2223			extent_set_compress_type(&failrec->bio_flags,
2224						 em->compress_type);
2225		}
2226
2227		pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2228			 logical, start, failrec->len);
2229
2230		failrec->logical = logical;
2231		free_extent_map(em);
2232
2233		/* set the bits in the private failure tree */
2234		ret = set_extent_bits(failure_tree, start, end,
2235					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2236		if (ret >= 0)
2237			ret = set_state_failrec(failure_tree, start, failrec);
2238		/* set the bits in the inode's tree */
2239		if (ret >= 0)
2240			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241						GFP_NOFS);
2242		if (ret < 0) {
2243			kfree(failrec);
2244			return ret;
2245		}
2246	} else {
2247		pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
2248			 failrec->logical, failrec->start, failrec->len,
2249			 failrec->in_validation);
2250		/*
2251		 * when data can be on disk more than twice, add to failrec here
2252		 * (e.g. with a list for failed_mirror) to make
2253		 * clean_io_failure() clean all those errors at once.
2254		 */
2255	}
2256
2257	*failrec_ret = failrec;
2258
2259	return 0;
2260}
2261
2262int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2263			   struct io_failure_record *failrec, int failed_mirror)
2264{
2265	int num_copies;
2266
2267	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2268				      failrec->logical, failrec->len);
2269	if (num_copies == 1) {
2270		/*
2271		 * we only have a single copy of the data, so don't bother with
2272		 * all the retry and error correction code that follows. no
2273		 * matter what the error is, it is very likely to persist.
2274		 */
2275		pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2276			 num_copies, failrec->this_mirror, failed_mirror);
2277		return 0;
2278	}
2279
2280	/*
2281	 * there are two premises:
2282	 *	a) deliver good data to the caller
2283	 *	b) correct the bad sectors on disk
2284	 */
2285	if (failed_bio->bi_vcnt > 1) {
2286		/*
2287		 * to fulfill b), we need to know the exact failing sectors, as
2288		 * we don't want to rewrite any more than the failed ones. thus,
2289		 * we need separate read requests for the failed bio
2290		 *
2291		 * if the following BUG_ON triggers, our validation request got
2292		 * merged. we need separate requests for our algorithm to work.
2293		 */
2294		BUG_ON(failrec->in_validation);
2295		failrec->in_validation = 1;
2296		failrec->this_mirror = failed_mirror;
2297	} else {
2298		/*
2299		 * we're ready to fulfill a) and b) at the same time. get a good
2300		 * copy of the failed sector and, if we succeed, we have set up
2301		 * everything for repair_io_failure to do the rest for us.
2302		 */
2303		if (failrec->in_validation) {
2304			BUG_ON(failrec->this_mirror != failed_mirror);
2305			failrec->in_validation = 0;
2306			failrec->this_mirror = 0;
2307		}
2308		failrec->failed_mirror = failed_mirror;
2309		failrec->this_mirror++;
2310		if (failrec->this_mirror == failed_mirror)
2311			failrec->this_mirror++;
2312	}
2313
2314	if (failrec->this_mirror > num_copies) {
2315		pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2316			 num_copies, failrec->this_mirror, failed_mirror);
2317		return 0;
2318	}
2319
2320	return 1;
2321}
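/*
 * Worked example for the single-bvec branch above (editorial illustration):
 * with num_copies == 2 and failed_mirror == 1, this_mirror is bumped from 0
 * to 1, collides with failed_mirror and is bumped again to 2, which does not
 * exceed num_copies, so the function returns 1 and the retry is aimed at the
 * remaining copy.
 */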
2322
2323
2324struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2325				    struct io_failure_record *failrec,
2326				    struct page *page, int pg_offset, int icsum,
2327				    bio_end_io_t *endio_func, void *data)
2328{
2329	struct bio *bio;
2330	struct btrfs_io_bio *btrfs_failed_bio;
2331	struct btrfs_io_bio *btrfs_bio;
2332
2333	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2334	if (!bio)
2335		return NULL;
2336
2337	bio->bi_end_io = endio_func;
2338	bio->bi_iter.bi_sector = failrec->logical >> 9;
2339	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2340	bio->bi_iter.bi_size = 0;
2341	bio->bi_private = data;
2342
2343	btrfs_failed_bio = btrfs_io_bio(failed_bio);
2344	if (btrfs_failed_bio->csum) {
2345		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2346		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2347
2348		btrfs_bio = btrfs_io_bio(bio);
2349		btrfs_bio->csum = btrfs_bio->csum_inline;
2350		icsum *= csum_size;
2351		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2352		       csum_size);
2353	}
2354
2355	bio_add_page(bio, page, failrec->len, pg_offset);
2356
2357	return bio;
2358}
2359
2360/*
2361 * this is a generic handler for readpage errors (default
2362 * readpage_io_failed_hook). if other copies exist, read those and write back
2363 * good data to the failed position. it does not try to remap the failed
2364 * extent elsewhere, hoping the device will be smart enough to do this as
2365 * needed
2366 */
2367
2368static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2369			      struct page *page, u64 start, u64 end,
2370			      int failed_mirror)
2371{
2372	struct io_failure_record *failrec;
2373	struct inode *inode = page->mapping->host;
2374	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2375	struct bio *bio;
2376	int read_mode;
2377	int ret;
2378
2379	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2380
2381	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2382	if (ret)
2383		return ret;
2384
2385	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2386	if (!ret) {
2387		free_io_failure(inode, failrec);
2388		return -EIO;
2389	}
2390
2391	if (failed_bio->bi_vcnt > 1)
2392		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2393	else
2394		read_mode = READ_SYNC;
2395
2396	phy_offset >>= inode->i_sb->s_blocksize_bits;
2397	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2398				      start - page_offset(page),
2399				      (int)phy_offset, failed_bio->bi_end_io,
2400				      NULL);
2401	if (!bio) {
2402		free_io_failure(inode, failrec);
2403		return -EIO;
2404	}
2405
2406	pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2407		 read_mode, failrec->this_mirror, failrec->in_validation);
2408
2409	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2410					 failrec->this_mirror,
2411					 failrec->bio_flags, 0);
2412	if (ret) {
2413		free_io_failure(inode, failrec);
2414		bio_put(bio);
2415	}
2416
2417	return ret;
2418}
2419
2420/* lots and lots of room for performance fixes in the end_bio funcs */
2421
2422void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2423{
2424	int uptodate = (err == 0);
2425	struct extent_io_tree *tree;
2426	int ret = 0;
2427
2428	tree = &BTRFS_I(page->mapping->host)->io_tree;
2429
2430	if (tree->ops && tree->ops->writepage_end_io_hook) {
2431		ret = tree->ops->writepage_end_io_hook(page, start,
2432					       end, NULL, uptodate);
2433		if (ret)
2434			uptodate = 0;
2435	}
2436
2437	if (!uptodate) {
2438		ClearPageUptodate(page);
2439		SetPageError(page);
2440		ret = ret < 0 ? ret : -EIO;
2441		mapping_set_error(page->mapping, ret);
2442	}
2443}
2444
2445/*
2446 * after a writepage IO is done, we need to:
2447 * clear the uptodate bits on error
2448 * clear the writeback bits in the extent tree for this IO
2449 * end_page_writeback if the page has no more pending IO
2450 *
2451 * Scheduling is not allowed, so the extent state tree is expected
2452 * to have one and only one object corresponding to this IO.
2453 */
2454static void end_bio_extent_writepage(struct bio *bio)
2455{
2456	struct bio_vec *bvec;
2457	u64 start;
2458	u64 end;
2459	int i;
2460
2461	bio_for_each_segment_all(bvec, bio, i) {
2462		struct page *page = bvec->bv_page;
2463
2464		/* We always issue full-page writes, but if some block
2465		 * in a page fails to write, blk_update_request() will
2466		 * advance bv_offset and adjust bv_len to compensate.
2467		 * Print a warning for nonzero offsets, and an error
2468		 * if they don't add up to a full page.  */
2469		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2470			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2471				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2472				   "partial page write in btrfs with offset %u and length %u",
2473					bvec->bv_offset, bvec->bv_len);
2474			else
2475				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2476				   "incomplete page write in btrfs with offset %u and "
2477				   "length %u",
2478					bvec->bv_offset, bvec->bv_len);
2479		}
2480
2481		start = page_offset(page);
2482		end = start + bvec->bv_offset + bvec->bv_len - 1;
2483
2484		end_extent_writepage(page, bio->bi_error, start, end);
2485		end_page_writeback(page);
2486	}
2487
2488	bio_put(bio);
2489}
2490
2491static void
2492endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2493			      int uptodate)
2494{
2495	struct extent_state *cached = NULL;
2496	u64 end = start + len - 1;
2497
2498	if (uptodate && tree->track_uptodate)
2499		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2500	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2501}
2502
2503/*
2504 * after a readpage IO is done, we need to:
2505 * clear the uptodate bits on error
2506 * set the uptodate bits if things worked
2507 * set the page up to date if all extents in the tree are uptodate
2508 * clear the lock bit in the extent tree
2509 * unlock the page if there are no other extents locked for it
2510 *
2511 * Scheduling is not allowed, so the extent state tree is expected
2512 * to have one and only one object corresponding to this IO.
2513 */
2514static void end_bio_extent_readpage(struct bio *bio)
2515{
2516	struct bio_vec *bvec;
2517	int uptodate = !bio->bi_error;
2518	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2519	struct extent_io_tree *tree;
2520	u64 offset = 0;
2521	u64 start;
2522	u64 end;
2523	u64 len;
2524	u64 extent_start = 0;
2525	u64 extent_len = 0;
2526	int mirror;
2527	int ret;
2528	int i;
2529
2530	bio_for_each_segment_all(bvec, bio, i) {
2531		struct page *page = bvec->bv_page;
2532		struct inode *inode = page->mapping->host;
2533
2534		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2535			 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
2536			 bio->bi_error, io_bio->mirror_num);
2537		tree = &BTRFS_I(inode)->io_tree;
2538
2539		/* We always issue full-page reads, but if some block
2540		 * in a page fails to read, blk_update_request() will
2541		 * advance bv_offset and adjust bv_len to compensate.
2542		 * Print a warning for nonzero offsets, and an error
2543		 * if they don't add up to a full page.  */
2544		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2545			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2546				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2547				   "partial page read in btrfs with offset %u and length %u",
2548					bvec->bv_offset, bvec->bv_len);
2549			else
2550				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2551				   "incomplete page read in btrfs with offset %u and "
2552				   "length %u",
2553					bvec->bv_offset, bvec->bv_len);
2554		}
2555
2556		start = page_offset(page);
2557		end = start + bvec->bv_offset + bvec->bv_len - 1;
2558		len = bvec->bv_len;
2559
2560		mirror = io_bio->mirror_num;
2561		if (likely(uptodate && tree->ops &&
2562			   tree->ops->readpage_end_io_hook)) {
2563			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2564							      page, start, end,
2565							      mirror);
2566			if (ret)
2567				uptodate = 0;
2568			else
2569				clean_io_failure(inode, start, page, 0);
2570		}
2571
2572		if (likely(uptodate))
2573			goto readpage_ok;
2574
2575		if (tree->ops && tree->ops->readpage_io_failed_hook) {
2576			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2577			if (!ret && !bio->bi_error)
2578				uptodate = 1;
2579		} else {
2580			/*
2581			 * The generic bio_readpage_error handles errors the
2582			 * following way: If possible, new read requests are
2583			 * created and submitted and will end up in
2584			 * end_bio_extent_readpage as well (if we're lucky, not
2585			 * in the !uptodate case). In that case it returns 0 and
2586			 * we just go on with the next page in our bio. If it
2587			 * can't handle the error it will return -EIO and we
2588			 * remain responsible for that page.
2589			 */
2590			ret = bio_readpage_error(bio, offset, page, start, end,
2591						 mirror);
2592			if (ret == 0) {
2593				uptodate = !bio->bi_error;
2594				offset += len;
2595				continue;
2596			}
2597		}
2598readpage_ok:
2599		if (likely(uptodate)) {
2600			loff_t i_size = i_size_read(inode);
2601			pgoff_t end_index = i_size >> PAGE_SHIFT;
2602			unsigned off;
2603
2604			/* Zero out the end if this page straddles i_size */
2605			off = i_size & (PAGE_SIZE-1);
2606			if (page->index == end_index && off)
2607				zero_user_segment(page, off, PAGE_SIZE);
2608			SetPageUptodate(page);
2609		} else {
2610			ClearPageUptodate(page);
2611			SetPageError(page);
2612		}
2613		unlock_page(page);
2614		offset += len;
2615
2616		if (unlikely(!uptodate)) {
2617			if (extent_len) {
2618				endio_readpage_release_extent(tree,
2619							      extent_start,
2620							      extent_len, 1);
2621				extent_start = 0;
2622				extent_len = 0;
2623			}
2624			endio_readpage_release_extent(tree, start,
2625						      end - start + 1, 0);
2626		} else if (!extent_len) {
2627			extent_start = start;
2628			extent_len = end + 1 - start;
2629		} else if (extent_start + extent_len == start) {
2630			extent_len += end + 1 - start;
2631		} else {
2632			endio_readpage_release_extent(tree, extent_start,
2633						      extent_len, uptodate);
2634			extent_start = start;
2635			extent_len = end + 1 - start;
2636		}
2637	}
2638
2639	if (extent_len)
2640		endio_readpage_release_extent(tree, extent_start, extent_len,
2641					      uptodate);
2642	if (io_bio->end_io)
2643		io_bio->end_io(io_bio, bio->bi_error);
2644	bio_put(bio);
2645}
2646
2647/*
2648 * this allocates from the btrfs_bioset.  We're returning a bio right now
2649 * but you can call btrfs_io_bio for the appropriate container_of magic
2650 */
2651struct bio *
2652btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2653		gfp_t gfp_flags)
2654{
2655	struct btrfs_io_bio *btrfs_bio;
2656	struct bio *bio;
2657
2658	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2659
2660	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2661		while (!bio && (nr_vecs /= 2)) {
2662			bio = bio_alloc_bioset(gfp_flags,
2663					       nr_vecs, btrfs_bioset);
2664		}
2665	}
2666
2667	if (bio) {
2668		bio->bi_bdev = bdev;
2669		bio->bi_iter.bi_sector = first_sector;
2670		btrfs_bio = btrfs_io_bio(bio);
2671		btrfs_bio->csum = NULL;
2672		btrfs_bio->csum_allocated = NULL;
2673		btrfs_bio->end_io = NULL;
2674	}
2675	return bio;
2676}
2677
2678struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2679{
2680	struct btrfs_io_bio *btrfs_bio;
2681	struct bio *new;
2682
2683	new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2684	if (new) {
2685		btrfs_bio = btrfs_io_bio(new);
2686		btrfs_bio->csum = NULL;
2687		btrfs_bio->csum_allocated = NULL;
2688		btrfs_bio->end_io = NULL;
2689
2690#ifdef CONFIG_BLK_CGROUP
2691		/* FIXME, put this into bio_clone_bioset */
2692		if (bio->bi_css)
2693			bio_associate_blkcg(new, bio->bi_css);
2694#endif
2695	}
2696	return new;
2697}
2698
2699/* this also allocates from the btrfs_bioset */
2700struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2701{
2702	struct btrfs_io_bio *btrfs_bio;
2703	struct bio *bio;
2704
2705	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2706	if (bio) {
2707		btrfs_bio = btrfs_io_bio(bio);
2708		btrfs_bio->csum = NULL;
2709		btrfs_bio->csum_allocated = NULL;
2710		btrfs_bio->end_io = NULL;
2711	}
2712	return bio;
2713}
2714
2715
2716static int __must_check submit_one_bio(int rw, struct bio *bio,
2717				       int mirror_num, unsigned long bio_flags)
2718{
2719	int ret = 0;
2720	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2721	struct page *page = bvec->bv_page;
2722	struct extent_io_tree *tree = bio->bi_private;
2723	u64 start;
2724
2725	start = page_offset(page) + bvec->bv_offset;
2726
2727	bio->bi_private = NULL;
2728
2729	bio_get(bio);
2730
2731	if (tree->ops && tree->ops->submit_bio_hook)
2732		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2733					   mirror_num, bio_flags, start);
2734	else
2735		btrfsic_submit_bio(rw, bio);
2736
2737	bio_put(bio);
2738	return ret;
2739}
2740
2741static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2742		     unsigned long offset, size_t size, struct bio *bio,
2743		     unsigned long bio_flags)
2744{
2745	int ret = 0;
2746	if (tree->ops && tree->ops->merge_bio_hook)
2747		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2748						bio_flags);
2749	BUG_ON(ret < 0);
2750	return ret;
2751
2752}
2753
2754static int submit_extent_page(int rw, struct extent_io_tree *tree,
2755			      struct writeback_control *wbc,
2756			      struct page *page, sector_t sector,
2757			      size_t size, unsigned long offset,
2758			      struct block_device *bdev,
2759			      struct bio **bio_ret,
2760			      unsigned long max_pages,
2761			      bio_end_io_t end_io_func,
2762			      int mirror_num,
2763			      unsigned long prev_bio_flags,
2764			      unsigned long bio_flags,
2765			      bool force_bio_submit)
2766{
2767	int ret = 0;
2768	struct bio *bio;
2769	int contig = 0;
2770	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2771	size_t page_size = min_t(size_t, size, PAGE_SIZE);
2772
2773	if (bio_ret && *bio_ret) {
2774		bio = *bio_ret;
2775		if (old_compressed)
2776			contig = bio->bi_iter.bi_sector == sector;
2777		else
2778			contig = bio_end_sector(bio) == sector;
2779
2780		if (prev_bio_flags != bio_flags || !contig ||
2781		    force_bio_submit ||
2782		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2783		    bio_add_page(bio, page, page_size, offset) < page_size) {
2784			ret = submit_one_bio(rw, bio, mirror_num,
2785					     prev_bio_flags);
2786			if (ret < 0) {
2787				*bio_ret = NULL;
2788				return ret;
2789			}
2790			bio = NULL;
2791		} else {
2792			if (wbc)
2793				wbc_account_io(wbc, page, page_size);
2794			return 0;
2795		}
2796	}
2797
2798	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
2799			GFP_NOFS | __GFP_HIGH);
2800	if (!bio)
2801		return -ENOMEM;
2802
2803	bio_add_page(bio, page, page_size, offset);
2804	bio->bi_end_io = end_io_func;
2805	bio->bi_private = tree;
2806	if (wbc) {
2807		wbc_init_bio(wbc, bio);
2808		wbc_account_io(wbc, page, page_size);
2809	}
2810
2811	if (bio_ret)
2812		*bio_ret = bio;
2813	else
2814		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2815
2816	return ret;
2817}
2818
2819static void attach_extent_buffer_page(struct extent_buffer *eb,
2820				      struct page *page)
2821{
2822	if (!PagePrivate(page)) {
2823		SetPagePrivate(page);
2824		get_page(page);
2825		set_page_private(page, (unsigned long)eb);
2826	} else {
2827		WARN_ON(page->private != (unsigned long)eb);
2828	}
2829}
2830
2831void set_page_extent_mapped(struct page *page)
2832{
2833	if (!PagePrivate(page)) {
2834		SetPagePrivate(page);
2835		get_page(page);
2836		set_page_private(page, EXTENT_PAGE_PRIVATE);
2837	}
2838}
2839
2840static struct extent_map *
2841__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2842		 u64 start, u64 len, get_extent_t *get_extent,
2843		 struct extent_map **em_cached)
2844{
2845	struct extent_map *em;
2846
2847	if (em_cached && *em_cached) {
2848		em = *em_cached;
2849		if (extent_map_in_tree(em) && start >= em->start &&
2850		    start < extent_map_end(em)) {
2851			atomic_inc(&em->refs);
2852			return em;
2853		}
2854
2855		free_extent_map(em);
2856		*em_cached = NULL;
2857	}
2858
2859	em = get_extent(inode, page, pg_offset, start, len, 0);
2860	if (em_cached && !IS_ERR_OR_NULL(em)) {
2861		BUG_ON(*em_cached);
2862		atomic_inc(&em->refs);
2863		*em_cached = em;
2864	}
2865	return em;
2866}
2867/*
2868 * basic readpage implementation.  Locked extent state structs are inserted
2869 * into the tree and are removed when the IO is done (by the end_io
2870 * handlers)
2871 * XXX JDM: This needs looking at to ensure proper page locking
2872 */
2873static int __do_readpage(struct extent_io_tree *tree,
2874			 struct page *page,
2875			 get_extent_t *get_extent,
2876			 struct extent_map **em_cached,
2877			 struct bio **bio, int mirror_num,
2878			 unsigned long *bio_flags, int rw,
2879			 u64 *prev_em_start)
2880{
2881	struct inode *inode = page->mapping->host;
2882	u64 start = page_offset(page);
2883	u64 page_end = start + PAGE_SIZE - 1;
2884	u64 end;
2885	u64 cur = start;
2886	u64 extent_offset;
2887	u64 last_byte = i_size_read(inode);
2888	u64 block_start;
2889	u64 cur_end;
2890	sector_t sector;
2891	struct extent_map *em;
2892	struct block_device *bdev;
2893	int ret;
2894	int nr = 0;
2895	size_t pg_offset = 0;
2896	size_t iosize;
2897	size_t disk_io_size;
2898	size_t blocksize = inode->i_sb->s_blocksize;
2899	unsigned long this_bio_flag = 0;
2900
2901	set_page_extent_mapped(page);
2902
2903	end = page_end;
2904	if (!PageUptodate(page)) {
2905		if (cleancache_get_page(page) == 0) {
2906			BUG_ON(blocksize != PAGE_SIZE);
2907			unlock_extent(tree, start, end);
2908			goto out;
2909		}
2910	}
2911
2912	if (page->index == last_byte >> PAGE_SHIFT) {
2913		char *userpage;
2914		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2915
2916		if (zero_offset) {
2917			iosize = PAGE_SIZE - zero_offset;
2918			userpage = kmap_atomic(page);
2919			memset(userpage + zero_offset, 0, iosize);
2920			flush_dcache_page(page);
2921			kunmap_atomic(userpage);
2922		}
2923	}
2924	while (cur <= end) {
2925		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
2926		bool force_bio_submit = false;
2927
2928		if (cur >= last_byte) {
2929			char *userpage;
2930			struct extent_state *cached = NULL;
2931
2932			iosize = PAGE_SIZE - pg_offset;
2933			userpage = kmap_atomic(page);
2934			memset(userpage + pg_offset, 0, iosize);
2935			flush_dcache_page(page);
2936			kunmap_atomic(userpage);
2937			set_extent_uptodate(tree, cur, cur + iosize - 1,
2938					    &cached, GFP_NOFS);
2939			unlock_extent_cached(tree, cur,
2940					     cur + iosize - 1,
2941					     &cached, GFP_NOFS);
2942			break;
2943		}
2944		em = __get_extent_map(inode, page, pg_offset, cur,
2945				      end - cur + 1, get_extent, em_cached);
2946		if (IS_ERR_OR_NULL(em)) {
2947			SetPageError(page);
2948			unlock_extent(tree, cur, end);
2949			break;
2950		}
2951		extent_offset = cur - em->start;
2952		BUG_ON(extent_map_end(em) <= cur);
2953		BUG_ON(end < cur);
2954
2955		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2956			this_bio_flag |= EXTENT_BIO_COMPRESSED;
2957			extent_set_compress_type(&this_bio_flag,
2958						 em->compress_type);
2959		}
2960
2961		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2962		cur_end = min(extent_map_end(em) - 1, end);
2963		iosize = ALIGN(iosize, blocksize);
2964		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2965			disk_io_size = em->block_len;
2966			sector = em->block_start >> 9;
2967		} else {
2968			sector = (em->block_start + extent_offset) >> 9;
2969			disk_io_size = iosize;
2970		}
2971		bdev = em->bdev;
2972		block_start = em->block_start;
2973		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2974			block_start = EXTENT_MAP_HOLE;
2975
2976		/*
2977		 * If we have a file range that points to a compressed extent
2978		 * and it's followed by a consecutive file range that points to
2979		 * the same compressed extent (possibly with a different
2980		 * offset and/or length, so it either points to the whole extent
2981		 * or only part of it), we must make sure we do not submit a
2982		 * single bio to populate the pages for the 2 ranges because
2983		 * this makes the compressed extent read zero out the pages
2984		 * belonging to the 2nd range. Imagine the following scenario:
2985		 *
2986		 *  File layout
2987		 *  [0 - 8K]                     [8K - 24K]
2988		 *    |                               |
2989		 *    |                               |
2990		 * points to extent X,         points to extent X,
2991		 * offset 4K, length of 8K     offset 0, length 16K
2992		 *
2993		 * [extent X, compressed length = 4K uncompressed length = 16K]
2994		 *
2995		 * If the bio to read the compressed extent covers both ranges,
2996		 * it will decompress extent X into the pages belonging to the
2997		 * first range and then it will stop, zeroing out the remaining
2998		 * pages that belong to the other range that points to extent X.
2999		 * So here we make sure we submit 2 bios, one for the first
3000		 * range and another one for the second range. Both will target
3001		 * the same physical extent from disk, but we can't currently
3002		 * make the compressed bio endio callback populate the pages
3003		 * for both ranges because each compressed bio is tightly
3004		 * coupled with a single extent map, and each range can have
3005		 * an extent map with a different offset value relative to the
3006		 * uncompressed data of our extent and different lengths. This
3007		 * is a corner case so we prioritize correctness over
3008		 * non-optimal behavior (submitting 2 bios for the same extent).
3009		 */
3010		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3011		    prev_em_start && *prev_em_start != (u64)-1 &&
3012		    *prev_em_start != em->orig_start)
3013			force_bio_submit = true;
3014
3015		if (prev_em_start)
3016			*prev_em_start = em->orig_start;
3017
3018		free_extent_map(em);
3019		em = NULL;
3020
3021		/* we've found a hole, just zero and go on */
3022		if (block_start == EXTENT_MAP_HOLE) {
3023			char *userpage;
3024			struct extent_state *cached = NULL;
3025
3026			userpage = kmap_atomic(page);
3027			memset(userpage + pg_offset, 0, iosize);
3028			flush_dcache_page(page);
3029			kunmap_atomic(userpage);
3030
3031			set_extent_uptodate(tree, cur, cur + iosize - 1,
3032					    &cached, GFP_NOFS);
3033			unlock_extent_cached(tree, cur,
3034					     cur + iosize - 1,
3035					     &cached, GFP_NOFS);
3036			cur = cur + iosize;
3037			pg_offset += iosize;
3038			continue;
3039		}
3040		/* the get_extent function already copied into the page */
3041		if (test_range_bit(tree, cur, cur_end,
3042				   EXTENT_UPTODATE, 1, NULL)) {
3043			check_page_uptodate(tree, page);
3044			unlock_extent(tree, cur, cur + iosize - 1);
3045			cur = cur + iosize;
3046			pg_offset += iosize;
3047			continue;
3048		}
3049		/* we have an inline extent but it didn't get marked up
3050		 * to date.  Error out
3051		 */
3052		if (block_start == EXTENT_MAP_INLINE) {
3053			SetPageError(page);
3054			unlock_extent(tree, cur, cur + iosize - 1);
3055			cur = cur + iosize;
3056			pg_offset += iosize;
3057			continue;
3058		}
3059
3060		pnr -= page->index;
3061		ret = submit_extent_page(rw, tree, NULL, page,
3062					 sector, disk_io_size, pg_offset,
3063					 bdev, bio, pnr,
3064					 end_bio_extent_readpage, mirror_num,
3065					 *bio_flags,
3066					 this_bio_flag,
3067					 force_bio_submit);
3068		if (!ret) {
3069			nr++;
3070			*bio_flags = this_bio_flag;
3071		} else {
3072			SetPageError(page);
3073			unlock_extent(tree, cur, cur + iosize - 1);
3074		}
3075		cur = cur + iosize;
3076		pg_offset += iosize;
3077	}
3078out:
3079	if (!nr) {
3080		if (!PageError(page))
3081			SetPageUptodate(page);
3082		unlock_page(page);
3083	}
3084	return 0;
3085}
3086
3087static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3088					     struct page *pages[], int nr_pages,
3089					     u64 start, u64 end,
3090					     get_extent_t *get_extent,
3091					     struct extent_map **em_cached,
3092					     struct bio **bio, int mirror_num,
3093					     unsigned long *bio_flags, int rw,
3094					     u64 *prev_em_start)
3095{
3096	struct inode *inode;
3097	struct btrfs_ordered_extent *ordered;
3098	int index;
3099
3100	inode = pages[0]->mapping->host;
3101	while (1) {
3102		lock_extent(tree, start, end);
3103		ordered = btrfs_lookup_ordered_range(inode, start,
3104						     end - start + 1);
3105		if (!ordered)
3106			break;
3107		unlock_extent(tree, start, end);
3108		btrfs_start_ordered_extent(inode, ordered, 1);
3109		btrfs_put_ordered_extent(ordered);
3110	}
3111
3112	for (index = 0; index < nr_pages; index++) {
3113		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
3114			      mirror_num, bio_flags, rw, prev_em_start);
3115		put_page(pages[index]);
3116	}
3117}
3118
3119static void __extent_readpages(struct extent_io_tree *tree,
3120			       struct page *pages[],
3121			       int nr_pages, get_extent_t *get_extent,
3122			       struct extent_map **em_cached,
3123			       struct bio **bio, int mirror_num,
3124			       unsigned long *bio_flags, int rw,
3125			       u64 *prev_em_start)
3126{
3127	u64 start = 0;
3128	u64 end = 0;
3129	u64 page_start;
3130	int index;
3131	int first_index = 0;
3132
3133	for (index = 0; index < nr_pages; index++) {
3134		page_start = page_offset(pages[index]);
3135		if (!end) {
3136			start = page_start;
3137			end = start + PAGE_SIZE - 1;
3138			first_index = index;
3139		} else if (end + 1 == page_start) {
3140			end += PAGE_SIZE;
3141		} else {
3142			__do_contiguous_readpages(tree, &pages[first_index],
3143						  index - first_index, start,
3144						  end, get_extent, em_cached,
3145						  bio, mirror_num, bio_flags,
3146						  rw, prev_em_start);
3147			start = page_start;
3148			end = start + PAGE_SIZE - 1;
3149			first_index = index;
3150		}
3151	}
3152
3153	if (end)
3154		__do_contiguous_readpages(tree, &pages[first_index],
3155					  index - first_index, start,
3156					  end, get_extent, em_cached, bio,
3157					  mirror_num, bio_flags, rw,
3158					  prev_em_start);
3159}
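/*
 * Illustrative batching example (editorial, hypothetical offsets): with 4K
 * pages at file offsets 0, 4K, 8K and 16K, the first three pages are merged
 * into a single __do_contiguous_readpages() call for [0, 12287]; the gap
 * before 16K ends that batch, and the trailing call after the loop handles
 * [16384, 20479].
 */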
3160
3161static int __extent_read_full_page(struct extent_io_tree *tree,
3162				   struct page *page,
3163				   get_extent_t *get_extent,
3164				   struct bio **bio, int mirror_num,
3165				   unsigned long *bio_flags, int rw)
3166{
3167	struct inode *inode = page->mapping->host;
3168	struct btrfs_ordered_extent *ordered;
3169	u64 start = page_offset(page);
3170	u64 end = start + PAGE_SIZE - 1;
3171	int ret;
3172
3173	while (1) {
3174		lock_extent(tree, start, end);
3175		ordered = btrfs_lookup_ordered_range(inode, start,
3176						PAGE_SIZE);
3177		if (!ordered)
3178			break;
3179		unlock_extent(tree, start, end);
3180		btrfs_start_ordered_extent(inode, ordered, 1);
3181		btrfs_put_ordered_extent(ordered);
3182	}
3183
3184	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3185			    bio_flags, rw, NULL);
3186	return ret;
3187}
3188
3189int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3190			    get_extent_t *get_extent, int mirror_num)
3191{
3192	struct bio *bio = NULL;
3193	unsigned long bio_flags = 0;
3194	int ret;
3195
3196	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3197				      &bio_flags, READ);
3198	if (bio)
3199		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3200	return ret;
3201}
3202
3203static noinline void update_nr_written(struct page *page,
3204				      struct writeback_control *wbc,
3205				      unsigned long nr_written)
3206{
3207	wbc->nr_to_write -= nr_written;
3208	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3209	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3210		page->mapping->writeback_index = page->index + nr_written;
3211}
3212
3213/*
3214 * helper for __extent_writepage, doing all of the delayed allocation setup.
3215 *
3216 * This returns 1 if our fill_delalloc function did all the work required
3217 * to write the page (copy into inline extent).  In this case the IO has
3218 * been started and the page is already unlocked.
3219 *
3220 * This returns 0 if all went well (page still locked)
3221 * This returns < 0 if there were errors (page still locked)
3222 */
3223static noinline_for_stack int writepage_delalloc(struct inode *inode,
3224			      struct page *page, struct writeback_control *wbc,
3225			      struct extent_page_data *epd,
3226			      u64 delalloc_start,
3227			      unsigned long *nr_written)
3228{
3229	struct extent_io_tree *tree = epd->tree;
3230	u64 page_end = delalloc_start + PAGE_SIZE - 1;
3231	u64 nr_delalloc;
3232	u64 delalloc_to_write = 0;
3233	u64 delalloc_end = 0;
3234	int ret;
3235	int page_started = 0;
3236
3237	if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3238		return 0;
3239
3240	while (delalloc_end < page_end) {
3241		nr_delalloc = find_lock_delalloc_range(inode, tree,
3242					       page,
3243					       &delalloc_start,
3244					       &delalloc_end,
3245					       BTRFS_MAX_EXTENT_SIZE);
3246		if (nr_delalloc == 0) {
3247			delalloc_start = delalloc_end + 1;
3248			continue;
3249		}
3250		ret = tree->ops->fill_delalloc(inode, page,
3251					       delalloc_start,
3252					       delalloc_end,
3253					       &page_started,
3254					       nr_written);
3255		/* File system has been set read-only */
3256		if (ret) {
3257			SetPageError(page);
3258			/* fill_delalloc should return < 0 on error, but
3259			 * just in case, we use > 0 here meaning the IO
3260			 * is started, so we don't want to return > 0
3261			 * unless things are going well.
3262			 */
3263			ret = ret < 0 ? ret : -EIO;
3264			goto done;
3265		}
3266		/*
3267		 * delalloc_end is already one less than the total length, so
3268		 * we don't subtract one from PAGE_SIZE
3269		 */
3270		delalloc_to_write += (delalloc_end - delalloc_start +
3271				      PAGE_SIZE) >> PAGE_SHIFT;
3272		delalloc_start = delalloc_end + 1;
3273	}
3274	if (wbc->nr_to_write < delalloc_to_write) {
3275		int thresh = 8192;
3276
3277		if (delalloc_to_write < thresh * 2)
3278			thresh = delalloc_to_write;
3279		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3280					 thresh);
3281	}
3282
3283	/* did the fill delalloc function already unlock and start
3284	 * the IO?
3285	 */
3286	if (page_started) {
3287		/*
3288		 * we've unlocked the page, so we can't update
3289		 * the mapping's writeback index, just update
3290		 * nr_to_write.
3291		 */
3292		wbc->nr_to_write -= *nr_written;
3293		return 1;
3294	}
3295
3296	ret = 0;
3297
3298done:
3299	return ret;
3300}
3301
3302/*
3303 * helper for __extent_writepage.  This calls the writepage start hooks,
3304 * and does the loop to map the page into extents and bios.
3305 *
3306 * We return 1 if the IO is started and the page is unlocked,
3307 * 0 if all went well (page still locked)
3308 * < 0 if there were errors (page still locked)
3309 */
3310static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3311				 struct page *page,
3312				 struct writeback_control *wbc,
3313				 struct extent_page_data *epd,
3314				 loff_t i_size,
3315				 unsigned long nr_written,
3316				 int write_flags, int *nr_ret)
3317{
3318	struct extent_io_tree *tree = epd->tree;
3319	u64 start = page_offset(page);
3320	u64 page_end = start + PAGE_SIZE - 1;
3321	u64 end;
3322	u64 cur = start;
3323	u64 extent_offset;
3324	u64 block_start;
3325	u64 iosize;
3326	sector_t sector;
3327	struct extent_state *cached_state = NULL;
3328	struct extent_map *em;
3329	struct block_device *bdev;
3330	size_t pg_offset = 0;
3331	size_t blocksize;
3332	int ret = 0;
3333	int nr = 0;
3334	bool compressed;
3335
3336	if (tree->ops && tree->ops->writepage_start_hook) {
3337		ret = tree->ops->writepage_start_hook(page, start,
3338						      page_end);
3339		if (ret) {
3340			/* Fixup worker will requeue */
3341			if (ret == -EBUSY)
3342				wbc->pages_skipped++;
3343			else
3344				redirty_page_for_writepage(wbc, page);
3345
3346			update_nr_written(page, wbc, nr_written);
3347			unlock_page(page);
3348			ret = 1;
3349			goto done_unlocked;
3350		}
3351	}
3352
3353	/*
3354	 * we don't want to touch the inode after unlocking the page,
3355	 * so we update the mapping writeback index now
3356	 */
3357	update_nr_written(page, wbc, nr_written + 1);
3358
3359	end = page_end;
3360	if (i_size <= start) {
3361		if (tree->ops && tree->ops->writepage_end_io_hook)
3362			tree->ops->writepage_end_io_hook(page, start,
3363							 page_end, NULL, 1);
3364		goto done;
3365	}
3366
3367	blocksize = inode->i_sb->s_blocksize;
3368
3369	while (cur <= end) {
3370		u64 em_end;
3371		if (cur >= i_size) {
3372			if (tree->ops && tree->ops->writepage_end_io_hook)
3373				tree->ops->writepage_end_io_hook(page, cur,
3374							 page_end, NULL, 1);
3375			break;
3376		}
3377		em = epd->get_extent(inode, page, pg_offset, cur,
3378				     end - cur + 1, 1);
3379		if (IS_ERR_OR_NULL(em)) {
3380			SetPageError(page);
3381			ret = PTR_ERR_OR_ZERO(em);
3382			break;
3383		}
3384
3385		extent_offset = cur - em->start;
3386		em_end = extent_map_end(em);
3387		BUG_ON(em_end <= cur);
3388		BUG_ON(end < cur);
3389		iosize = min(em_end - cur, end - cur + 1);
3390		iosize = ALIGN(iosize, blocksize);
3391		sector = (em->block_start + extent_offset) >> 9;
3392		bdev = em->bdev;
3393		block_start = em->block_start;
3394		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3395		free_extent_map(em);
3396		em = NULL;
3397
3398		/*
3399		 * compressed and inline extents are written through other
3400		 * paths in the FS
3401		 */
3402		if (compressed || block_start == EXTENT_MAP_HOLE ||
3403		    block_start == EXTENT_MAP_INLINE) {
3404			/*
3405			 * end_io notification does not happen here for
3406			 * compressed extents
3407			 */
3408			if (!compressed && tree->ops &&
3409			    tree->ops->writepage_end_io_hook)
3410				tree->ops->writepage_end_io_hook(page, cur,
3411							 cur + iosize - 1,
3412							 NULL, 1);
3413			else if (compressed) {
3414				/* we don't want to end_page_writeback on
3415				 * a compressed extent.  this happens
3416				 * elsewhere
3417				 */
3418				nr++;
3419			}
3420
3421			cur += iosize;
3422			pg_offset += iosize;
3423			continue;
3424		}
3425
3426		if (tree->ops && tree->ops->writepage_io_hook) {
3427			ret = tree->ops->writepage_io_hook(page, cur,
3428						cur + iosize - 1);
3429		} else {
3430			ret = 0;
3431		}
3432		if (ret) {
3433			SetPageError(page);
3434		} else {
3435			unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
3436
3437			set_range_writeback(tree, cur, cur + iosize - 1);
3438			if (!PageWriteback(page)) {
3439				btrfs_err(BTRFS_I(inode)->root->fs_info,
3440					   "page %lu not writeback, cur %llu end %llu",
3441				       page->index, cur, end);
3442			}
3443
3444			ret = submit_extent_page(write_flags, tree, wbc, page,
3445						 sector, iosize, pg_offset,
3446						 bdev, &epd->bio, max_nr,
3447						 end_bio_extent_writepage,
3448						 0, 0, 0, false);
3449			if (ret)
3450				SetPageError(page);
3451		}
3452		cur = cur + iosize;
3453		pg_offset += iosize;
3454		nr++;
3455	}
3456done:
3457	*nr_ret = nr;
3458
3459done_unlocked:
3460
3461	/* drop our reference on any cached states */
3462	free_extent_state(cached_state);
3463	return ret;
3464}
3465
3466/*
3467 * the writepage semantics are similar to regular writepage.  extent
3468 * records are inserted to lock ranges in the tree, and as dirty areas
3469 * are found, they are marked writeback.  Then the lock bits are removed
3470 * and the end_io handler clears the writeback ranges
3471 */
3472static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3473			      void *data)
3474{
3475	struct inode *inode = page->mapping->host;
3476	struct extent_page_data *epd = data;
3477	u64 start = page_offset(page);
3478	u64 page_end = start + PAGE_SIZE - 1;
3479	int ret;
3480	int nr = 0;
3481	size_t pg_offset = 0;
3482	loff_t i_size = i_size_read(inode);
3483	unsigned long end_index = i_size >> PAGE_SHIFT;
3484	int write_flags;
3485	unsigned long nr_written = 0;
3486
3487	if (wbc->sync_mode == WB_SYNC_ALL)
3488		write_flags = WRITE_SYNC;
3489	else
3490		write_flags = WRITE;
3491
3492	trace___extent_writepage(page, inode, wbc);
3493
3494	WARN_ON(!PageLocked(page));
3495
3496	ClearPageError(page);
3497
3498	pg_offset = i_size & (PAGE_SIZE - 1);
3499	if (page->index > end_index ||
3500	   (page->index == end_index && !pg_offset)) {
3501		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3502		unlock_page(page);
3503		return 0;
3504	}
3505
3506	if (page->index == end_index) {
3507		char *userpage;
3508
3509		userpage = kmap_atomic(page);
3510		memset(userpage + pg_offset, 0,
3511		       PAGE_SIZE - pg_offset);
3512		kunmap_atomic(userpage);
3513		flush_dcache_page(page);
3514	}
3515
3516	pg_offset = 0;
3517
3518	set_page_extent_mapped(page);
3519
3520	ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3521	if (ret == 1)
3522		goto done_unlocked;
3523	if (ret)
3524		goto done;
3525
3526	ret = __extent_writepage_io(inode, page, wbc, epd,
3527				    i_size, nr_written, write_flags, &nr);
3528	if (ret == 1)
3529		goto done_unlocked;
3530
3531done:
3532	if (nr == 0) {
3533		/* make sure the mapping tag for page dirty gets cleared */
3534		set_page_writeback(page);
3535		end_page_writeback(page);
3536	}
3537	if (PageError(page)) {
3538		ret = ret < 0 ? ret : -EIO;
3539		end_extent_writepage(page, ret, start, page_end);
3540	}
3541	unlock_page(page);
3542	return ret;
3543
3544done_unlocked:
3545	return 0;
3546}
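/*
 * Worked example for the end_index handling above (editorial, hypothetical
 * i_size): with 4K pages and i_size == 10240, end_index is 2 and pg_offset
 * is 2048, so a writepage call for a page at index 3 or beyond just
 * invalidates and unlocks it, while the page at index 2 gets its tail zeroed
 * from offset 2048 before its extents are written out.
 */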
3547
3548void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3549{
3550	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3551		       TASK_UNINTERRUPTIBLE);
3552}
3553
3554static noinline_for_stack int
3555lock_extent_buffer_for_io(struct extent_buffer *eb,
3556			  struct btrfs_fs_info *fs_info,
3557			  struct extent_page_data *epd)
3558{
3559	unsigned long i, num_pages;
3560	int flush = 0;
3561	int ret = 0;
3562
3563	if (!btrfs_try_tree_write_lock(eb)) {
3564		flush = 1;
3565		flush_write_bio(epd);
3566		btrfs_tree_lock(eb);
3567	}
3568
3569	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3570		btrfs_tree_unlock(eb);
3571		if (!epd->sync_io)
3572			return 0;
3573		if (!flush) {
3574			flush_write_bio(epd);
3575			flush = 1;
3576		}
3577		while (1) {
3578			wait_on_extent_buffer_writeback(eb);
3579			btrfs_tree_lock(eb);
3580			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3581				break;
3582			btrfs_tree_unlock(eb);
3583		}
3584	}
3585
3586	/*
3587	 * We need to do this to prevent races with anyone checking whether the
3588	 * eb is under IO, since we can end up having no IO bits set for a short
3589	 * period of time.
3590	 */
3591	spin_lock(&eb->refs_lock);
3592	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3593		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3594		spin_unlock(&eb->refs_lock);
3595		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3596		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
3597				     -eb->len,
3598				     fs_info->dirty_metadata_batch);
3599		ret = 1;
3600	} else {
3601		spin_unlock(&eb->refs_lock);
3602	}
3603
3604	btrfs_tree_unlock(eb);
3605
3606	if (!ret)
3607		return ret;
3608
3609	num_pages = num_extent_pages(eb->start, eb->len);
3610	for (i = 0; i < num_pages; i++) {
3611		struct page *p = eb->pages[i];
3612
3613		if (!trylock_page(p)) {
3614			if (!flush) {
3615				flush_write_bio(epd);
3616				flush = 1;
3617			}
3618			lock_page(p);
3619		}
3620	}
3621
3622	return ret;
3623}
3624
3625static void end_extent_buffer_writeback(struct extent_buffer *eb)
3626{
3627	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3628	smp_mb__after_atomic();
3629	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3630}
3631
3632static void set_btree_ioerr(struct page *page)
3633{
3634	struct extent_buffer *eb = (struct extent_buffer *)page->private;
3635	struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3636
3637	SetPageError(page);
3638	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3639		return;
3640
3641	/*
3642	 * If writeback for a btree extent that doesn't belong to a log tree
3643	 * failed, increment the counter transaction->eb_write_errors.
3644	 * We do this because while the transaction is running and before it's
3645	 * committing (when we call filemap_fdata[write|wait]_range against
3646	 * the btree inode), we might have
3647	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3648	 * returns an error or an error happens during writeback, when we're
3649	 * committing the transaction we wouldn't know about it, since the pages
3650	 * can be no longer dirty nor marked anymore for writeback (if a
3651	 * subsequent modification to the extent buffer didn't happen before the
3652	 * transaction commit), which makes filemap_fdata[write|wait]_range not
3653	 * able to find the pages tagged with SetPageError at transaction
3654	 * commit time. So if this happens we must abort the transaction,
3655	 * otherwise we commit a super block with btree roots that point to
3656	 * btree nodes/leafs whose content on disk is invalid - either garbage
3657	 * or the content of some node/leaf from a past generation that got
3658	 * cowed or deleted and is no longer valid.
3659	 *
3660	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3661	 * not be enough - we need to distinguish between log tree extents vs
3662	 * non-log tree extents, and the next filemap_fdatawait_range() call
3663	 * will catch and clear such errors in the mapping - and that call might
3664	 * be from a log sync and not from a transaction commit. Also, checking
3665	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3666	 * not done and would not be reliable - the eb might have been released
3667	 * from memory and reading it back again means that flag would not be
3668	 * set (since it's a runtime flag, not persisted on disk).
3669	 *
3670	 * Using the flags below in the btree inode also covers the case where
3671	 * writepages() returns success after having started writeback for all
3672	 * dirty pages, but where, before filemap_fdatawait_range() is called,
3673	 * that writeback has already finished with errors - because we were
3674	 * not using AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return
3675	 * success, as it could not know that writeback errors happened (the
3676	 * pages were no longer tagged for
3677	 * writeback).
3678	 */
3679	switch (eb->log_index) {
3680	case -1:
3681		set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
3682		break;
3683	case 0:
3684		set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
3685		break;
3686	case 1:
3687		set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
3688		break;
3689	default:
3690		BUG(); /* unexpected, logic error */
3691	}
3692}
3693
3694static void end_bio_extent_buffer_writepage(struct bio *bio)
3695{
3696	struct bio_vec *bvec;
3697	struct extent_buffer *eb;
3698	int i, done;
3699
3700	bio_for_each_segment_all(bvec, bio, i) {
3701		struct page *page = bvec->bv_page;
3702
3703		eb = (struct extent_buffer *)page->private;
3704		BUG_ON(!eb);
3705		done = atomic_dec_and_test(&eb->io_pages);
3706
3707		if (bio->bi_error ||
3708		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3709			ClearPageUptodate(page);
3710			set_btree_ioerr(page);
3711		}
3712
3713		end_page_writeback(page);
3714
3715		if (!done)
3716			continue;
3717
3718		end_extent_buffer_writeback(eb);
3719	}
3720
3721	bio_put(bio);
3722}
3723
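/*
 * Submit all pages of a single extent buffer for writeback.  If submitting
 * a page fails we record the error, finish writeback on the pages we could
 * not submit and return -EIO so the caller stops writing out this buffer.
 */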
3724static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3725			struct btrfs_fs_info *fs_info,
3726			struct writeback_control *wbc,
3727			struct extent_page_data *epd)
3728{
3729	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3730	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3731	u64 offset = eb->start;
3732	unsigned long i, num_pages;
3733	unsigned long bio_flags = 0;
3734	int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
 
3735	int ret = 0;
3736
3737	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3738	num_pages = num_extent_pages(eb->start, eb->len);
3739	atomic_set(&eb->io_pages, num_pages);
3740	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3741		bio_flags = EXTENT_BIO_TREE_LOG;
3742
3743	for (i = 0; i < num_pages; i++) {
3744		struct page *p = eb->pages[i];
3745
3746		clear_page_dirty_for_io(p);
3747		set_page_writeback(p);
3748		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3749					 PAGE_SIZE, 0, bdev, &epd->bio,
3750					 -1, end_bio_extent_buffer_writepage,
3751					 0, epd->bio_flags, bio_flags, false);
3752		epd->bio_flags = bio_flags;
3753		if (ret) {
3754			set_btree_ioerr(p);
3755			end_page_writeback(p);
 
3756			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3757				end_extent_buffer_writeback(eb);
3758			ret = -EIO;
3759			break;
3760		}
3761		offset += PAGE_SIZE;
3762		update_nr_written(p, wbc, 1);
3763		unlock_page(p);
3764	}
3765
3766	if (unlikely(ret)) {
3767		for (; i < num_pages; i++) {
3768			struct page *p = eb->pages[i];
3769			clear_page_dirty_for_io(p);
3770			unlock_page(p);
3771		}
3772	}
3773
3774	return ret;
3775}
3776
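/*
 * Write back the dirty extent buffers of the btree inode.  This walks the
 * dirty pages like write_cache_pages() does, but locks and submits whole
 * extent buffers (via lock_extent_buffer_for_io() and write_one_eb())
 * instead of individual pages.
 */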
3777int btree_write_cache_pages(struct address_space *mapping,
3778				   struct writeback_control *wbc)
3779{
3780	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3781	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3782	struct extent_buffer *eb, *prev_eb = NULL;
3783	struct extent_page_data epd = {
3784		.bio = NULL,
3785		.tree = tree,
3786		.extent_locked = 0,
3787		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3788		.bio_flags = 0,
3789	};
 
3790	int ret = 0;
3791	int done = 0;
3792	int nr_to_write_done = 0;
3793	struct pagevec pvec;
3794	int nr_pages;
3795	pgoff_t index;
3796	pgoff_t end;		/* Inclusive */
3797	int scanned = 0;
3798	int tag;
3799
3800	pagevec_init(&pvec, 0);
3801	if (wbc->range_cyclic) {
3802		index = mapping->writeback_index; /* Start from prev offset */
3803		end = -1;
3804	} else {
3805		index = wbc->range_start >> PAGE_SHIFT;
3806		end = wbc->range_end >> PAGE_SHIFT;
3807		scanned = 1;
3808	}
3809	if (wbc->sync_mode == WB_SYNC_ALL)
3810		tag = PAGECACHE_TAG_TOWRITE;
3811	else
3812		tag = PAGECACHE_TAG_DIRTY;
3813retry:
3814	if (wbc->sync_mode == WB_SYNC_ALL)
3815		tag_pages_for_writeback(mapping, index, end);
3816	while (!done && !nr_to_write_done && (index <= end) &&
3817	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3818			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3819		unsigned i;
3820
3821		scanned = 1;
3822		for (i = 0; i < nr_pages; i++) {
3823			struct page *page = pvec.pages[i];
3824
3825			if (!PagePrivate(page))
3826				continue;
3827
3828			if (!wbc->range_cyclic && page->index > end) {
3829				done = 1;
3830				break;
3831			}
3832
3833			spin_lock(&mapping->private_lock);
3834			if (!PagePrivate(page)) {
3835				spin_unlock(&mapping->private_lock);
3836				continue;
3837			}
3838
3839			eb = (struct extent_buffer *)page->private;
3840
3841			/*
3842			 * Shouldn't happen and normally this would be a BUG_ON
3843			 * but no sense in crashing the user's box for something
3844			 * we can survive anyway.
3845			 */
3846			if (WARN_ON(!eb)) {
3847				spin_unlock(&mapping->private_lock);
3848				continue;
3849			}
3850
3851			if (eb == prev_eb) {
3852				spin_unlock(&mapping->private_lock);
3853				continue;
3854			}
3855
3856			ret = atomic_inc_not_zero(&eb->refs);
3857			spin_unlock(&mapping->private_lock);
3858			if (!ret)
3859				continue;
3860
3861			prev_eb = eb;
3862			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3863			if (!ret) {
3864				free_extent_buffer(eb);
3865				continue;
3866			}
3867
3868			ret = write_one_eb(eb, fs_info, wbc, &epd);
3869			if (ret) {
3870				done = 1;
3871				free_extent_buffer(eb);
3872				break;
3873			}
3874			free_extent_buffer(eb);
3875
3876			/*
3877			 * the filesystem may choose to bump up nr_to_write.
3878			 * We have to make sure to honor the new nr_to_write
3879			 * at any time
3880			 */
3881			nr_to_write_done = wbc->nr_to_write <= 0;
3882		}
3883		pagevec_release(&pvec);
3884		cond_resched();
3885	}
3886	if (!scanned && !done) {
3887		/*
3888		 * We hit the last page and there is more work to be done: wrap
3889		 * back to the start of the file
3890		 */
3891		scanned = 1;
3892		index = 0;
3893		goto retry;
3894	}
3895	flush_write_bio(&epd);
3896	return ret;
3897}
3898
3899/**
3900 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3901 * @mapping: address space structure to write
3902 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3903 * @writepage: function called for each page
3904 * @data: data passed to writepage function
3905 *
3906 * If a page is already under I/O, write_cache_pages() skips it, even
3907 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3908 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3909 * and msync() need to guarantee that all the data which was dirty at the time
3910 * the call was made get new I/O started against them.  If wbc->sync_mode is
3911 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3912 * existing IO to complete.
3913 */
3914static int extent_write_cache_pages(struct extent_io_tree *tree,
3915			     struct address_space *mapping,
3916			     struct writeback_control *wbc,
3917			     writepage_t writepage, void *data,
3918			     void (*flush_fn)(void *))
3919{
3920	struct inode *inode = mapping->host;
3921	int ret = 0;
3922	int done = 0;
3923	int err = 0;
3924	int nr_to_write_done = 0;
3925	struct pagevec pvec;
3926	int nr_pages;
3927	pgoff_t index;
3928	pgoff_t end;		/* Inclusive */
3929	int scanned = 0;
3930	int tag;
3931
3932	/*
3933	 * We have to hold onto the inode so that ordered extents can do their
3934	 * work when the IO finishes.  The alternative to this is failing to add
3935	 * an ordered extent if the igrab() fails there and that is a huge pain
3936	 * to deal with, so instead just hold onto the inode throughout the
3937	 * writepages operation.  If it fails here we are freeing up the inode
3938	 * anyway and we'd rather not waste our time writing out stuff that is
3939	 * going to be truncated anyway.
3940	 */
3941	if (!igrab(inode))
3942		return 0;
3943
3944	pagevec_init(&pvec, 0);
3945	if (wbc->range_cyclic) {
3946		index = mapping->writeback_index; /* Start from prev offset */
3947		end = -1;
3948	} else {
3949		index = wbc->range_start >> PAGE_SHIFT;
3950		end = wbc->range_end >> PAGE_SHIFT;
3951		scanned = 1;
3952	}
3953	if (wbc->sync_mode == WB_SYNC_ALL)
3954		tag = PAGECACHE_TAG_TOWRITE;
3955	else
3956		tag = PAGECACHE_TAG_DIRTY;
3957retry:
3958	if (wbc->sync_mode == WB_SYNC_ALL)
3959		tag_pages_for_writeback(mapping, index, end);
 
3960	while (!done && !nr_to_write_done && (index <= end) &&
3961	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3962			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3963		unsigned i;
3964
3965		scanned = 1;
3966		for (i = 0; i < nr_pages; i++) {
3967			struct page *page = pvec.pages[i];
3968
 
3969			/*
3970			 * At this point we hold neither mapping->tree_lock nor
3971			 * lock on the page itself: the page may be truncated or
3972			 * invalidated (changing page->mapping to NULL), or even
3973			 * swizzled back from swapper_space to tmpfs file
3974			 * mapping
3975			 */
3976			if (!trylock_page(page)) {
3977				flush_fn(data);
 
3978				lock_page(page);
3979			}
3980
3981			if (unlikely(page->mapping != mapping)) {
3982				unlock_page(page);
3983				continue;
3984			}
3985
3986			if (!wbc->range_cyclic && page->index > end) {
3987				done = 1;
3988				unlock_page(page);
3989				continue;
3990			}
3991
3992			if (wbc->sync_mode != WB_SYNC_NONE) {
3993				if (PageWriteback(page))
3994					flush_fn(data);
3995				wait_on_page_writeback(page);
3996			}
3997
3998			if (PageWriteback(page) ||
3999			    !clear_page_dirty_for_io(page)) {
4000				unlock_page(page);
4001				continue;
4002			}
4003
4004			ret = (*writepage)(page, wbc, data);
4005
4006			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
4007				unlock_page(page);
4008				ret = 0;
4009			}
4010			if (!err && ret < 0)
4011				err = ret;
4012
4013			/*
4014			 * the filesystem may choose to bump up nr_to_write.
4015			 * We have to make sure to honor the new nr_to_write
4016			 * at any time
4017			 */
4018			nr_to_write_done = wbc->nr_to_write <= 0;
4019		}
4020		pagevec_release(&pvec);
4021		cond_resched();
4022	}
4023	if (!scanned && !done && !err) {
4024		/*
4025		 * We hit the last page and there is more work to be done: wrap
4026		 * back to the start of the file
4027		 */
4028		scanned = 1;
4029		index = 0;
4030		goto retry;
4031	}
4032	btrfs_add_delayed_iput(inode);
4033	return err;
4034}
4035
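/*
 * Submit any bio that has been built up in the extent_page_data, using
 * WRITE_SYNC when the caller asked for synchronous writeback.
 */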
4036static void flush_epd_write_bio(struct extent_page_data *epd)
4037{
4038	if (epd->bio) {
4039		int rw = WRITE;
4040		int ret;
4041
4042		if (epd->sync_io)
4043			rw = WRITE_SYNC;
4044
4045		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
4046		BUG_ON(ret < 0); /* -ENOMEM */
4047		epd->bio = NULL;
4048	}
4049}
4050
4051static noinline void flush_write_bio(void *data)
4052{
4053	struct extent_page_data *epd = data;
4054	flush_epd_write_bio(epd);
 
4055}
4056
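/*
 * Write a single page through __extent_writepage() and flush any bio that
 * was built up while doing so.
 */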
4057int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
4058			  get_extent_t *get_extent,
4059			  struct writeback_control *wbc)
4060{
4061	int ret;
4062	struct extent_page_data epd = {
4063		.bio = NULL,
4064		.tree = tree,
4065		.get_extent = get_extent,
4066		.extent_locked = 0,
4067		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4068		.bio_flags = 0,
4069	};
4070
4071	ret = __extent_writepage(page, wbc, &epd);
4072
4073	flush_epd_write_bio(&epd);
 
4074	return ret;
4075}
4076
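/*
 * Write out the pages covering [start, end] of an inode whose extent range
 * is already locked by the caller (extent_locked == 1).  Pages that are no
 * longer dirty are completed immediately through the writepage_end_io_hook.
 */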
4077int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4078			      u64 start, u64 end, get_extent_t *get_extent,
4079			      int mode)
4080{
4081	int ret = 0;
4082	struct address_space *mapping = inode->i_mapping;
4083	struct page *page;
4084	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4085		PAGE_SHIFT;
4086
4087	struct extent_page_data epd = {
4088		.bio = NULL,
4089		.tree = tree,
4090		.get_extent = get_extent,
4091		.extent_locked = 1,
4092		.sync_io = mode == WB_SYNC_ALL,
4093		.bio_flags = 0,
4094	};
4095	struct writeback_control wbc_writepages = {
4096		.sync_mode	= mode,
4097		.nr_to_write	= nr_pages * 2,
4098		.range_start	= start,
4099		.range_end	= end + 1,
4100	};
4101
 
4102	while (start <= end) {
4103		page = find_get_page(mapping, start >> PAGE_SHIFT);
4104		if (clear_page_dirty_for_io(page))
4105			ret = __extent_writepage(page, &wbc_writepages, &epd);
4106		else {
4107			if (tree->ops && tree->ops->writepage_end_io_hook)
4108				tree->ops->writepage_end_io_hook(page, start,
4109						 start + PAGE_SIZE - 1,
4110						 NULL, 1);
4111			unlock_page(page);
4112		}
4113		put_page(page);
4114		start += PAGE_SIZE;
4115	}
4116
4117	flush_epd_write_bio(&epd);
4118	return ret;
4119}
4120
4121int extent_writepages(struct extent_io_tree *tree,
4122		      struct address_space *mapping,
4123		      get_extent_t *get_extent,
4124		      struct writeback_control *wbc)
4125{
4126	int ret = 0;
4127	struct extent_page_data epd = {
4128		.bio = NULL,
4129		.tree = tree,
4130		.get_extent = get_extent,
4131		.extent_locked = 0,
4132		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4133		.bio_flags = 0,
4134	};
4135
4136	ret = extent_write_cache_pages(tree, mapping, wbc,
4137				       __extent_writepage, &epd,
4138				       flush_write_bio);
4139	flush_epd_write_bio(&epd);
4140	return ret;
4141}
4142
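/*
 * Readahead entry point: add the pages to the page cache in small batches
 * and submit them through __extent_readpages(), reusing a cached extent map
 * across pages to avoid repeated lookups.
 */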
4143int extent_readpages(struct extent_io_tree *tree,
4144		     struct address_space *mapping,
4145		     struct list_head *pages, unsigned nr_pages,
4146		     get_extent_t get_extent)
4147{
4148	struct bio *bio = NULL;
4149	unsigned page_idx;
4150	unsigned long bio_flags = 0;
4151	struct page *pagepool[16];
4152	struct page *page;
4153	struct extent_map *em_cached = NULL;
4154	int nr = 0;
4155	u64 prev_em_start = (u64)-1;
 
4156
4157	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4158		page = list_entry(pages->prev, struct page, lru);
 
4159
4160		prefetchw(&page->flags);
4161		list_del(&page->lru);
4162		if (add_to_page_cache_lru(page, mapping,
4163					page->index, GFP_NOFS)) {
4164			put_page(page);
4165			continue;
4166		}
4167
4168		pagepool[nr++] = page;
4169		if (nr < ARRAY_SIZE(pagepool))
4170			continue;
4171		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4172				   &bio, 0, &bio_flags, READ, &prev_em_start);
4173		nr = 0;
4174	}
4175	if (nr)
4176		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4177				   &bio, 0, &bio_flags, READ, &prev_em_start);
4178
4179	if (em_cached)
4180		free_extent_map(em_cached);
4181
4182	BUG_ON(!list_empty(pages));
4183	if (bio)
4184		return submit_one_bio(READ, bio, 0, bio_flags);
4185	return 0;
4186}
4187
4188/*
4189 * basic invalidatepage code, this waits on any locked or writeback
4190 * ranges corresponding to the page, and then deletes any extent state
4191 * records from the tree
4192 */
4193int extent_invalidatepage(struct extent_io_tree *tree,
4194			  struct page *page, unsigned long offset)
4195{
4196	struct extent_state *cached_state = NULL;
4197	u64 start = page_offset(page);
4198	u64 end = start + PAGE_SIZE - 1;
4199	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4200
4201	start += ALIGN(offset, blocksize);
4202	if (start > end)
4203		return 0;
4204
4205	lock_extent_bits(tree, start, end, &cached_state);
4206	wait_on_page_writeback(page);
4207	clear_extent_bit(tree, start, end,
4208			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4209			 EXTENT_DO_ACCOUNTING,
4210			 1, 1, &cached_state, GFP_NOFS);
4211	return 0;
4212}
4213
4214/*
4215 * a helper for releasepage, this tests for areas of the page that
4216 * are locked or under IO and drops the related state bits if it is safe
4217 * to drop the page.
4218 */
4219static int try_release_extent_state(struct extent_map_tree *map,
4220				    struct extent_io_tree *tree,
4221				    struct page *page, gfp_t mask)
4222{
4223	u64 start = page_offset(page);
4224	u64 end = start + PAGE_SIZE - 1;
4225	int ret = 1;
4226
4227	if (test_range_bit(tree, start, end,
4228			   EXTENT_IOBITS, 0, NULL))
4229		ret = 0;
4230	else {
4231		if ((mask & GFP_NOFS) == GFP_NOFS)
4232			mask = GFP_NOFS;
4233		/*
4234		 * at this point we can safely clear everything except the
4235		 * locked bit and the nodatasum bit
4236		 */
4237		ret = clear_extent_bit(tree, start, end,
4238				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4239				 0, 0, NULL, mask);
4240
4241		/* if clear_extent_bit failed for enomem reasons,
4242		 * we can't allow the release to continue.
4243		 */
4244		if (ret < 0)
4245			ret = 0;
4246		else
4247			ret = 1;
4248	}
4249	return ret;
4250}
4251
4252/*
4253 * a helper for releasepage.  As long as there are no locked extents
4254 * in the range corresponding to the page, both state records and extent
4255 * map records are removed
4256 */
4257int try_release_extent_mapping(struct extent_map_tree *map,
4258			       struct extent_io_tree *tree, struct page *page,
4259			       gfp_t mask)
4260{
4261	struct extent_map *em;
4262	u64 start = page_offset(page);
4263	u64 end = start + PAGE_SIZE - 1;
4264
4265	if (gfpflags_allow_blocking(mask) &&
4266	    page->mapping->host->i_size > SZ_16M) {
4267		u64 len;
4268		while (start <= end) {
4269			len = end - start + 1;
4270			write_lock(&map->lock);
4271			em = lookup_extent_mapping(map, start, len);
4272			if (!em) {
4273				write_unlock(&map->lock);
4274				break;
4275			}
4276			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4277			    em->start != start) {
4278				write_unlock(&map->lock);
4279				free_extent_map(em);
4280				break;
4281			}
4282			if (!test_range_bit(tree, em->start,
4283					    extent_map_end(em) - 1,
4284					    EXTENT_LOCKED | EXTENT_WRITEBACK,
4285					    0, NULL)) {
4286				remove_extent_mapping(map, em);
4287				/* once for the rb tree */
4288				free_extent_map(em);
4289			}
4290			start = extent_map_end(em);
4291			write_unlock(&map->lock);
4292
4293			/* once for us */
4294			free_extent_map(em);
4295		}
4296	}
4297	return try_release_extent_state(map, tree, page, mask);
4298}
4299
4300/*
4301 * helper function for fiemap, which doesn't want to see any holes.
4302 * This maps until we find something past 'last'
4303 */
4304static struct extent_map *get_extent_skip_holes(struct inode *inode,
4305						u64 offset,
4306						u64 last,
4307						get_extent_t *get_extent)
4308{
4309	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4310	struct extent_map *em;
4311	u64 len;
4312
4313	if (offset >= last)
4314		return NULL;
4315
4316	while (1) {
4317		len = last - offset;
4318		if (len == 0)
4319			break;
4320		len = ALIGN(len, sectorsize);
4321		em = get_extent(inode, NULL, 0, offset, len, 0);
4322		if (IS_ERR_OR_NULL(em))
4323			return em;
4324
4325		/* if this isn't a hole return it */
4326		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4327		    em->block_start != EXTENT_MAP_HOLE) {
4328			return em;
4329		}
4330
4331		/* this is a hole, advance to the next extent */
4332		offset = extent_map_end(em);
4333		free_extent_map(em);
4334		if (offset >= last)
4335			break;
4336	}
4337	return NULL;
4338}
4339
4340int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4341		__u64 start, __u64 len, get_extent_t *get_extent)
4342{
4343	int ret = 0;
4344	u64 off = start;
4345	u64 max = start + len;
4346	u32 flags = 0;
4347	u32 found_type;
4348	u64 last;
4349	u64 last_for_get_extent = 0;
4350	u64 disko = 0;
4351	u64 isize = i_size_read(inode);
4352	struct btrfs_key found_key;
4353	struct extent_map *em = NULL;
4354	struct extent_state *cached_state = NULL;
4355	struct btrfs_path *path;
4356	struct btrfs_root *root = BTRFS_I(inode)->root;
4357	int end = 0;
4358	u64 em_start = 0;
4359	u64 em_len = 0;
4360	u64 em_end = 0;
4361
4362	if (len == 0)
4363		return -EINVAL;
4364
4365	path = btrfs_alloc_path();
4366	if (!path)
4367		return -ENOMEM;
4368	path->leave_spinning = 1;
4369
4370	start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4371	len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
4372
4373	/*
4374	 * lookup the last file extent.  We're not using i_size here
4375	 * because there might be preallocation past i_size
4376	 */
4377	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4378				       0);
4379	if (ret < 0) {
4380		btrfs_free_path(path);
4381		return ret;
4382	}
4383	WARN_ON(!ret);
4384	path->slots[0]--;
4385	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4386	found_type = found_key.type;
4387
4388	/* No extents, but there might be delalloc bits */
4389	if (found_key.objectid != btrfs_ino(inode) ||
4390	    found_type != BTRFS_EXTENT_DATA_KEY) {
4391		/* have to trust i_size as the end */
4392		last = (u64)-1;
4393		last_for_get_extent = isize;
4394	} else {
4395		/*
4396		 * remember the start of the last extent.  There are a
4397		 * bunch of different factors that go into the length of the
4398		 * extent, so it's much less complex to remember where it started
4399		 */
4400		last = found_key.offset;
4401		last_for_get_extent = last + 1;
4402	}
4403	btrfs_release_path(path);
4404
4405	/*
4406	 * we might have some extents allocated but more delalloc past those
4407	 * extents.  so, we trust isize unless the start of the last extent is
4408	 * beyond isize
4409	 */
4410	if (last < isize) {
4411		last = (u64)-1;
4412		last_for_get_extent = isize;
4413	}
4414
4415	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4416			 &cached_state);
4417
4418	em = get_extent_skip_holes(inode, start, last_for_get_extent,
4419				   get_extent);
4420	if (!em)
4421		goto out;
4422	if (IS_ERR(em)) {
4423		ret = PTR_ERR(em);
4424		goto out;
4425	}
4426
4427	while (!end) {
4428		u64 offset_in_extent = 0;
4429
4430		/* break if the extent we found is outside the range */
4431		if (em->start >= max || extent_map_end(em) < off)
4432			break;
4433
4434		/*
4435		 * get_extent may return an extent that starts before our
4436		 * requested range.  We have to make sure the ranges
4437		 * we return to fiemap always move forward and don't
4438		 * overlap, so adjust the offsets here
4439		 */
4440		em_start = max(em->start, off);
4441
4442		/*
4443		 * record the offset from the start of the extent
4444		 * for adjusting the disk offset below.  Only do this if the
4445		 * extent isn't compressed since our in ram offset may be past
4446		 * what we have actually allocated on disk.
4447		 */
4448		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4449			offset_in_extent = em_start - em->start;
4450		em_end = extent_map_end(em);
4451		em_len = em_end - em_start;
4452		disko = 0;
4453		flags = 0;
4454
4455		/*
4456		 * bump off for our next call to get_extent
4457		 */
4458		off = extent_map_end(em);
4459		if (off >= max)
4460			end = 1;
4461
4462		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4463			end = 1;
4464			flags |= FIEMAP_EXTENT_LAST;
4465		} else if (em->block_start == EXTENT_MAP_INLINE) {
4466			flags |= (FIEMAP_EXTENT_DATA_INLINE |
4467				  FIEMAP_EXTENT_NOT_ALIGNED);
4468		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
4469			flags |= (FIEMAP_EXTENT_DELALLOC |
4470				  FIEMAP_EXTENT_UNKNOWN);
4471		} else if (fieinfo->fi_extents_max) {
4472			u64 bytenr = em->block_start -
4473				(em->start - em->orig_start);
4474
4475			disko = em->block_start + offset_in_extent;
4476
4477			/*
4478			 * As btrfs supports shared space, this information
4479			 * can be exported to userspace tools via
4480			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4481			 * then we're just getting a count and we can skip the
4482			 * lookup stuff.
4483			 */
4484			ret = btrfs_check_shared(NULL, root->fs_info,
4485						 root->objectid,
4486						 btrfs_ino(inode), bytenr);
4487			if (ret < 0)
4488				goto out_free;
4489			if (ret)
4490				flags |= FIEMAP_EXTENT_SHARED;
4491			ret = 0;
4492		}
4493		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4494			flags |= FIEMAP_EXTENT_ENCODED;
4495		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4496			flags |= FIEMAP_EXTENT_UNWRITTEN;
4497
4498		free_extent_map(em);
4499		em = NULL;
4500		if ((em_start >= last) || em_len == (u64)-1 ||
4501		   (last == (u64)-1 && isize <= em_end)) {
4502			flags |= FIEMAP_EXTENT_LAST;
4503			end = 1;
4504		}
4505
4506		/* now scan forward to see if this is really the last extent. */
4507		em = get_extent_skip_holes(inode, off, last_for_get_extent,
4508					   get_extent);
4509		if (IS_ERR(em)) {
4510			ret = PTR_ERR(em);
4511			goto out;
4512		}
4513		if (!em) {
4514			flags |= FIEMAP_EXTENT_LAST;
4515			end = 1;
4516		}
4517		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4518					      em_len, flags);
4519		if (ret) {
4520			if (ret == 1)
4521				ret = 0;
4522			goto out_free;
4523		}
4524	}
4525out_free:
4526	free_extent_map(em);
4527out:
4528	btrfs_free_path(path);
4529	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4530			     &cached_state, GFP_NOFS);
4531	return ret;
4532}
4533
4534static void __free_extent_buffer(struct extent_buffer *eb)
4535{
4536	btrfs_leak_debug_del(&eb->leak_list);
4537	kmem_cache_free(extent_buffer_cache, eb);
4538}
4539
4540int extent_buffer_under_io(struct extent_buffer *eb)
4541{
4542	return (atomic_read(&eb->io_pages) ||
4543		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4544		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4545}
4546
4547/*
4548 * Helper for releasing extent buffer page.
4549 */
4550static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4551{
4552	unsigned long index;
4553	struct page *page;
4554	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4555
4556	BUG_ON(extent_buffer_under_io(eb));
4557
4558	index = num_extent_pages(eb->start, eb->len);
4559	if (index == 0)
4560		return;
4561
4562	do {
4563		index--;
4564		page = eb->pages[index];
4565		if (!page)
4566			continue;
4567		if (mapped)
4568			spin_lock(&page->mapping->private_lock);
4569		/*
4570		 * We do this since we'll remove the pages after we've
4571		 * removed the eb from the radix tree, so we could race
4572		 * and have this page now attached to the new eb.  So
4573		 * only clear page_private if it's still connected to
4574		 * this eb.
4575		 */
4576		if (PagePrivate(page) &&
4577		    page->private == (unsigned long)eb) {
4578			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4579			BUG_ON(PageDirty(page));
4580			BUG_ON(PageWriteback(page));
4581			/*
4582			 * We need to make sure we haven't been attached
4583			 * to a new eb.
4584			 */
4585			ClearPagePrivate(page);
4586			set_page_private(page, 0);
4587			/* One for the page private */
4588			put_page(page);
4589		}
4590
4591		if (mapped)
4592			spin_unlock(&page->mapping->private_lock);
4593
4594		/* One for when we allocated the page */
4595		put_page(page);
4596	} while (index != 0);
4597}
4598
4599/*
4600 * Helper for releasing the extent buffer.
4601 */
4602static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4603{
4604	btrfs_release_extent_buffer_page(eb);
 
4605	__free_extent_buffer(eb);
4606}
4607
4608static struct extent_buffer *
4609__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4610		      unsigned long len)
4611{
4612	struct extent_buffer *eb = NULL;
4613
4614	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4615	eb->start = start;
4616	eb->len = len;
4617	eb->fs_info = fs_info;
4618	eb->bflags = 0;
4619	rwlock_init(&eb->lock);
4620	atomic_set(&eb->write_locks, 0);
4621	atomic_set(&eb->read_locks, 0);
4622	atomic_set(&eb->blocking_readers, 0);
4623	atomic_set(&eb->blocking_writers, 0);
4624	atomic_set(&eb->spinning_readers, 0);
4625	atomic_set(&eb->spinning_writers, 0);
4626	eb->lock_nested = 0;
4627	init_waitqueue_head(&eb->write_lock_wq);
4628	init_waitqueue_head(&eb->read_lock_wq);
4629
4630	btrfs_leak_debug_add(&eb->leak_list, &buffers);
 
4631
4632	spin_lock_init(&eb->refs_lock);
4633	atomic_set(&eb->refs, 1);
4634	atomic_set(&eb->io_pages, 0);
4635
4636	/*
4637	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4638	 */
4639	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4640		> MAX_INLINE_EXTENT_BUFFER_SIZE);
4641	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4642
4643	return eb;
4644}
4645
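/*
 * Allocate an unmapped (dummy) extent buffer and copy the contents of @src
 * into it.  The clone is marked uptodate and EXTENT_BUFFER_DUMMY, so it is
 * never written back to disk.
 */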
4646struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4647{
4648	unsigned long i;
4649	struct page *p;
4650	struct extent_buffer *new;
4651	unsigned long num_pages = num_extent_pages(src->start, src->len);
4652
4653	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4654	if (new == NULL)
4655		return NULL;
4656
4657	for (i = 0; i < num_pages; i++) {
4658		p = alloc_page(GFP_NOFS);
4659		if (!p) {
4660			btrfs_release_extent_buffer(new);
4661			return NULL;
4662		}
4663		attach_extent_buffer_page(new, p);
4664		WARN_ON(PageDirty(p));
4665		SetPageUptodate(p);
4666		new->pages[i] = p;
 
4667	}
4668
4669	copy_extent_buffer(new, src, 0, 0, src->len);
4670	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4671	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4672
4673	return new;
4674}
4675
4676struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4677						  u64 start, unsigned long len)
4678{
4679	struct extent_buffer *eb;
4680	unsigned long num_pages;
4681	unsigned long i;
4682
4683	num_pages = num_extent_pages(start, len);
4684
4685	eb = __alloc_extent_buffer(fs_info, start, len);
4686	if (!eb)
4687		return NULL;
4688
 
4689	for (i = 0; i < num_pages; i++) {
4690		eb->pages[i] = alloc_page(GFP_NOFS);
4691		if (!eb->pages[i])
4692			goto err;
4693	}
4694	set_extent_buffer_uptodate(eb);
4695	btrfs_set_header_nritems(eb, 0);
4696	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4697
4698	return eb;
4699err:
4700	for (; i > 0; i--)
4701		__free_page(eb->pages[i - 1]);
4702	__free_extent_buffer(eb);
4703	return NULL;
4704}
4705
4706struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4707						u64 start)
4708{
4709	unsigned long len;
4710
4711	if (!fs_info) {
4712		/*
4713		 * Called only from tests that don't always have a fs_info
4714		 * available, but we know that nodesize is 4096
4715		 */
4716		len = 4096;
4717	} else {
4718		len = fs_info->tree_root->nodesize;
4719	}
4720
4721	return __alloc_dummy_extent_buffer(fs_info, start, len);
4722}
4723
4724static void check_buffer_tree_ref(struct extent_buffer *eb)
4725{
4726	int refs;
4727	/* the ref bit is tricky.  We have to make sure it is set
4728	 * if we have the buffer dirty.   Otherwise the
4729	 * code to free a buffer can end up dropping a dirty
4730	 * page
4731	 *
4732	 * Once the ref bit is set, it won't go away while the
4733	 * buffer is dirty or in writeback, and it also won't
4734	 * go away while we have the reference count on the
4735	 * eb bumped.
4736	 *
4737	 * We can't just set the ref bit without bumping the
4738	 * ref on the eb because free_extent_buffer might
4739	 * see the ref bit and try to clear it.  If this happens
4740	 * free_extent_buffer might end up dropping our original
4741	 * ref by mistake and freeing the page before we are able
4742	 * to add one more ref.
4743	 *
4744	 * So bump the ref count first, then set the bit.  If someone
4745	 * beat us to it, drop the ref we added.
4746	 */
4747	refs = atomic_read(&eb->refs);
4748	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4749		return;
4750
4751	spin_lock(&eb->refs_lock);
4752	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4753		atomic_inc(&eb->refs);
4754	spin_unlock(&eb->refs_lock);
4755}
4756
4757static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4758		struct page *accessed)
4759{
4760	unsigned long num_pages, i;
4761
4762	check_buffer_tree_ref(eb);
4763
4764	num_pages = num_extent_pages(eb->start, eb->len);
4765	for (i = 0; i < num_pages; i++) {
4766		struct page *p = eb->pages[i];
4767
4768		if (p != accessed)
4769			mark_page_accessed(p);
4770	}
4771}
4772
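/*
 * Look up the extent buffer starting at @start in the fs_info buffer radix
 * tree and take a reference on it.  Returns NULL if it is not present or is
 * already on its way to being freed.
 */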
4773struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4774					 u64 start)
4775{
4776	struct extent_buffer *eb;
4777
4778	rcu_read_lock();
4779	eb = radix_tree_lookup(&fs_info->buffer_radix,
4780			       start >> PAGE_SHIFT);
4781	if (eb && atomic_inc_not_zero(&eb->refs)) {
4782		rcu_read_unlock();
4783		/*
4784		 * Lock our eb's refs_lock to avoid races with
4785		 * free_extent_buffer. When we get our eb it might be flagged
4786		 * with EXTENT_BUFFER_STALE and another task running
4787		 * free_extent_buffer might have seen that flag set,
4788		 * eb->refs == 2, that the buffer isn't under IO (dirty and
4789		 * writeback flags not set) and it's still in the tree (flag
4790		 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4791		 * of decrementing the extent buffer's reference count twice.
4792		 * So here we could race and increment the eb's reference count,
4793		 * clear its stale flag, mark it as dirty and drop our reference
4794		 * before the other task finishes executing free_extent_buffer,
4795		 * which would later result in an attempt to free an extent
4796		 * buffer that is dirty.
4797		 */
4798		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4799			spin_lock(&eb->refs_lock);
4800			spin_unlock(&eb->refs_lock);
4801		}
4802		mark_extent_buffer_accessed(eb, NULL);
4803		return eb;
4804	}
4805	rcu_read_unlock();
4806
4807	return NULL;
4808}
4809
4810#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4811struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4812					       u64 start)
4813{
4814	struct extent_buffer *eb, *exists = NULL;
4815	int ret;
4816
4817	eb = find_extent_buffer(fs_info, start);
4818	if (eb)
4819		return eb;
4820	eb = alloc_dummy_extent_buffer(fs_info, start);
4821	if (!eb)
4822		return NULL;
4823	eb->fs_info = fs_info;
4824again:
4825	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4826	if (ret)
 
4827		goto free_eb;
 
4828	spin_lock(&fs_info->buffer_lock);
4829	ret = radix_tree_insert(&fs_info->buffer_radix,
4830				start >> PAGE_SHIFT, eb);
4831	spin_unlock(&fs_info->buffer_lock);
4832	radix_tree_preload_end();
4833	if (ret == -EEXIST) {
4834		exists = find_extent_buffer(fs_info, start);
4835		if (exists)
4836			goto free_eb;
4837		else
4838			goto again;
4839	}
4840	check_buffer_tree_ref(eb);
4841	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4842
4843	/*
4844	 * We will free dummy extent buffers if they come into
4845	 * free_extent_buffer with a ref count of 2, but if we are using this we
4846	 * want the buffers to stay in memory until we're done with them, so
4847	 * bump the ref count again.
4848	 */
4849	atomic_inc(&eb->refs);
4850	return eb;
4851free_eb:
4852	btrfs_release_extent_buffer(eb);
4853	return exists;
4854}
4855#endif
4856
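/*
 * Find or create the extent buffer for the tree block at @start.  The
 * backing pages come from the btree inode's page cache and the new buffer
 * is inserted into the fs_info buffer radix tree.
 */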
4857struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4858					  u64 start)
4859{
4860	unsigned long len = fs_info->tree_root->nodesize;
4861	unsigned long num_pages = num_extent_pages(start, len);
4862	unsigned long i;
4863	unsigned long index = start >> PAGE_SHIFT;
4864	struct extent_buffer *eb;
4865	struct extent_buffer *exists = NULL;
4866	struct page *p;
4867	struct address_space *mapping = fs_info->btree_inode->i_mapping;
4868	int uptodate = 1;
4869	int ret;
4870
4871	eb = find_extent_buffer(fs_info, start);
4872	if (eb)
4873		return eb;
4874
4875	eb = __alloc_extent_buffer(fs_info, start, len);
4876	if (!eb)
4877		return NULL;
4878
 
4879	for (i = 0; i < num_pages; i++, index++) {
4880		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4881		if (!p)
 
4882			goto free_eb;
 
4883
4884		spin_lock(&mapping->private_lock);
4885		if (PagePrivate(p)) {
4886			/*
4887			 * We could have already allocated an eb for this page
4888			 * and attached one so lets see if we can get a ref on
4889			 * the existing eb, and if we can we know it's good and
4890			 * we can just return that one, else we know we can just
4891			 * overwrite page->private.
4892			 */
4893			exists = (struct extent_buffer *)p->private;
4894			if (atomic_inc_not_zero(&exists->refs)) {
4895				spin_unlock(&mapping->private_lock);
4896				unlock_page(p);
4897				put_page(p);
4898				mark_extent_buffer_accessed(exists, p);
4899				goto free_eb;
4900			}
4901			exists = NULL;
4902
4903			/*
4904			 * Do this so attach doesn't complain and we need to
4905			 * drop the ref the old guy had.
4906			 */
4907			ClearPagePrivate(p);
4908			WARN_ON(PageDirty(p));
4909			put_page(p);
4910		}
4911		attach_extent_buffer_page(eb, p);
4912		spin_unlock(&mapping->private_lock);
4913		WARN_ON(PageDirty(p));
4914		eb->pages[i] = p;
4915		if (!PageUptodate(p))
4916			uptodate = 0;
4917
4918		/*
4919		 * see below about how we avoid a nasty race with release page
4920		 * and why we unlock later
4921		 */
4922	}
4923	if (uptodate)
4924		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4925again:
4926	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4927	if (ret)
 
4928		goto free_eb;
 
4929
4930	spin_lock(&fs_info->buffer_lock);
4931	ret = radix_tree_insert(&fs_info->buffer_radix,
4932				start >> PAGE_SHIFT, eb);
4933	spin_unlock(&fs_info->buffer_lock);
4934	radix_tree_preload_end();
4935	if (ret == -EEXIST) {
4936		exists = find_extent_buffer(fs_info, start);
4937		if (exists)
4938			goto free_eb;
4939		else
4940			goto again;
4941	}
4942	/* add one reference for the tree */
4943	check_buffer_tree_ref(eb);
4944	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4945
4946	/*
4947	 * there is a race where release page may have
4948	 * tried to find this extent buffer in the radix tree
4949	 * but failed.  It will tell the VM it is safe to
4950	 * reclaim the page, and it will clear the page private bit.
4951	 * We must make sure to set the page private bit properly
4952	 * after the extent buffer is in the radix tree so
4953	 * it doesn't get lost
4954	 */
4955	SetPageChecked(eb->pages[0]);
4956	for (i = 1; i < num_pages; i++) {
4957		p = eb->pages[i];
4958		ClearPageChecked(p);
4959		unlock_page(p);
4960	}
4961	unlock_page(eb->pages[0]);
4962	return eb;
4963
4964free_eb:
4965	WARN_ON(!atomic_dec_and_test(&eb->refs));
4966	for (i = 0; i < num_pages; i++) {
4967		if (eb->pages[i])
4968			unlock_page(eb->pages[i]);
4969	}
4970
4971	btrfs_release_extent_buffer(eb);
4972	return exists;
4973}
4974
4975static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4976{
4977	struct extent_buffer *eb =
4978			container_of(head, struct extent_buffer, rcu_head);
4979
4980	__free_extent_buffer(eb);
4981}
4982
4983	/* Expects to have eb->refs_lock already held */
4984static int release_extent_buffer(struct extent_buffer *eb)
 
4985{
4986	WARN_ON(atomic_read(&eb->refs) == 0);
4987	if (atomic_dec_and_test(&eb->refs)) {
4988		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4989			struct btrfs_fs_info *fs_info = eb->fs_info;
4990
4991			spin_unlock(&eb->refs_lock);
4992
4993			spin_lock(&fs_info->buffer_lock);
4994			radix_tree_delete(&fs_info->buffer_radix,
4995					  eb->start >> PAGE_SHIFT);
4996			spin_unlock(&fs_info->buffer_lock);
4997		} else {
4998			spin_unlock(&eb->refs_lock);
4999		}
5000
 
5001		/* Should be safe to release our pages at this point */
5002		btrfs_release_extent_buffer_page(eb);
5003#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5004		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
5005			__free_extent_buffer(eb);
5006			return 1;
5007		}
5008#endif
5009		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5010		return 1;
5011	}
5012	spin_unlock(&eb->refs_lock);
5013
5014	return 0;
5015}
5016
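/*
 * Drop one reference on an extent buffer, also dropping the implicit
 * references kept for dummy buffers and for stale buffers that are no
 * longer in the radix tree and not under IO.
 */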
5017void free_extent_buffer(struct extent_buffer *eb)
5018{
5019	int refs;
5020	int old;
5021	if (!eb)
5022		return;
5023
5024	while (1) {
5025		refs = atomic_read(&eb->refs);
5026		if (refs <= 3)
5027			break;
5028		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5029		if (old == refs)
5030			return;
5031	}
5032
5033	spin_lock(&eb->refs_lock);
5034	if (atomic_read(&eb->refs) == 2 &&
5035	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
5036		atomic_dec(&eb->refs);
5037
5038	if (atomic_read(&eb->refs) == 2 &&
5039	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5040	    !extent_buffer_under_io(eb) &&
5041	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5042		atomic_dec(&eb->refs);
5043
5044	/*
5045	 * I know this is terrible, but it's temporary until we stop tracking
5046	 * the uptodate bits and such for the extent buffers.
5047	 */
5048	release_extent_buffer(eb);
5049}
5050
5051void free_extent_buffer_stale(struct extent_buffer *eb)
5052{
5053	if (!eb)
5054		return;
5055
5056	spin_lock(&eb->refs_lock);
5057	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5058
5059	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5060	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5061		atomic_dec(&eb->refs);
5062	release_extent_buffer(eb);
5063}
5064
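/*
 * Clear the dirty bit on every page of the extent buffer and drop the
 * PAGECACHE_TAG_DIRTY tags so writeback will not pick the pages up again.
 */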
5065void clear_extent_buffer_dirty(struct extent_buffer *eb)
5066{
5067	unsigned long i;
5068	unsigned long num_pages;
5069	struct page *page;
5070
5071	num_pages = num_extent_pages(eb->start, eb->len);
5072
5073	for (i = 0; i < num_pages; i++) {
5074		page = eb->pages[i];
5075		if (!PageDirty(page))
5076			continue;
5077
5078		lock_page(page);
5079		WARN_ON(!PagePrivate(page));
5080
5081		clear_page_dirty_for_io(page);
5082		spin_lock_irq(&page->mapping->tree_lock);
5083		if (!PageDirty(page)) {
5084			radix_tree_tag_clear(&page->mapping->page_tree,
5085						page_index(page),
5086						PAGECACHE_TAG_DIRTY);
5087		}
5088		spin_unlock_irq(&page->mapping->tree_lock);
5089		ClearPageError(page);
5090		unlock_page(page);
5091	}
5092	WARN_ON(atomic_read(&eb->refs) == 0);
5093}
5094
5095int set_extent_buffer_dirty(struct extent_buffer *eb)
5096{
5097	unsigned long i;
5098	unsigned long num_pages;
5099	int was_dirty = 0;
5100
5101	check_buffer_tree_ref(eb);
5102
5103	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5104
5105	num_pages = num_extent_pages(eb->start, eb->len);
5106	WARN_ON(atomic_read(&eb->refs) == 0);
5107	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5108
5109	for (i = 0; i < num_pages; i++)
5110		set_page_dirty(eb->pages[i]);
5111	return was_dirty;
5112}
5113
5114void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5115{
5116	unsigned long i;
5117	struct page *page;
5118	unsigned long num_pages;
5119
5120	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5121	num_pages = num_extent_pages(eb->start, eb->len);
5122	for (i = 0; i < num_pages; i++) {
5123		page = eb->pages[i];
5124		if (page)
5125			ClearPageUptodate(page);
5126	}
5127}
5128
5129void set_extent_buffer_uptodate(struct extent_buffer *eb)
5130{
5131	unsigned long i;
5132	struct page *page;
5133	unsigned long num_pages;
5134
5135	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5136	num_pages = num_extent_pages(eb->start, eb->len);
5137	for (i = 0; i < num_pages; i++) {
5138		page = eb->pages[i];
5139		SetPageUptodate(page);
5140	}
5141}
5142
5143int extent_buffer_uptodate(struct extent_buffer *eb)
5144{
5145	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5146}
5147
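/*
 * Read the backing pages of an extent buffer from disk.  @wait selects
 * whether we may block on the page locks (WAIT_NONE only trylocks) and
 * whether we wait for the reads to finish (WAIT_COMPLETE).  Returns 0 on
 * success or a negative error if a read could not be submitted or, with
 * WAIT_COMPLETE, if a page did not become uptodate.
 */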
5148int read_extent_buffer_pages(struct extent_io_tree *tree,
5149			     struct extent_buffer *eb, u64 start, int wait,
5150			     get_extent_t *get_extent, int mirror_num)
5151{
5152	unsigned long i;
5153	unsigned long start_i;
5154	struct page *page;
5155	int err;
5156	int ret = 0;
5157	int locked_pages = 0;
5158	int all_uptodate = 1;
5159	unsigned long num_pages;
5160	unsigned long num_reads = 0;
5161	struct bio *bio = NULL;
5162	unsigned long bio_flags = 0;
5163
5164	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5165		return 0;
5166
5167	if (start) {
5168		WARN_ON(start < eb->start);
5169		start_i = (start >> PAGE_SHIFT) -
5170			(eb->start >> PAGE_SHIFT);
5171	} else {
5172		start_i = 0;
5173	}
5174
5175	num_pages = num_extent_pages(eb->start, eb->len);
5176	for (i = start_i; i < num_pages; i++) {
5177		page = eb->pages[i];
5178		if (wait == WAIT_NONE) {
5179			if (!trylock_page(page))
5180				goto unlock_exit;
5181		} else {
5182			lock_page(page);
5183		}
5184		locked_pages++;
5185		if (!PageUptodate(page)) {
5186			num_reads++;
5187			all_uptodate = 0;
5188		}
5189	}
 
5190	if (all_uptodate) {
5191		if (start_i == 0)
5192			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5193		goto unlock_exit;
5194	}
5195
5196	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5197	eb->read_mirror = 0;
5198	atomic_set(&eb->io_pages, num_reads);
5199	for (i = start_i; i < num_pages; i++) {
5200		page = eb->pages[i];
 
5201		if (!PageUptodate(page)) {
5202			ClearPageError(page);
5203			err = __extent_read_full_page(tree, page,
5204						      get_extent, &bio,
5205						      mirror_num, &bio_flags,
5206						      READ | REQ_META);
5207			if (err)
5208				ret = err;
5209		} else {
5210			unlock_page(page);
5211		}
5212	}
5213
5214	if (bio) {
5215		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
5216				     bio_flags);
5217		if (err)
5218			return err;
5219	}
5220
5221	if (ret || wait != WAIT_COMPLETE)
5222		return ret;
5223
5224	for (i = start_i; i < num_pages; i++) {
5225		page = eb->pages[i];
5226		wait_on_page_locked(page);
5227		if (!PageUptodate(page))
5228			ret = -EIO;
5229	}
5230
5231	return ret;
5232
5233unlock_exit:
5234	i = start_i;
5235	while (locked_pages > 0) {
5236		page = eb->pages[i];
5237		i++;
5238		unlock_page(page);
5239		locked_pages--;
5240	}
5241	return ret;
5242}
5243
5244void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5245			unsigned long start,
5246			unsigned long len)
5247{
5248	size_t cur;
5249	size_t offset;
5250	struct page *page;
5251	char *kaddr;
5252	char *dst = (char *)dstv;
5253	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5254	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5255
5256	WARN_ON(start > eb->len);
5257	WARN_ON(start + len > eb->start + eb->len);
5258
5259	offset = (start_offset + start) & (PAGE_SIZE - 1);
5260
5261	while (len > 0) {
5262		page = eb->pages[i];
5263
5264		cur = min(len, (PAGE_SIZE - offset));
5265		kaddr = page_address(page);
5266		memcpy(dst, kaddr + offset, cur);
5267
5268		dst += cur;
5269		len -= cur;
5270		offset = 0;
5271		i++;
5272	}
5273}
5274
5275int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5276			unsigned long start,
5277			unsigned long len)
5278{
5279	size_t cur;
5280	size_t offset;
5281	struct page *page;
5282	char *kaddr;
5283	char __user *dst = (char __user *)dstv;
5284	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5285	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5286	int ret = 0;
5287
5288	WARN_ON(start > eb->len);
5289	WARN_ON(start + len > eb->start + eb->len);
5290
5291	offset = (start_offset + start) & (PAGE_SIZE - 1);
5292
5293	while (len > 0) {
5294		page = eb->pages[i];
5295
5296		cur = min(len, (PAGE_SIZE - offset));
5297		kaddr = page_address(page);
5298		if (copy_to_user(dst, kaddr + offset, cur)) {
5299			ret = -EFAULT;
5300			break;
5301		}
5302
5303		dst += cur;
5304		len -= cur;
5305		offset = 0;
5306		i++;
5307	}
5308
5309	return ret;
5310}
5311
5312int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5313			       unsigned long min_len, char **map,
5314			       unsigned long *map_start,
5315			       unsigned long *map_len)
5316{
5317	size_t offset = start & (PAGE_SIZE - 1);
5318	char *kaddr;
5319	struct page *p;
5320	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5321	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5322	unsigned long end_i = (start_offset + start + min_len - 1) >>
5323		PAGE_SHIFT;
5324
5325	if (i != end_i)
5326		return -EINVAL;
5327
5328	if (i == 0) {
5329		offset = start_offset;
5330		*map_start = 0;
5331	} else {
5332		offset = 0;
5333		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5334	}
5335
5336	if (start + min_len > eb->len) {
5337		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
5338		       "wanted %lu %lu\n",
5339		       eb->start, eb->len, start, min_len);
5340		return -EINVAL;
5341	}
5342
5343	p = eb->pages[i];
5344	kaddr = page_address(p);
5345	*map = kaddr + offset;
5346	*map_len = PAGE_SIZE - offset;
5347	return 0;
5348}
5349
5350int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5351			  unsigned long start,
5352			  unsigned long len)
5353{
5354	size_t cur;
5355	size_t offset;
5356	struct page *page;
5357	char *kaddr;
5358	char *ptr = (char *)ptrv;
5359	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5360	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5361	int ret = 0;
5362
5363	WARN_ON(start > eb->len);
5364	WARN_ON(start + len > eb->start + eb->len);
5365
5366	offset = (start_offset + start) & (PAGE_SIZE - 1);
5367
5368	while (len > 0) {
5369		page = eb->pages[i];
5370
5371		cur = min(len, (PAGE_SIZE - offset));
5372
5373		kaddr = page_address(page);
5374		ret = memcmp(ptr, kaddr + offset, cur);
5375		if (ret)
5376			break;
5377
5378		ptr += cur;
5379		len -= cur;
5380		offset = 0;
5381		i++;
5382	}
5383	return ret;
5384}
5385
5386void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5387			 unsigned long start, unsigned long len)
5388{
5389	size_t cur;
5390	size_t offset;
5391	struct page *page;
5392	char *kaddr;
5393	char *src = (char *)srcv;
5394	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5395	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5396
5397	WARN_ON(start > eb->len);
5398	WARN_ON(start + len > eb->start + eb->len);
5399
5400	offset = (start_offset + start) & (PAGE_SIZE - 1);
5401
5402	while (len > 0) {
5403		page = eb->pages[i];
5404		WARN_ON(!PageUptodate(page));
5405
5406		cur = min(len, PAGE_SIZE - offset);
5407		kaddr = page_address(page);
5408		memcpy(kaddr + offset, src, cur);
5409
5410		src += cur;
5411		len -= cur;
5412		offset = 0;
5413		i++;
5414	}
5415}
5416
5417void memset_extent_buffer(struct extent_buffer *eb, char c,
5418			  unsigned long start, unsigned long len)
5419{
5420	size_t cur;
5421	size_t offset;
5422	struct page *page;
5423	char *kaddr;
5424	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5425	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5426
5427	WARN_ON(start > eb->len);
5428	WARN_ON(start + len > eb->start + eb->len);
5429
5430	offset = (start_offset + start) & (PAGE_SIZE - 1);
5431
5432	while (len > 0) {
5433		page = eb->pages[i];
5434		WARN_ON(!PageUptodate(page));
5435
5436		cur = min(len, PAGE_SIZE - offset);
5437		kaddr = page_address(page);
5438		memset(kaddr + offset, c, cur);
5439
5440		len -= cur;
5441		offset = 0;
5442		i++;
5443	}
5444}
5445
5446void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5447			unsigned long dst_offset, unsigned long src_offset,
5448			unsigned long len)
5449{
5450	u64 dst_len = dst->len;
5451	size_t cur;
5452	size_t offset;
5453	struct page *page;
5454	char *kaddr;
5455	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5456	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5457
5458	WARN_ON(src->len != dst_len);
5459
5460	offset = (start_offset + dst_offset) &
5461		(PAGE_SIZE - 1);
5462
5463	while (len > 0) {
5464		page = dst->pages[i];
5465		WARN_ON(!PageUptodate(page));
5466
5467		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5468
5469		kaddr = page_address(page);
5470		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5471
5472		src_offset += cur;
5473		len -= cur;
5474		offset = 0;
5475		i++;
5476	}
5477}
5478
5479/*
5480 * The extent buffer bitmap operations are done with byte granularity because
5481 * bitmap items are not guaranteed to be aligned to a word and therefore a
5482 * single word in a bitmap may straddle two pages in the extent buffer.
5483 */
5484#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
5485#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
5486#define BITMAP_FIRST_BYTE_MASK(start) \
5487	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
5488#define BITMAP_LAST_BYTE_MASK(nbits) \
5489	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
5490
5491/*
5492 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5493 * given bit number
5494 * @eb: the extent buffer
5495 * @start: offset of the bitmap item in the extent buffer
5496 * @nr: bit number
5497 * @page_index: return index of the page in the extent buffer that contains the
5498 * given bit number
5499 * @page_offset: return offset into the page given by page_index
5500 *
5501 * This helper hides the ugliness of finding the byte in an extent buffer which
5502 * contains a given bit.
5503 */
5504static inline void eb_bitmap_offset(struct extent_buffer *eb,
5505				    unsigned long start, unsigned long nr,
5506				    unsigned long *page_index,
5507				    size_t *page_offset)
5508{
5509	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5510	size_t byte_offset = BIT_BYTE(nr);
5511	size_t offset;
5512
5513	/*
5514	 * The byte we want is the offset of the extent buffer + the offset of
5515	 * the bitmap item in the extent buffer + the offset of the byte in the
5516	 * bitmap item.
5517	 */
5518	offset = start_offset + start + byte_offset;
5519
5520	*page_index = offset >> PAGE_SHIFT;
5521	*page_offset = offset & (PAGE_SIZE - 1);
5522}
5523
5524/**
5525 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5526 * @eb: the extent buffer
5527 * @start: offset of the bitmap item in the extent buffer
5528 * @nr: bit number to test
5529 */
5530int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5531			   unsigned long nr)
5532{
5533	char *kaddr;
5534	struct page *page;
5535	unsigned long i;
5536	size_t offset;
5537
5538	eb_bitmap_offset(eb, start, nr, &i, &offset);
5539	page = eb->pages[i];
5540	WARN_ON(!PageUptodate(page));
5541	kaddr = page_address(page);
5542	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5543}
5544
5545/**
5546 * extent_buffer_bitmap_set - set an area of a bitmap
5547 * @eb: the extent buffer
5548 * @start: offset of the bitmap item in the extent buffer
5549 * @pos: bit number of the first bit
5550 * @len: number of bits to set
5551 */
5552void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5553			      unsigned long pos, unsigned long len)
5554{
5555	char *kaddr;
5556	struct page *page;
5557	unsigned long i;
5558	size_t offset;
5559	const unsigned int size = pos + len;
5560	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5561	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5562
5563	eb_bitmap_offset(eb, start, pos, &i, &offset);
5564	page = eb->pages[i];
5565	WARN_ON(!PageUptodate(page));
5566	kaddr = page_address(page);
5567
5568	while (len >= bits_to_set) {
5569		kaddr[offset] |= mask_to_set;
5570		len -= bits_to_set;
5571		bits_to_set = BITS_PER_BYTE;
5572		mask_to_set = ~0U;
5573		if (++offset >= PAGE_SIZE && len > 0) {
5574			offset = 0;
5575			page = eb->pages[++i];
5576			WARN_ON(!PageUptodate(page));
5577			kaddr = page_address(page);
5578		}
5579	}
5580	if (len) {
5581		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5582		kaddr[offset] |= mask_to_set;
5583	}
5584}
5585
5586
5587/**
5588 * extent_buffer_bitmap_clear - clear an area of a bitmap
5589 * @eb: the extent buffer
5590 * @start: offset of the bitmap item in the extent buffer
5591 * @pos: bit number of the first bit
5592 * @len: number of bits to clear
5593 */
5594void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5595				unsigned long pos, unsigned long len)
 
5596{
5597	char *kaddr;
5598	struct page *page;
5599	unsigned long i;
5600	size_t offset;
5601	const unsigned int size = pos + len;
5602	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5603	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5604
5605	eb_bitmap_offset(eb, start, pos, &i, &offset);
5606	page = eb->pages[i];
5607	WARN_ON(!PageUptodate(page));
5608	kaddr = page_address(page);
5609
5610	while (len >= bits_to_clear) {
5611		kaddr[offset] &= ~mask_to_clear;
5612		len -= bits_to_clear;
5613		bits_to_clear = BITS_PER_BYTE;
5614		mask_to_clear = ~0U;
5615		if (++offset >= PAGE_SIZE && len > 0) {
5616			offset = 0;
5617			page = eb->pages[++i];
5618			WARN_ON(!PageUptodate(page));
5619			kaddr = page_address(page);
5620		}
5621	}
5622	if (len) {
5623		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5624		kaddr[offset] &= ~mask_to_clear;
5625	}
5626}
5627
5628static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5629{
5630	unsigned long distance = (src > dst) ? src - dst : dst - src;
5631	return distance < len;
5632}
5633
5634static void copy_pages(struct page *dst_page, struct page *src_page,
5635		       unsigned long dst_off, unsigned long src_off,
5636		       unsigned long len)
5637{
5638	char *dst_kaddr = page_address(dst_page);
5639	char *src_kaddr;
5640	int must_memmove = 0;
5641
5642	if (dst_page != src_page) {
5643		src_kaddr = page_address(src_page);
5644	} else {
5645		src_kaddr = dst_kaddr;
5646		if (areas_overlap(src_off, dst_off, len))
5647			must_memmove = 1;
5648	}
5649
5650	if (must_memmove)
5651		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5652	else
5653		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5654}
5655
5656void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5657			   unsigned long src_offset, unsigned long len)
5658{
5659	size_t cur;
5660	size_t dst_off_in_page;
5661	size_t src_off_in_page;
5662	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5663	unsigned long dst_i;
5664	unsigned long src_i;
5665
5666	if (src_offset + len > dst->len) {
5667		btrfs_err(dst->fs_info,
5668			"memmove bogus src_offset %lu move "
5669		       "len %lu dst len %lu", src_offset, len, dst->len);
5670		BUG_ON(1);
5671	}
5672	if (dst_offset + len > dst->len) {
5673		btrfs_err(dst->fs_info,
5674			"memmove bogus dst_offset %lu move "
5675		       "len %lu dst len %lu", dst_offset, len, dst->len);
5676		BUG_ON(1);
5677	}
5678
5679	while (len > 0) {
5680		dst_off_in_page = (start_offset + dst_offset) &
5681			(PAGE_SIZE - 1);
5682		src_off_in_page = (start_offset + src_offset) &
5683			(PAGE_SIZE - 1);
5684
5685		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5686		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5687
5688		cur = min(len, (unsigned long)(PAGE_SIZE -
5689					       src_off_in_page));
5690		cur = min_t(unsigned long, cur,
5691			(unsigned long)(PAGE_SIZE - dst_off_in_page));
5692
5693		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5694			   dst_off_in_page, src_off_in_page, cur);
5695
5696		src_offset += cur;
5697		dst_offset += cur;
5698		len -= cur;
5699	}
5700}
5701
5702void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5703			   unsigned long src_offset, unsigned long len)
5704{
5705	size_t cur;
5706	size_t dst_off_in_page;
5707	size_t src_off_in_page;
5708	unsigned long dst_end = dst_offset + len - 1;
5709	unsigned long src_end = src_offset + len - 1;
5710	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5711	unsigned long dst_i;
5712	unsigned long src_i;
5713
5714	if (src_offset + len > dst->len) {
5715		btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
5716		       "len %lu len %lu", src_offset, len, dst->len);
5717		BUG_ON(1);
5718	}
5719	if (dst_offset + len > dst->len) {
5720		btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
5721		       "len %lu len %lu", dst_offset, len, dst->len);
5722		BUG_ON(1);
5723	}
5724	if (dst_offset < src_offset) {
5725		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5726		return;
5727	}
5728	while (len > 0) {
5729		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5730		src_i = (start_offset + src_end) >> PAGE_SHIFT;
5731
5732		dst_off_in_page = (start_offset + dst_end) &
5733			(PAGE_SIZE - 1);
5734		src_off_in_page = (start_offset + src_end) &
5735			(PAGE_SIZE - 1);
5736
5737		cur = min_t(unsigned long, len, src_off_in_page + 1);
5738		cur = min(cur, dst_off_in_page + 1);
5739		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5740			   dst_off_in_page - cur + 1,
5741			   src_off_in_page - cur + 1, cur);
5742
5743		dst_end -= cur;
5744		src_end -= cur;
5745		len -= cur;
5746	}
5747}
5748
5749int try_release_extent_buffer(struct page *page)
5750{
5751	struct extent_buffer *eb;
5752
5753	/*
5754	 * We need to make sure nobody is attaching this page to an eb right
5755	 * now.
5756	 */
5757	spin_lock(&page->mapping->private_lock);
5758	if (!PagePrivate(page)) {
5759		spin_unlock(&page->mapping->private_lock);
5760		return 1;
5761	}
5762
5763	eb = (struct extent_buffer *)page->private;
5764	BUG_ON(!eb);
5765
5766	/*
5767	 * This is a little awful but should be ok, we need to make sure that
5768	 * the eb doesn't disappear out from under us while we're looking at
5769	 * this page.
5770	 */
5771	spin_lock(&eb->refs_lock);
5772	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5773		spin_unlock(&eb->refs_lock);
5774		spin_unlock(&page->mapping->private_lock);
5775		return 0;
5776	}
5777	spin_unlock(&page->mapping->private_lock);
5778
5779	/*
5780	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5781	 * so just return, this page will likely be freed soon anyway.
5782	 */
5783	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5784		spin_unlock(&eb->refs_lock);
5785		return 0;
5786	}
5787
5788	return release_extent_buffer(eb);
5789}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/spinlock.h>
  10#include <linux/blkdev.h>
  11#include <linux/swap.h>
  12#include <linux/writeback.h>
  13#include <linux/pagevec.h>
  14#include <linux/prefetch.h>
  15#include <linux/cleancache.h>
  16#include "extent_io.h"
  17#include "extent-io-tree.h"
  18#include "extent_map.h"
  19#include "ctree.h"
  20#include "btrfs_inode.h"
  21#include "volumes.h"
  22#include "check-integrity.h"
  23#include "locking.h"
  24#include "rcu-string.h"
  25#include "backref.h"
  26#include "disk-io.h"
  27
  28static struct kmem_cache *extent_state_cache;
  29static struct kmem_cache *extent_buffer_cache;
  30static struct bio_set btrfs_bioset;
  31
  32static inline bool extent_state_in_tree(const struct extent_state *state)
  33{
  34	return !RB_EMPTY_NODE(&state->rb_node);
  35}
  36
  37#ifdef CONFIG_BTRFS_DEBUG
  38static LIST_HEAD(states);
  39static DEFINE_SPINLOCK(leak_lock);
  40
  41static inline void btrfs_leak_debug_add(spinlock_t *lock,
  42					struct list_head *new,
  43					struct list_head *head)
  44{
  45	unsigned long flags;
  46
  47	spin_lock_irqsave(lock, flags);
  48	list_add(new, head);
  49	spin_unlock_irqrestore(lock, flags);
  50}
  51
  52static inline void btrfs_leak_debug_del(spinlock_t *lock,
  53					struct list_head *entry)
  54{
  55	unsigned long flags;
  56
  57	spin_lock_irqsave(lock, flags);
  58	list_del(entry);
  59	spin_unlock_irqrestore(lock, flags);
  60}
  61
  62void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  63{
  64	struct extent_buffer *eb;
  65	unsigned long flags;
  66
  67	/*
  68	 * If we didn't get into open_ctree our allocated_ebs will not be
  69	 * initialized, so just skip this.
  70	 */
  71	if (!fs_info->allocated_ebs.next)
  72		return;
  73
  74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  75	while (!list_empty(&fs_info->allocated_ebs)) {
  76		eb = list_first_entry(&fs_info->allocated_ebs,
  77				      struct extent_buffer, leak_list);
  78		pr_err(
  79	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
  80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  81		       btrfs_header_owner(eb));
  82		list_del(&eb->leak_list);
  83		kmem_cache_free(extent_buffer_cache, eb);
  84	}
  85	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  86}
  87
  88static inline void btrfs_extent_state_leak_debug_check(void)
  89{
  90	struct extent_state *state;
  91
  92	while (!list_empty(&states)) {
  93		state = list_entry(states.next, struct extent_state, leak_list);
  94		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
  95		       state->start, state->end, state->state,
  96		       extent_state_in_tree(state),
  97		       refcount_read(&state->refs));
  98		list_del(&state->leak_list);
  99		kmem_cache_free(extent_state_cache, state);
 100	}
 101}
 102
 103#define btrfs_debug_check_extent_io_range(tree, start, end)		\
 104	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
 105static inline void __btrfs_debug_check_extent_io_range(const char *caller,
 106		struct extent_io_tree *tree, u64 start, u64 end)
 107{
 108	struct inode *inode = tree->private_data;
 109	u64 isize;
 110
 111	if (!inode || !is_data_inode(inode))
 112		return;
 113
 114	isize = i_size_read(inode);
 115	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
 116		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
 117		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
 118			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
 119	}
 120}
 121#else
 122#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
 123#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
 124#define btrfs_extent_state_leak_debug_check()	do {} while (0)
 125#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 126#endif
 127
 128struct tree_entry {
 129	u64 start;
 130	u64 end;
 131	struct rb_node rb_node;
 132};
 133
 134struct extent_page_data {
 135	struct bio *bio;
 136	/* tells writepage not to lock the state bits for this range
 137	 * it still does the unlocking
 138	 */
 139	unsigned int extent_locked:1;
 140
 141	/* tells the submit_bio code to use REQ_SYNC */
 142	unsigned int sync_io:1;
 143};
 144
 145static int add_extent_changeset(struct extent_state *state, unsigned bits,
 146				 struct extent_changeset *changeset,
 147				 int set)
 148{
 149	int ret;
 150
 151	if (!changeset)
 152		return 0;
 153	if (set && (state->state & bits) == bits)
 154		return 0;
 155	if (!set && (state->state & bits) == 0)
 156		return 0;
 157	changeset->bytes_changed += state->end - state->start + 1;
 158	ret = ulist_add(&changeset->range_changed, state->start, state->end,
 159			GFP_ATOMIC);
 160	return ret;
 161}
 162
 163static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 164				       unsigned long bio_flags)
 165{
 166	blk_status_t ret = 0;
 167	struct extent_io_tree *tree = bio->bi_private;
 168
 169	bio->bi_private = NULL;
 170
 171	if (tree->ops)
 172		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
 173						 mirror_num, bio_flags);
 174	else
 175		btrfsic_submit_bio(bio);
 176
 177	return blk_status_to_errno(ret);
 178}
 179
 180/* Cleanup unsubmitted bios */
 181static void end_write_bio(struct extent_page_data *epd, int ret)
 182{
 183	if (epd->bio) {
 184		epd->bio->bi_status = errno_to_blk_status(ret);
 185		bio_endio(epd->bio);
 186		epd->bio = NULL;
 187	}
 188}
 189
 190/*
 191 * Submit bio from extent page data via submit_one_bio
 192 *
 193 * Return 0 if everything is OK.
 194 * Return <0 for error.
 195 */
 196static int __must_check flush_write_bio(struct extent_page_data *epd)
 197{
 198	int ret = 0;
 199
 200	if (epd->bio) {
 201		ret = submit_one_bio(epd->bio, 0, 0);
 202		/*
 203		 * Clean up of epd->bio is handled by its endio function.
 204		 * And endio is either triggered by successful bio execution
 205		 * or the error handler of submit bio hook.
 206		 * So at this point, no matter what happened, we don't need
 207		 * to clean up epd->bio.
 208		 */
 209		epd->bio = NULL;
 210	}
 211	return ret;
 212}
 213
 214int __init extent_state_cache_init(void)
 215{
 216	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 217			sizeof(struct extent_state), 0,
 218			SLAB_MEM_SPREAD, NULL);
 219	if (!extent_state_cache)
 220		return -ENOMEM;
 221	return 0;
 222}
 223
 224int __init extent_io_init(void)
 225{
 226	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 227			sizeof(struct extent_buffer), 0,
 228			SLAB_MEM_SPREAD, NULL);
 229	if (!extent_buffer_cache)
 230		return -ENOMEM;
 231
 232	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
 233			offsetof(struct btrfs_io_bio, bio),
 234			BIOSET_NEED_BVECS))
 235		goto free_buffer_cache;
 236
 237	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
 238		goto free_bioset;
 239
 240	return 0;
 241
 242free_bioset:
 243	bioset_exit(&btrfs_bioset);
 244
 245free_buffer_cache:
 246	kmem_cache_destroy(extent_buffer_cache);
 247	extent_buffer_cache = NULL;
 248	return -ENOMEM;
 249}
 250
 251void __cold extent_state_cache_exit(void)
 252{
 253	btrfs_extent_state_leak_debug_check();
 254	kmem_cache_destroy(extent_state_cache);
 255}
 256
 257void __cold extent_io_exit(void)
 258{
 259	/*
 260	 * Make sure all delayed rcu free are flushed before we
 261	 * destroy caches.
 262	 */
 263	rcu_barrier();
 264	kmem_cache_destroy(extent_buffer_cache);
 265	bioset_exit(&btrfs_bioset);
 266}
 267
 268/*
 269 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 270 * update the disk_i_size, but lockdep will complain because in our io_tree we hold
 271 * the tree lock and get the inode lock when setting delalloc.  These two things
 272 * are unrelated, so make a class for the file_extent_tree so we don't get the
 273 * two locking patterns mixed up.
 274 */
 275static struct lock_class_key file_extent_tree_class;
 276
 277void extent_io_tree_init(struct btrfs_fs_info *fs_info,
 278			 struct extent_io_tree *tree, unsigned int owner,
 279			 void *private_data)
 280{
 281	tree->fs_info = fs_info;
 282	tree->state = RB_ROOT;
 283	tree->ops = NULL;
 284	tree->dirty_bytes = 0;
 285	spin_lock_init(&tree->lock);
 286	tree->private_data = private_data;
 287	tree->owner = owner;
 288	if (owner == IO_TREE_INODE_FILE_EXTENT)
 289		lockdep_set_class(&tree->lock, &file_extent_tree_class);
 290}
 291
 292void extent_io_tree_release(struct extent_io_tree *tree)
 293{
 294	spin_lock(&tree->lock);
 295	/*
 296	 * Do a single barrier for the waitqueue_active check here, the state
 297	 * of the waitqueue should not change once extent_io_tree_release is
 298	 * called.
 299	 */
 300	smp_mb();
 301	while (!RB_EMPTY_ROOT(&tree->state)) {
 302		struct rb_node *node;
 303		struct extent_state *state;
 304
 305		node = rb_first(&tree->state);
 306		state = rb_entry(node, struct extent_state, rb_node);
 307		rb_erase(&state->rb_node, &tree->state);
 308		RB_CLEAR_NODE(&state->rb_node);
 309		/*
 310		 * btree io trees aren't supposed to have tasks waiting for
 311		 * changes in the flags of extent states ever.
 312		 */
 313		ASSERT(!waitqueue_active(&state->wq));
 314		free_extent_state(state);
 315
 316		cond_resched_lock(&tree->lock);
 317	}
 318	spin_unlock(&tree->lock);
 319}
 320
 321static struct extent_state *alloc_extent_state(gfp_t mask)
 322{
 323	struct extent_state *state;
 324
 325	/*
 326	 * The given mask might not be appropriate for the slab allocator,
 327	 * drop the unsupported bits
 328	 */
 329	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
 330	state = kmem_cache_alloc(extent_state_cache, mask);
 331	if (!state)
 332		return state;
 333	state->state = 0;
 334	state->failrec = NULL;
 335	RB_CLEAR_NODE(&state->rb_node);
 336	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
 337	refcount_set(&state->refs, 1);
 338	init_waitqueue_head(&state->wq);
 339	trace_alloc_extent_state(state, mask, _RET_IP_);
 340	return state;
 341}
 342
 343void free_extent_state(struct extent_state *state)
 344{
 345	if (!state)
 346		return;
 347	if (refcount_dec_and_test(&state->refs)) {
 348		WARN_ON(extent_state_in_tree(state));
 349		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
 350		trace_free_extent_state(state, _RET_IP_);
 351		kmem_cache_free(extent_state_cache, state);
 352	}
 353}
 354
 355static struct rb_node *tree_insert(struct rb_root *root,
 356				   struct rb_node *search_start,
 357				   u64 offset,
 358				   struct rb_node *node,
 359				   struct rb_node ***p_in,
 360				   struct rb_node **parent_in)
 361{
 362	struct rb_node **p;
 363	struct rb_node *parent = NULL;
 364	struct tree_entry *entry;
 365
 366	if (p_in && parent_in) {
 367		p = *p_in;
 368		parent = *parent_in;
 369		goto do_insert;
 370	}
 371
 372	p = search_start ? &search_start : &root->rb_node;
 373	while (*p) {
 374		parent = *p;
 375		entry = rb_entry(parent, struct tree_entry, rb_node);
 376
 377		if (offset < entry->start)
 378			p = &(*p)->rb_left;
 379		else if (offset > entry->end)
 380			p = &(*p)->rb_right;
 381		else
 382			return parent;
 383	}
 384
 385do_insert:
 386	rb_link_node(node, parent, p);
 387	rb_insert_color(node, root);
 388	return NULL;
 389}
 390
 391/**
 392 * __etree_search - search @tree for an entry that contains @offset. Such
 393 * entry would have entry->start <= offset && entry->end >= offset.
 394 *
 395 * @tree - the tree to search
 396 * @offset - offset that should fall within an entry in @tree
 397 * @next_ret - pointer to the first entry whose range ends after @offset
 398 * @prev_ret - pointer to the first entry whose range begins before @offset
 399 * @p_ret - pointer where new node should be anchored (used when inserting an
 400 *	    entry in the tree)
 401 * @parent_ret - points to entry which would have been the parent of the entry,
 402 *               containing @offset
 403 *
 404 * This function returns a pointer to the entry that contains @offset byte
 405 * address. If no such entry exists, then NULL is returned and the other
 406 * pointer arguments to the function are filled, otherwise the found entry is
 407 * returned and other pointers are left untouched.
 408 */
 409static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 410				      struct rb_node **next_ret,
 411				      struct rb_node **prev_ret,
 412				      struct rb_node ***p_ret,
 413				      struct rb_node **parent_ret)
 414{
 415	struct rb_root *root = &tree->state;
 416	struct rb_node **n = &root->rb_node;
 417	struct rb_node *prev = NULL;
 418	struct rb_node *orig_prev = NULL;
 419	struct tree_entry *entry;
 420	struct tree_entry *prev_entry = NULL;
 421
 422	while (*n) {
 423		prev = *n;
 424		entry = rb_entry(prev, struct tree_entry, rb_node);
 425		prev_entry = entry;
 426
 427		if (offset < entry->start)
 428			n = &(*n)->rb_left;
 429		else if (offset > entry->end)
 430			n = &(*n)->rb_right;
 431		else
 432			return *n;
 433	}
 434
 435	if (p_ret)
 436		*p_ret = n;
 437	if (parent_ret)
 438		*parent_ret = prev;
 439
 440	if (next_ret) {
 441		orig_prev = prev;
 442		while (prev && offset > prev_entry->end) {
 443			prev = rb_next(prev);
 444			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 445		}
 446		*next_ret = prev;
 447		prev = orig_prev;
 448	}
 449
 450	if (prev_ret) {
 451		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 452		while (prev && offset < prev_entry->start) {
 453			prev = rb_prev(prev);
 454			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 455		}
 456		*prev_ret = prev;
 457	}
 458	return NULL;
 459}
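/*
 * Editorial aside, not part of the kernel source: the containing/next/prev
 * contract of __etree_search(), sketched over a plain sorted array of
 * inclusive, non-overlapping ranges instead of an rbtree.  When no range
 * contains 'offset', *next is the first range ending after it and *prev is
 * the last range starting before it (either may stay -1).
 */
#include <stdio.h>

struct range { unsigned long start, end; };

static int range_search(const struct range *r, int n, unsigned long offset,
			int *next, int *prev)
{
	int i;

	*next = -1;
	*prev = -1;
	for (i = 0; i < n; i++) {
		if (offset >= r[i].start && offset <= r[i].end)
			return i;		/* containing range found */
		if (r[i].start < offset)
			*prev = i;
		if (r[i].end > offset && *next < 0)
			*next = i;
	}
	return -1;
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 8192, 12287 } };
	int next, prev;
	int idx = range_search(r, 2, 6000, &next, &prev);

	printf("idx=%d next=%d prev=%d\n", idx, next, prev);	/* idx=-1 next=1 prev=0 */
	return 0;
}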
 460
 461static inline struct rb_node *
 462tree_search_for_insert(struct extent_io_tree *tree,
 463		       u64 offset,
 464		       struct rb_node ***p_ret,
 465		       struct rb_node **parent_ret)
 466{
 467	struct rb_node *next = NULL;
 468	struct rb_node *ret;
 469
 470	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
 471	if (!ret)
 472		return next;
 473	return ret;
 474}
 475
 476static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 477					  u64 offset)
 478{
 479	return tree_search_for_insert(tree, offset, NULL, NULL);
 480}
 481
 482/*
 483 * utility function to look for merge candidates inside a given range.
 484 * Any extents with matching state are merged together into a single
 485 * extent in the tree.  Extents with EXTENT_IO in their state field
 486 * are not merged because the end_io handlers need to be able to do
 487 * operations on them without sleeping (or doing allocations/splits).
 488 *
 489 * This should be called with the tree lock held.
 490 */
 491static void merge_state(struct extent_io_tree *tree,
 492		        struct extent_state *state)
 493{
 494	struct extent_state *other;
 495	struct rb_node *other_node;
 496
 497	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
 498		return;
 499
 500	other_node = rb_prev(&state->rb_node);
 501	if (other_node) {
 502		other = rb_entry(other_node, struct extent_state, rb_node);
 503		if (other->end == state->start - 1 &&
 504		    other->state == state->state) {
 505			if (tree->private_data &&
 506			    is_data_inode(tree->private_data))
 507				btrfs_merge_delalloc_extent(tree->private_data,
 508							    state, other);
 509			state->start = other->start;
 510			rb_erase(&other->rb_node, &tree->state);
 511			RB_CLEAR_NODE(&other->rb_node);
 512			free_extent_state(other);
 513		}
 514	}
 515	other_node = rb_next(&state->rb_node);
 516	if (other_node) {
 517		other = rb_entry(other_node, struct extent_state, rb_node);
 518		if (other->start == state->end + 1 &&
 519		    other->state == state->state) {
 520			if (tree->private_data &&
 521			    is_data_inode(tree->private_data))
 522				btrfs_merge_delalloc_extent(tree->private_data,
 523							    state, other);
 524			state->end = other->end;
 525			rb_erase(&other->rb_node, &tree->state);
 526			RB_CLEAR_NODE(&other->rb_node);
 527			free_extent_state(other);
 528		}
 529	}
 530}
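/*
 * Editorial aside, not part of the kernel source: the merge rule applied by
 * merge_state() in isolation - two ranges are glued together only when they
 * are exactly adjacent and carry identical state bits.
 */
#include <stdio.h>
#include <stdbool.h>

struct srange { unsigned long long start, end; unsigned int bits; };

static bool try_merge(struct srange *left, const struct srange *right)
{
	if (left->end + 1 != right->start || left->bits != right->bits)
		return false;
	left->end = right->end;		/* 'right' would then be freed from the tree */
	return true;
}

int main(void)
{
	struct srange a = { 0, 4095, 0x1 };
	struct srange b = { 4096, 8191, 0x1 };

	if (try_merge(&a, &b))
		printf("merged: [%llu, %llu]\n", a.start, a.end);	/* [0, 8191] */
	return 0;
}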
 531
 532static void set_state_bits(struct extent_io_tree *tree,
 533			   struct extent_state *state, unsigned *bits,
 534			   struct extent_changeset *changeset);
 535
 536/*
 537 * insert an extent_state struct into the tree.  'bits' are set on the
 538 * struct before it is inserted.
 539 *
 540 * This may return -EEXIST if the extent is already there, in which case the
 541 * state struct is freed.
 542 *
 543 * The tree lock is not taken internally.  This is a utility function and
 544 * probably isn't what you want to call (see set/clear_extent_bit).
 545 */
 546static int insert_state(struct extent_io_tree *tree,
 547			struct extent_state *state, u64 start, u64 end,
 548			struct rb_node ***p,
 549			struct rb_node **parent,
 550			unsigned *bits, struct extent_changeset *changeset)
 551{
 552	struct rb_node *node;
 553
 554	if (end < start) {
 555		btrfs_err(tree->fs_info,
 556			"insert state: end < start %llu %llu", end, start);
 557		WARN_ON(1);
 558	}
 559	state->start = start;
 560	state->end = end;
 561
 562	set_state_bits(tree, state, bits, changeset);
 563
 564	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
 565	if (node) {
 566		struct extent_state *found;
 567		found = rb_entry(node, struct extent_state, rb_node);
 568		btrfs_err(tree->fs_info,
 569		       "found node %llu %llu on insert of %llu %llu",
 570		       found->start, found->end, start, end);
 571		return -EEXIST;
 572	}
 573	merge_state(tree, state);
 574	return 0;
 575}
 576
 577/*
 578 * split a given extent state struct in two, inserting the preallocated
 579 * struct 'prealloc' as the newly created second half.  'split' indicates an
 580 * offset inside 'orig' where it should be split.
 581 *
 582 * Before calling,
 583 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 584 * are two extent state structs in the tree:
 585 * prealloc: [orig->start, split - 1]
 586 * orig: [ split, orig->end ]
 587 *
 588 * The tree locks are not taken by this function. They need to be held
 589 * by the caller.
 590 */
 591static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 592		       struct extent_state *prealloc, u64 split)
 593{
 594	struct rb_node *node;
 595
 596	if (tree->private_data && is_data_inode(tree->private_data))
 597		btrfs_split_delalloc_extent(tree->private_data, orig, split);
 598
 599	prealloc->start = orig->start;
 600	prealloc->end = split - 1;
 601	prealloc->state = orig->state;
 602	orig->start = split;
 603
 604	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
 605			   &prealloc->rb_node, NULL, NULL);
 606	if (node) {
 607		free_extent_state(prealloc);
 608		return -EEXIST;
 609	}
 610	return 0;
 611}
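/*
 * Editorial aside, not part of the kernel source: the arithmetic performed by
 * split_state(), shown on plain structs.  'prealloc' takes the low half
 * [orig.start, split - 1] and 'orig' keeps the high half [split, orig.end].
 */
#include <stdio.h>

struct srange { unsigned long long start, end; };

static void split_range(struct srange *orig, struct srange *prealloc,
			unsigned long long split)
{
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	orig->start = split;
}

int main(void)
{
	struct srange orig = { 0, 16383 };
	struct srange prealloc;

	split_range(&orig, &prealloc, 4096);
	printf("low  [%llu, %llu]\n", prealloc.start, prealloc.end);	/* [0, 4095] */
	printf("high [%llu, %llu]\n", orig.start, orig.end);		/* [4096, 16383] */
	return 0;
}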
 612
 613static struct extent_state *next_state(struct extent_state *state)
 614{
 615	struct rb_node *next = rb_next(&state->rb_node);
 616	if (next)
 617		return rb_entry(next, struct extent_state, rb_node);
 618	else
 619		return NULL;
 620}
 621
 622/*
 623 * utility function to clear some bits in an extent state struct.
 624 * it will optionally wake up anyone waiting on this state (wake == 1).
 625 *
 626 * If no bits are set on the state struct after clearing things, the
 627 * struct is freed and removed from the tree
 628 */
 629static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 630					    struct extent_state *state,
 631					    unsigned *bits, int wake,
 632					    struct extent_changeset *changeset)
 633{
 634	struct extent_state *next;
 635	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 636	int ret;
 637
 638	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 639		u64 range = state->end - state->start + 1;
 640		WARN_ON(range > tree->dirty_bytes);
 641		tree->dirty_bytes -= range;
 642	}
 643
 644	if (tree->private_data && is_data_inode(tree->private_data))
 645		btrfs_clear_delalloc_extent(tree->private_data, state, bits);
 646
 647	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
 648	BUG_ON(ret < 0);
 649	state->state &= ~bits_to_clear;
 650	if (wake)
 651		wake_up(&state->wq);
 652	if (state->state == 0) {
 653		next = next_state(state);
 654		if (extent_state_in_tree(state)) {
 655			rb_erase(&state->rb_node, &tree->state);
 656			RB_CLEAR_NODE(&state->rb_node);
 657			free_extent_state(state);
 658		} else {
 659			WARN_ON(1);
 660		}
 661	} else {
 662		merge_state(tree, state);
 663		next = next_state(state);
 664	}
 665	return next;
 666}
 667
 668static struct extent_state *
 669alloc_extent_state_atomic(struct extent_state *prealloc)
 670{
 671	if (!prealloc)
 672		prealloc = alloc_extent_state(GFP_ATOMIC);
 673
 674	return prealloc;
 675}
 676
 677static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 678{
 679	struct inode *inode = tree->private_data;
 680
 681	btrfs_panic(btrfs_sb(inode->i_sb), err,
 682	"locking error: extent tree was modified by another thread while locked");
 683}
 684
 685/*
 686 * clear some bits on a range in the tree.  This may require splitting
 687 * or inserting elements in the tree, so the gfp mask is used to
 688 * indicate which allocations or sleeping are allowed.
 689 *
 690 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 691 * the given range from the tree regardless of state (ie for truncate).
 692 *
 693 * the range [start, end] is inclusive.
 694 *
 695 * This takes the tree lock, and returns 0 on success and < 0 on error.
 696 */
 697int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 698			      unsigned bits, int wake, int delete,
 699			      struct extent_state **cached_state,
 700			      gfp_t mask, struct extent_changeset *changeset)
 701{
 702	struct extent_state *state;
 703	struct extent_state *cached;
 704	struct extent_state *prealloc = NULL;
 705	struct rb_node *node;
 706	u64 last_end;
 707	int err;
 708	int clear = 0;
 709
 710	btrfs_debug_check_extent_io_range(tree, start, end);
 711	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
 712
 713	if (bits & EXTENT_DELALLOC)
 714		bits |= EXTENT_NORESERVE;
 715
 716	if (delete)
 717		bits |= ~EXTENT_CTLBITS;
 718
 719	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
 720		clear = 1;
 721again:
 722	if (!prealloc && gfpflags_allow_blocking(mask)) {
 723		/*
 724		 * Don't care for allocation failure here because we might end
 725		 * up not needing the pre-allocated extent state at all, which
 726		 * is the case if we only have in the tree extent states that
 727		 * cover our input range and don't cover any other range.
 728		 * If we end up needing a new extent state we allocate it later.
 729		 */
 730		prealloc = alloc_extent_state(mask);
 731	}
 732
 733	spin_lock(&tree->lock);
 734	if (cached_state) {
 735		cached = *cached_state;
 736
 737		if (clear) {
 738			*cached_state = NULL;
 739			cached_state = NULL;
 740		}
 741
 742		if (cached && extent_state_in_tree(cached) &&
 743		    cached->start <= start && cached->end > start) {
 744			if (clear)
 745				refcount_dec(&cached->refs);
 746			state = cached;
 747			goto hit_next;
 748		}
 749		if (clear)
 750			free_extent_state(cached);
 751	}
 752	/*
 753	 * this search will find the extents that end after
 754	 * our range starts
 755	 */
 756	node = tree_search(tree, start);
 757	if (!node)
 758		goto out;
 759	state = rb_entry(node, struct extent_state, rb_node);
 760hit_next:
 761	if (state->start > end)
 762		goto out;
 763	WARN_ON(state->end < start);
 764	last_end = state->end;
 765
 766	/* the state doesn't have the wanted bits, go ahead */
 767	if (!(state->state & bits)) {
 768		state = next_state(state);
 769		goto next;
 770	}
 771
 772	/*
 773	 *     | ---- desired range ---- |
 774	 *  | state | or
 775	 *  | ------------- state -------------- |
 776	 *
 777	 * We need to split the extent we found, and may flip
 778	 * bits on second half.
 779	 *
 780	 * If the extent we found extends past our range, we
 781	 * just split and search again.  It'll get split again
 782	 * the next time though.
 783	 *
 784	 * If the extent we found is inside our range, we clear
 785	 * the desired bit on it.
 786	 */
 787
 788	if (state->start < start) {
 789		prealloc = alloc_extent_state_atomic(prealloc);
 790		BUG_ON(!prealloc);
 791		err = split_state(tree, state, prealloc, start);
 792		if (err)
 793			extent_io_tree_panic(tree, err);
 794
 795		prealloc = NULL;
 796		if (err)
 797			goto out;
 798		if (state->end <= end) {
 799			state = clear_state_bit(tree, state, &bits, wake,
 800						changeset);
 801			goto next;
 802		}
 803		goto search_again;
 804	}
 805	/*
 806	 * | ---- desired range ---- |
 807	 *                        | state |
 808	 * We need to split the extent, and clear the bit
 809	 * on the first half
 810	 */
 811	if (state->start <= end && state->end > end) {
 812		prealloc = alloc_extent_state_atomic(prealloc);
 813		BUG_ON(!prealloc);
 814		err = split_state(tree, state, prealloc, end + 1);
 815		if (err)
 816			extent_io_tree_panic(tree, err);
 817
 818		if (wake)
 819			wake_up(&state->wq);
 820
 821		clear_state_bit(tree, prealloc, &bits, wake, changeset);
 822
 823		prealloc = NULL;
 824		goto out;
 825	}
 826
 827	state = clear_state_bit(tree, state, &bits, wake, changeset);
 828next:
 829	if (last_end == (u64)-1)
 830		goto out;
 831	start = last_end + 1;
 832	if (start <= end && state && !need_resched())
 833		goto hit_next;
 834
 835search_again:
 836	if (start > end)
 837		goto out;
 838	spin_unlock(&tree->lock);
 839	if (gfpflags_allow_blocking(mask))
 840		cond_resched();
 841	goto again;
 842
 843out:
 844	spin_unlock(&tree->lock);
 845	if (prealloc)
 846		free_extent_state(prealloc);
 847
 848	return 0;
 849
 850}
 851
 852static void wait_on_state(struct extent_io_tree *tree,
 853			  struct extent_state *state)
 854		__releases(tree->lock)
 855		__acquires(tree->lock)
 856{
 857	DEFINE_WAIT(wait);
 858	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 859	spin_unlock(&tree->lock);
 860	schedule();
 861	spin_lock(&tree->lock);
 862	finish_wait(&state->wq, &wait);
 863}
 864
 865/*
 866 * waits for one or more bits to clear on a range in the state tree.
 867 * The range [start, end] is inclusive.
 868 * The tree lock is taken by this function
 869 */
 870static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 871			    unsigned long bits)
 872{
 873	struct extent_state *state;
 874	struct rb_node *node;
 875
 876	btrfs_debug_check_extent_io_range(tree, start, end);
 877
 878	spin_lock(&tree->lock);
 879again:
 880	while (1) {
 881		/*
 882		 * this search will find all the extents that end after
 883		 * our range starts
 884		 */
 885		node = tree_search(tree, start);
 886process_node:
 887		if (!node)
 888			break;
 889
 890		state = rb_entry(node, struct extent_state, rb_node);
 891
 892		if (state->start > end)
 893			goto out;
 894
 895		if (state->state & bits) {
 896			start = state->start;
 897			refcount_inc(&state->refs);
 898			wait_on_state(tree, state);
 899			free_extent_state(state);
 900			goto again;
 901		}
 902		start = state->end + 1;
 903
 904		if (start > end)
 905			break;
 906
 907		if (!cond_resched_lock(&tree->lock)) {
 908			node = rb_next(node);
 909			goto process_node;
 910		}
 911	}
 912out:
 913	spin_unlock(&tree->lock);
 914}
 915
 916static void set_state_bits(struct extent_io_tree *tree,
 917			   struct extent_state *state,
 918			   unsigned *bits, struct extent_changeset *changeset)
 919{
 920	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 921	int ret;
 922
 923	if (tree->private_data && is_data_inode(tree->private_data))
 924		btrfs_set_delalloc_extent(tree->private_data, state, bits);
 925
 926	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 927		u64 range = state->end - state->start + 1;
 928		tree->dirty_bytes += range;
 929	}
 930	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
 931	BUG_ON(ret < 0);
 932	state->state |= bits_to_set;
 933}
 934
 935static void cache_state_if_flags(struct extent_state *state,
 936				 struct extent_state **cached_ptr,
 937				 unsigned flags)
 938{
 939	if (cached_ptr && !(*cached_ptr)) {
 940		if (!flags || (state->state & flags)) {
 941			*cached_ptr = state;
 942			refcount_inc(&state->refs);
 943		}
 944	}
 945}
 946
 947static void cache_state(struct extent_state *state,
 948			struct extent_state **cached_ptr)
 949{
 950	return cache_state_if_flags(state, cached_ptr,
 951				    EXTENT_LOCKED | EXTENT_BOUNDARY);
 952}
 953
 954/*
 955 * set some bits on a range in the tree.  This may require allocations or
 956 * sleeping, so the gfp mask is used to indicate what is allowed.
 957 *
 958 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 959 * part of the range already has the desired bits set.  The start of the
 960 * existing range is returned in failed_start in this case.
 961 *
 962 * [start, end] is inclusive This takes the tree lock.
 963 */
 964
 965static int __must_check
 966__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 967		 unsigned bits, unsigned exclusive_bits,
 968		 u64 *failed_start, struct extent_state **cached_state,
 969		 gfp_t mask, struct extent_changeset *changeset)
 970{
 971	struct extent_state *state;
 972	struct extent_state *prealloc = NULL;
 973	struct rb_node *node;
 974	struct rb_node **p;
 975	struct rb_node *parent;
 976	int err = 0;
 977	u64 last_start;
 978	u64 last_end;
 979
 980	btrfs_debug_check_extent_io_range(tree, start, end);
 981	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
 982
 983again:
 984	if (!prealloc && gfpflags_allow_blocking(mask)) {
 985		/*
 986		 * Don't care for allocation failure here because we might end
 987		 * up not needing the pre-allocated extent state at all, which
 988		 * is the case if we only have in the tree extent states that
 989		 * cover our input range and don't cover any other range.
 990		 * If we end up needing a new extent state we allocate it later.
 991		 */
 992		prealloc = alloc_extent_state(mask);
 993	}
 994
 995	spin_lock(&tree->lock);
 996	if (cached_state && *cached_state) {
 997		state = *cached_state;
 998		if (state->start <= start && state->end > start &&
 999		    extent_state_in_tree(state)) {
1000			node = &state->rb_node;
1001			goto hit_next;
1002		}
1003	}
1004	/*
1005	 * this search will find all the extents that end after
1006	 * our range starts.
1007	 */
1008	node = tree_search_for_insert(tree, start, &p, &parent);
1009	if (!node) {
1010		prealloc = alloc_extent_state_atomic(prealloc);
1011		BUG_ON(!prealloc);
1012		err = insert_state(tree, prealloc, start, end,
1013				   &p, &parent, &bits, changeset);
1014		if (err)
1015			extent_io_tree_panic(tree, err);
1016
1017		cache_state(prealloc, cached_state);
1018		prealloc = NULL;
1019		goto out;
1020	}
1021	state = rb_entry(node, struct extent_state, rb_node);
1022hit_next:
1023	last_start = state->start;
1024	last_end = state->end;
1025
1026	/*
1027	 * | ---- desired range ---- |
1028	 * | state |
1029	 *
1030	 * Just lock what we found and keep going
1031	 */
1032	if (state->start == start && state->end <= end) {
1033		if (state->state & exclusive_bits) {
1034			*failed_start = state->start;
1035			err = -EEXIST;
1036			goto out;
1037		}
1038
1039		set_state_bits(tree, state, &bits, changeset);
1040		cache_state(state, cached_state);
1041		merge_state(tree, state);
1042		if (last_end == (u64)-1)
1043			goto out;
1044		start = last_end + 1;
1045		state = next_state(state);
1046		if (start < end && state && state->start == start &&
1047		    !need_resched())
1048			goto hit_next;
1049		goto search_again;
1050	}
1051
1052	/*
1053	 *     | ---- desired range ---- |
1054	 * | state |
1055	 *   or
1056	 * | ------------- state -------------- |
1057	 *
1058	 * We need to split the extent we found, and may flip bits on
1059	 * second half.
1060	 *
1061	 * If the extent we found extends past our
1062	 * range, we just split and search again.  It'll get split
1063	 * again the next time though.
1064	 *
1065	 * If the extent we found is inside our range, we set the
1066	 * desired bit on it.
1067	 */
1068	if (state->start < start) {
1069		if (state->state & exclusive_bits) {
1070			*failed_start = start;
1071			err = -EEXIST;
1072			goto out;
1073		}
1074
1075		/*
1076		 * If this extent already has all the bits we want set, then
1077		 * skip it, not necessary to split it or do anything with it.
1078		 */
1079		if ((state->state & bits) == bits) {
1080			start = state->end + 1;
1081			cache_state(state, cached_state);
1082			goto search_again;
1083		}
1084
1085		prealloc = alloc_extent_state_atomic(prealloc);
1086		BUG_ON(!prealloc);
1087		err = split_state(tree, state, prealloc, start);
1088		if (err)
1089			extent_io_tree_panic(tree, err);
1090
1091		prealloc = NULL;
1092		if (err)
1093			goto out;
1094		if (state->end <= end) {
1095			set_state_bits(tree, state, &bits, changeset);
1096			cache_state(state, cached_state);
1097			merge_state(tree, state);
1098			if (last_end == (u64)-1)
1099				goto out;
1100			start = last_end + 1;
1101			state = next_state(state);
1102			if (start < end && state && state->start == start &&
1103			    !need_resched())
1104				goto hit_next;
1105		}
1106		goto search_again;
1107	}
1108	/*
1109	 * | ---- desired range ---- |
1110	 *     | state | or               | state |
1111	 *
1112	 * There's a hole, we need to insert something in it and
1113	 * ignore the extent we found.
1114	 */
1115	if (state->start > start) {
1116		u64 this_end;
1117		if (end < last_start)
1118			this_end = end;
1119		else
1120			this_end = last_start - 1;
1121
1122		prealloc = alloc_extent_state_atomic(prealloc);
1123		BUG_ON(!prealloc);
1124
1125		/*
1126		 * Avoid to free 'prealloc' if it can be merged with
1127		 * the later extent.
1128		 */
1129		err = insert_state(tree, prealloc, start, this_end,
1130				   NULL, NULL, &bits, changeset);
1131		if (err)
1132			extent_io_tree_panic(tree, err);
1133
1134		cache_state(prealloc, cached_state);
1135		prealloc = NULL;
1136		start = this_end + 1;
1137		goto search_again;
1138	}
1139	/*
1140	 * | ---- desired range ---- |
1141	 *                        | state |
1142	 * We need to split the extent, and set the bit
1143	 * on the first half
1144	 */
1145	if (state->start <= end && state->end > end) {
1146		if (state->state & exclusive_bits) {
1147			*failed_start = start;
1148			err = -EEXIST;
1149			goto out;
1150		}
1151
1152		prealloc = alloc_extent_state_atomic(prealloc);
1153		BUG_ON(!prealloc);
1154		err = split_state(tree, state, prealloc, end + 1);
1155		if (err)
1156			extent_io_tree_panic(tree, err);
1157
1158		set_state_bits(tree, prealloc, &bits, changeset);
1159		cache_state(prealloc, cached_state);
1160		merge_state(tree, prealloc);
1161		prealloc = NULL;
1162		goto out;
1163	}
1164
1165search_again:
1166	if (start > end)
1167		goto out;
1168	spin_unlock(&tree->lock);
1169	if (gfpflags_allow_blocking(mask))
1170		cond_resched();
1171	goto again;
1172
1173out:
1174	spin_unlock(&tree->lock);
1175	if (prealloc)
1176		free_extent_state(prealloc);
1177
1178	return err;
1179
1180}
1181
1182int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1183		   unsigned bits, u64 *failed_start,
1184		   struct extent_state **cached_state, gfp_t mask)
1185{
1186	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1187				cached_state, mask, NULL);
1188}
1189
1190
1191/**
1192 * convert_extent_bit - convert all bits in a given range from one bit to
1193 * 			another
1194 * @tree:	the io tree to search
1195 * @start:	the start offset in bytes
1196 * @end:	the end offset in bytes (inclusive)
1197 * @bits:	the bits to set in this range
1198 * @clear_bits:	the bits to clear in this range
1199 * @cached_state:	state that we're going to cache
1200 *
1201 * This will go through and set bits for the given range.  If any states exist
1202 * already in this range they are set with the given bit and cleared of the
1203 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1204 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1205 * boundary bits like LOCK.
1206 *
1207 * All allocations are done with GFP_NOFS.
1208 */
1209int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1210		       unsigned bits, unsigned clear_bits,
1211		       struct extent_state **cached_state)
1212{
1213	struct extent_state *state;
1214	struct extent_state *prealloc = NULL;
1215	struct rb_node *node;
1216	struct rb_node **p;
1217	struct rb_node *parent;
1218	int err = 0;
1219	u64 last_start;
1220	u64 last_end;
1221	bool first_iteration = true;
1222
1223	btrfs_debug_check_extent_io_range(tree, start, end);
1224	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1225				       clear_bits);
1226
1227again:
1228	if (!prealloc) {
1229		/*
1230		 * Best effort, don't worry if extent state allocation fails
1231		 * here for the first iteration. We might have a cached state
1232		 * that matches exactly the target range, in which case no
1233		 * extent state allocations are needed. We'll only know this
1234		 * after locking the tree.
1235		 */
1236		prealloc = alloc_extent_state(GFP_NOFS);
1237		if (!prealloc && !first_iteration)
1238			return -ENOMEM;
1239	}
1240
1241	spin_lock(&tree->lock);
1242	if (cached_state && *cached_state) {
1243		state = *cached_state;
1244		if (state->start <= start && state->end > start &&
1245		    extent_state_in_tree(state)) {
1246			node = &state->rb_node;
1247			goto hit_next;
1248		}
1249	}
1250
1251	/*
1252	 * this search will find all the extents that end after
1253	 * our range starts.
1254	 */
1255	node = tree_search_for_insert(tree, start, &p, &parent);
1256	if (!node) {
1257		prealloc = alloc_extent_state_atomic(prealloc);
1258		if (!prealloc) {
1259			err = -ENOMEM;
1260			goto out;
1261		}
1262		err = insert_state(tree, prealloc, start, end,
1263				   &p, &parent, &bits, NULL);
1264		if (err)
1265			extent_io_tree_panic(tree, err);
1266		cache_state(prealloc, cached_state);
1267		prealloc = NULL;
1268		goto out;
1269	}
1270	state = rb_entry(node, struct extent_state, rb_node);
1271hit_next:
1272	last_start = state->start;
1273	last_end = state->end;
1274
1275	/*
1276	 * | ---- desired range ---- |
1277	 * | state |
1278	 *
1279	 * Just lock what we found and keep going
1280	 */
1281	if (state->start == start && state->end <= end) {
1282		set_state_bits(tree, state, &bits, NULL);
1283		cache_state(state, cached_state);
1284		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1285		if (last_end == (u64)-1)
1286			goto out;
1287		start = last_end + 1;
1288		if (start < end && state && state->start == start &&
1289		    !need_resched())
1290			goto hit_next;
1291		goto search_again;
1292	}
1293
1294	/*
1295	 *     | ---- desired range ---- |
1296	 * | state |
1297	 *   or
1298	 * | ------------- state -------------- |
1299	 *
1300	 * We need to split the extent we found, and may flip bits on
1301	 * second half.
1302	 *
1303	 * If the extent we found extends past our
1304	 * range, we just split and search again.  It'll get split
1305	 * again the next time though.
1306	 *
1307	 * If the extent we found is inside our range, we set the
1308	 * desired bit on it.
1309	 */
1310	if (state->start < start) {
1311		prealloc = alloc_extent_state_atomic(prealloc);
1312		if (!prealloc) {
1313			err = -ENOMEM;
1314			goto out;
1315		}
1316		err = split_state(tree, state, prealloc, start);
1317		if (err)
1318			extent_io_tree_panic(tree, err);
1319		prealloc = NULL;
1320		if (err)
1321			goto out;
1322		if (state->end <= end) {
1323			set_state_bits(tree, state, &bits, NULL);
1324			cache_state(state, cached_state);
1325			state = clear_state_bit(tree, state, &clear_bits, 0,
1326						NULL);
1327			if (last_end == (u64)-1)
1328				goto out;
1329			start = last_end + 1;
1330			if (start < end && state && state->start == start &&
1331			    !need_resched())
1332				goto hit_next;
1333		}
1334		goto search_again;
1335	}
1336	/*
1337	 * | ---- desired range ---- |
1338	 *     | state | or               | state |
1339	 *
1340	 * There's a hole, we need to insert something in it and
1341	 * ignore the extent we found.
1342	 */
1343	if (state->start > start) {
1344		u64 this_end;
1345		if (end < last_start)
1346			this_end = end;
1347		else
1348			this_end = last_start - 1;
1349
1350		prealloc = alloc_extent_state_atomic(prealloc);
1351		if (!prealloc) {
1352			err = -ENOMEM;
1353			goto out;
1354		}
1355
1356		/*
1357		 * Avoid to free 'prealloc' if it can be merged with
1358		 * the later extent.
1359		 */
1360		err = insert_state(tree, prealloc, start, this_end,
1361				   NULL, NULL, &bits, NULL);
1362		if (err)
1363			extent_io_tree_panic(tree, err);
1364		cache_state(prealloc, cached_state);
1365		prealloc = NULL;
1366		start = this_end + 1;
1367		goto search_again;
1368	}
1369	/*
1370	 * | ---- desired range ---- |
1371	 *                        | state |
1372	 * We need to split the extent, and set the bit
1373	 * on the first half
1374	 */
1375	if (state->start <= end && state->end > end) {
1376		prealloc = alloc_extent_state_atomic(prealloc);
1377		if (!prealloc) {
1378			err = -ENOMEM;
1379			goto out;
1380		}
1381
1382		err = split_state(tree, state, prealloc, end + 1);
1383		if (err)
1384			extent_io_tree_panic(tree, err);
1385
1386		set_state_bits(tree, prealloc, &bits, NULL);
1387		cache_state(prealloc, cached_state);
1388		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1389		prealloc = NULL;
1390		goto out;
1391	}
1392
1393search_again:
1394	if (start > end)
1395		goto out;
1396	spin_unlock(&tree->lock);
1397	cond_resched();
1398	first_iteration = false;
1399	goto again;
1400
1401out:
1402	spin_unlock(&tree->lock);
1403	if (prealloc)
1404		free_extent_state(prealloc);
1405
1406	return err;
1407}
1408
1409/* wrappers around set/clear extent bit */
1410int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1411			   unsigned bits, struct extent_changeset *changeset)
1412{
1413	/*
1414	 * We don't support EXTENT_LOCKED yet, as current changeset will
1415	 * record any bits changed, so for EXTENT_LOCKED case, it will
1416	 * either fail with -EEXIST or changeset will record the whole
1417	 * range.
1418	 */
1419	BUG_ON(bits & EXTENT_LOCKED);
1420
1421	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1422				changeset);
1423}
1424
1425int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426			   unsigned bits)
1427{
1428	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1429				GFP_NOWAIT, NULL);
1430}
1431
1432int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1433		     unsigned bits, int wake, int delete,
1434		     struct extent_state **cached)
1435{
1436	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1437				  cached, GFP_NOFS, NULL);
1438}
1439
1440int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1441		unsigned bits, struct extent_changeset *changeset)
1442{
1443	/*
1444	 * Don't support EXTENT_LOCKED case, same reason as
1445	 * set_record_extent_bits().
1446	 */
1447	BUG_ON(bits & EXTENT_LOCKED);
1448
1449	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1450				  changeset);
1451}
1452
1453/*
1454 * either insert or lock the state struct between start and end, waiting
1455 * until any conflicting EXTENT_LOCKED range has been unlocked.
1456 */
1457int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1458		     struct extent_state **cached_state)
1459{
1460	int err;
1461	u64 failed_start;
1462
1463	while (1) {
1464		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1465				       EXTENT_LOCKED, &failed_start,
1466				       cached_state, GFP_NOFS, NULL);
1467		if (err == -EEXIST) {
1468			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1469			start = failed_start;
1470		} else
1471			break;
1472		WARN_ON(start > end);
1473	}
1474	return err;
1475}
1476
1477int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1478{
1479	int err;
1480	u64 failed_start;
1481
1482	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1483			       &failed_start, NULL, GFP_NOFS, NULL);
1484	if (err == -EEXIST) {
1485		if (failed_start > start)
1486			clear_extent_bit(tree, start, failed_start - 1,
1487					 EXTENT_LOCKED, 1, 0, NULL);
1488		return 0;
1489	}
1490	return 1;
1491}
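/*
 * Editorial aside, not part of the kernel source: a toy model of the
 * try-lock-and-roll-back pattern above.  try_lock_extent() attempts to set
 * EXTENT_LOCKED exclusively on [start, end]; on a conflict it clears the
 * partially acquired prefix [start, failed_start - 1] and reports failure.
 * Here the extent io tree is replaced by a per-unit bool array.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_UNITS 16

static bool locked[NR_UNITS];

static bool try_lock_range(int start, int end)
{
	int i;

	for (i = start; i <= end; i++) {
		if (locked[i]) {
			/* roll back the partially acquired range */
			while (--i >= start)
				locked[i] = false;
			return false;
		}
		locked[i] = true;
	}
	return true;
}

int main(void)
{
	locked[8] = true;					/* held by someone else */
	printf("lock 0-3:  %d\n", try_lock_range(0, 3));	/* 1 */
	printf("lock 6-10: %d\n", try_lock_range(6, 10));	/* 0, units 6-7 rolled back */
	printf("unit 6:    %d\n", locked[6]);			/* 0 */
	return 0;
}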
1492
1493void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1494{
1495	unsigned long index = start >> PAGE_SHIFT;
1496	unsigned long end_index = end >> PAGE_SHIFT;
1497	struct page *page;
1498
1499	while (index <= end_index) {
1500		page = find_get_page(inode->i_mapping, index);
1501		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1502		clear_page_dirty_for_io(page);
1503		put_page(page);
1504		index++;
1505	}
1506}
1507
1508void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1509{
1510	unsigned long index = start >> PAGE_SHIFT;
1511	unsigned long end_index = end >> PAGE_SHIFT;
1512	struct page *page;
1513
1514	while (index <= end_index) {
1515		page = find_get_page(inode->i_mapping, index);
1516		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1517		__set_page_dirty_nobuffers(page);
1518		account_page_redirty(page);
1519		put_page(page);
1520		index++;
1521	}
1522}
1523
1524/* find the first state struct with 'bits' set after 'start', and
1525 * return it.  tree->lock must be held.  NULL will be returned if
1526 * nothing was found after 'start'
1527 */
1528static struct extent_state *
1529find_first_extent_bit_state(struct extent_io_tree *tree,
1530			    u64 start, unsigned bits)
1531{
1532	struct rb_node *node;
1533	struct extent_state *state;
1534
1535	/*
1536	 * this search will find all the extents that end after
1537	 * our range starts.
1538	 */
1539	node = tree_search(tree, start);
1540	if (!node)
1541		goto out;
1542
1543	while (1) {
1544		state = rb_entry(node, struct extent_state, rb_node);
1545		if (state->end >= start && (state->state & bits))
1546			return state;
1547
1548		node = rb_next(node);
1549		if (!node)
1550			break;
1551	}
1552out:
1553	return NULL;
1554}
1555
1556/*
1557 * find the first offset in the io tree with 'bits' set. zero is
1558 * returned if we find something, and *start_ret and *end_ret are
1559 * set to reflect the state struct that was found.
1560 *
1561 * If nothing was found, 1 is returned. If found something, return 0.
1562 */
1563int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1564			  u64 *start_ret, u64 *end_ret, unsigned bits,
1565			  struct extent_state **cached_state)
1566{
1567	struct extent_state *state;
1568	int ret = 1;
1569
1570	spin_lock(&tree->lock);
1571	if (cached_state && *cached_state) {
1572		state = *cached_state;
1573		if (state->end == start - 1 && extent_state_in_tree(state)) {
1574			while ((state = next_state(state)) != NULL) {
1575				if (state->state & bits)
1576					goto got_it;
1577			}
1578			free_extent_state(*cached_state);
1579			*cached_state = NULL;
1580			goto out;
1581		}
1582		free_extent_state(*cached_state);
1583		*cached_state = NULL;
1584	}
1585
1586	state = find_first_extent_bit_state(tree, start, bits);
1587got_it:
1588	if (state) {
1589		cache_state_if_flags(state, cached_state, 0);
1590		*start_ret = state->start;
1591		*end_ret = state->end;
1592		ret = 0;
1593	}
1594out:
1595	spin_unlock(&tree->lock);
1596	return ret;
1597}
1598
1599/**
1600 * find_contiguous_extent_bit: find a contiguous area of bits
1601 * @tree - io tree to check
1602 * @start - offset to start the search from
1603 * @start_ret - the first offset we found with the bits set
1604 * @end_ret - the final contiguous range of the bits that were set
1605 * @bits - bits to look for
1606 *
1607 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1608 * to set bits appropriately, and then merge them again.  During this time it
1609 * will drop the tree->lock, so use this helper if you want to find the actual
1610 * contiguous area for given bits.  We will search to the first bit we find, and
1611 * then walk down the tree until we find a non-contiguous area.  The area
1612 * returned will be the full contiguous area with the bits set.
1613 */
1614int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1615			       u64 *start_ret, u64 *end_ret, unsigned bits)
1616{
1617	struct extent_state *state;
1618	int ret = 1;
1619
1620	spin_lock(&tree->lock);
1621	state = find_first_extent_bit_state(tree, start, bits);
1622	if (state) {
1623		*start_ret = state->start;
1624		*end_ret = state->end;
1625		while ((state = next_state(state)) != NULL) {
1626			if (state->start > (*end_ret + 1))
1627				break;
1628			*end_ret = state->end;
1629		}
1630		ret = 0;
1631	}
1632	spin_unlock(&tree->lock);
1633	return ret;
1634}
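/*
 * Editorial aside, not part of the kernel source: the contiguity walk done by
 * find_contiguous_extent_bit(), sketched over a sorted array of inclusive
 * ranges.  Starting from the first matching range, the end is extended while
 * the next range begins exactly one byte past the current end.
 */
#include <stdio.h>

struct srange { unsigned long long start, end; };

static int find_contiguous(const struct srange *r, int n,
			   unsigned long long *start_ret, unsigned long long *end_ret)
{
	int i;

	if (!n)
		return 1;
	*start_ret = r[0].start;
	*end_ret = r[0].end;
	for (i = 1; i < n; i++) {
		if (r[i].start > *end_ret + 1)
			break;			/* hole reached, stop extending */
		*end_ret = r[i].end;
	}
	return 0;
}

int main(void)
{
	const struct srange r[] = { { 0, 4095 }, { 4096, 8191 }, { 16384, 20479 } };
	unsigned long long start, end;

	if (!find_contiguous(r, 3, &start, &end))
		printf("[%llu, %llu]\n", start, end);	/* [0, 8191]; 16384 is past the hole */
	return 0;
}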
1635
1636/**
1637 * find_first_clear_extent_bit - find the first range that has @bits not set.
1638 * This range could start before @start.
1639 *
1640 * @tree - the tree to search
1641 * @start - the offset at/after which the found extent should start
1642 * @start_ret - records the beginning of the range
1643 * @end_ret - records the end of the range (inclusive)
1644 * @bits - the set of bits which must be unset
1645 *
1646 * Since an unallocated range is also considered one which doesn't have the bits
1647 * set, it's possible that @end_ret contains -1. This happens in case the range
1648 * spans (last_range_end, end of device]. In this case it's up to the caller to
1649 * trim @end_ret to the appropriate size.
1650 */
1651void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1652				 u64 *start_ret, u64 *end_ret, unsigned bits)
1653{
1654	struct extent_state *state;
1655	struct rb_node *node, *prev = NULL, *next;
1656
1657	spin_lock(&tree->lock);
1658
1659	/* Find first extent with bits cleared */
1660	while (1) {
1661		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1662		if (!node && !next && !prev) {
1663			/*
1664			 * Tree is completely empty, send full range and let
1665			 * caller deal with it
1666			 */
1667			*start_ret = 0;
1668			*end_ret = -1;
1669			goto out;
1670		} else if (!node && !next) {
1671			/*
1672			 * We are past the last allocated chunk, set start at
1673			 * the end of the last extent.
1674			 */
1675			state = rb_entry(prev, struct extent_state, rb_node);
1676			*start_ret = state->end + 1;
1677			*end_ret = -1;
1678			goto out;
1679		} else if (!node) {
1680			node = next;
1681		}
1682		/*
1683		 * At this point 'node' either contains 'start' or start is
1684		 * before 'node'
1685		 */
1686		state = rb_entry(node, struct extent_state, rb_node);
1687
1688		if (in_range(start, state->start, state->end - state->start + 1)) {
1689			if (state->state & bits) {
1690				/*
1691				 * |--range with bits sets--|
1692				 *    |
1693				 *    start
1694				 */
1695				start = state->end + 1;
1696			} else {
1697				/*
1698				 * 'start' falls within a range that doesn't
1699				 * have the bits set, so take its start as
1700				 * the beginning of the desired range
1701				 *
1702				 * |--range with bits cleared----|
1703				 *      |
1704				 *      start
1705				 */
1706				*start_ret = state->start;
1707				break;
1708			}
1709		} else {
1710			/*
1711			 * |---prev range---|---hole/unset---|---node range---|
1712			 *                          |
1713			 *                        start
1714			 *
1715			 *                        or
1716			 *
1717			 * |---hole/unset--||--first node--|
1718			 * 0   |
1719			 *    start
1720			 */
1721			if (prev) {
1722				state = rb_entry(prev, struct extent_state,
1723						 rb_node);
1724				*start_ret = state->end + 1;
1725			} else {
1726				*start_ret = 0;
1727			}
1728			break;
1729		}
1730	}
1731
1732	/*
1733	 * Find the longest stretch from start until an entry which has the
1734	 * bits set
1735	 */
1736	while (1) {
1737		state = rb_entry(node, struct extent_state, rb_node);
1738		if (state->end >= start && !(state->state & bits)) {
1739			*end_ret = state->end;
1740		} else {
1741			*end_ret = state->start - 1;
1742			break;
1743		}
1744
1745		node = rb_next(node);
1746		if (!node)
1747			break;
1748	}
1749out:
1750	spin_unlock(&tree->lock);
1751}
1752
1753/*
1754 * find a contiguous range of bytes in the file marked as delalloc, not
1755 * more than 'max_bytes'.  start and end are used to return the range.
1756 *
1757 * true is returned if we find something, false if nothing was in the tree
1758 */
1759bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1760			       u64 *end, u64 max_bytes,
1761			       struct extent_state **cached_state)
1762{
1763	struct rb_node *node;
1764	struct extent_state *state;
1765	u64 cur_start = *start;
1766	bool found = false;
1767	u64 total_bytes = 0;
1768
1769	spin_lock(&tree->lock);
1770
1771	/*
1772	 * this search will find all the extents that end after
1773	 * our range starts.
1774	 */
1775	node = tree_search(tree, cur_start);
1776	if (!node) {
1777		*end = (u64)-1;
 
1778		goto out;
1779	}
1780
1781	while (1) {
1782		state = rb_entry(node, struct extent_state, rb_node);
1783		if (found && (state->start != cur_start ||
1784			      (state->state & EXTENT_BOUNDARY))) {
1785			goto out;
1786		}
1787		if (!(state->state & EXTENT_DELALLOC)) {
1788			if (!found)
1789				*end = state->end;
1790			goto out;
1791		}
1792		if (!found) {
1793			*start = state->start;
1794			*cached_state = state;
1795			refcount_inc(&state->refs);
1796		}
1797		found = true;
1798		*end = state->end;
1799		cur_start = state->end + 1;
1800		node = rb_next(node);
1801		total_bytes += state->end - state->start + 1;
1802		if (total_bytes >= max_bytes)
1803			break;
1804		if (!node)
1805			break;
1806	}
1807out:
1808	spin_unlock(&tree->lock);
1809	return found;
1810}
1811
1812static int __process_pages_contig(struct address_space *mapping,
1813				  struct page *locked_page,
1814				  pgoff_t start_index, pgoff_t end_index,
1815				  unsigned long page_ops, pgoff_t *index_ret);
1816
1817static noinline void __unlock_for_delalloc(struct inode *inode,
1818					   struct page *locked_page,
1819					   u64 start, u64 end)
1820{
1821	unsigned long index = start >> PAGE_SHIFT;
1822	unsigned long end_index = end >> PAGE_SHIFT;
1823
1824	ASSERT(locked_page);
1825	if (index == locked_page->index && end_index == index)
1826		return;
1827
1828	__process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1829			       PAGE_UNLOCK, NULL);
1830}
1831
1832static noinline int lock_delalloc_pages(struct inode *inode,
1833					struct page *locked_page,
1834					u64 delalloc_start,
1835					u64 delalloc_end)
1836{
1837	unsigned long index = delalloc_start >> PAGE_SHIFT;
1838	unsigned long index_ret = index;
1839	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1840	int ret;
 
1841
1842	ASSERT(locked_page);
1843	if (index == locked_page->index && index == end_index)
1844		return 0;
1845
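	/*
	 * Lock every page in the range except locked_page, which the caller
	 * already holds.  On failure, index_ret records how far we got so the
	 * pages locked so far can be unlocked again below.
	 */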
1846	ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1847				     end_index, PAGE_LOCK, &index_ret);
1848	if (ret == -EAGAIN)
1849		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1850				      (u64)index_ret << PAGE_SHIFT);
1851	return ret;
1852}
1853
1854/*
1855 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1856 * more than @max_bytes.  @start and @end are used to return the range.
1857 *
1858 * Return: true if we find something
1859 *         false if nothing was in the tree
1860 */
1861EXPORT_FOR_TESTS
1862noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1863				    struct page *locked_page, u64 *start,
1864				    u64 *end)
1865{
1866	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1867	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1868	u64 delalloc_start;
1869	u64 delalloc_end;
1870	bool found;
1871	struct extent_state *cached_state = NULL;
1872	int ret;
1873	int loops = 0;
1874
1875again:
1876	/* step one, find a bunch of delalloc bytes starting at start */
1877	delalloc_start = *start;
1878	delalloc_end = 0;
1879	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1880					  max_bytes, &cached_state);
1881	if (!found || delalloc_end <= *start) {
1882		*start = delalloc_start;
1883		*end = delalloc_end;
1884		free_extent_state(cached_state);
1885		return false;
1886	}
1887
1888	/*
1889	 * start comes from the offset of locked_page.  We have to lock
1890	 * pages in order, so we can't process delalloc bytes before
1891	 * locked_page
1892	 */
1893	if (delalloc_start < *start)
1894		delalloc_start = *start;
1895
1896	/*
1897	 * make sure to limit the number of pages we try to lock down
1898	 */
1899	if (delalloc_end + 1 - delalloc_start > max_bytes)
1900		delalloc_end = delalloc_start + max_bytes - 1;
1901
1902	/* step two, lock all the pages after the page that has start */
1903	ret = lock_delalloc_pages(inode, locked_page,
1904				  delalloc_start, delalloc_end);
1905	ASSERT(!ret || ret == -EAGAIN);
1906	if (ret == -EAGAIN) {
1907		/* some of the pages are gone, let's avoid looping by
1908		 * shortening the size of the delalloc range we're searching
1909		 */
1910		free_extent_state(cached_state);
1911		cached_state = NULL;
1912		if (!loops) {
1913			max_bytes = PAGE_SIZE;
1914			loops = 1;
1915			goto again;
1916		} else {
1917			found = false;
1918			goto out_failed;
1919		}
1920	}
 
1921
1922	/* step three, lock the state bits for the whole range */
1923	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1924
1925	/* then test to make sure it is all still delalloc */
1926	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1927			     EXTENT_DELALLOC, 1, cached_state);
1928	if (!ret) {
1929		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1930				     &cached_state);
1931		__unlock_for_delalloc(inode, locked_page,
1932			      delalloc_start, delalloc_end);
1933		cond_resched();
1934		goto again;
1935	}
1936	free_extent_state(cached_state);
1937	*start = delalloc_start;
1938	*end = delalloc_end;
1939out_failed:
1940	return found;
1941}
1942
1943static int __process_pages_contig(struct address_space *mapping,
1944				  struct page *locked_page,
1945				  pgoff_t start_index, pgoff_t end_index,
1946				  unsigned long page_ops, pgoff_t *index_ret)
1947{
1948	unsigned long nr_pages = end_index - start_index + 1;
1949	unsigned long pages_locked = 0;
1950	pgoff_t index = start_index;
1951	struct page *pages[16];
1952	unsigned ret;
1953	int err = 0;
 
1954	int i;
1955
1956	if (page_ops & PAGE_LOCK) {
1957		ASSERT(page_ops == PAGE_LOCK);
1958		ASSERT(index_ret && *index_ret == start_index);
1959	}
1960
1961	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1962		mapping_set_error(mapping, -EIO);
1963
1964	while (nr_pages > 0) {
1965		ret = find_get_pages_contig(mapping, index,
1966				     min_t(unsigned long,
1967				     nr_pages, ARRAY_SIZE(pages)), pages);
1968		if (ret == 0) {
1969			/*
1970			 * Only if we're going to lock these pages,
1971			 * can we find nothing at @index.
1972			 */
1973			ASSERT(page_ops & PAGE_LOCK);
1974			err = -EAGAIN;
1975			goto out;
1976		}
1977
1978		for (i = 0; i < ret; i++) {
1979			if (page_ops & PAGE_SET_PRIVATE2)
1980				SetPagePrivate2(pages[i]);
1981
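			/*
			 * The caller already holds the lock on locked_page;
			 * drop the extra reference and count it as processed.
			 */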
1982			if (locked_page && pages[i] == locked_page) {
1983				put_page(pages[i]);
1984				pages_locked++;
1985				continue;
1986			}
1987			if (page_ops & PAGE_CLEAR_DIRTY)
1988				clear_page_dirty_for_io(pages[i]);
1989			if (page_ops & PAGE_SET_WRITEBACK)
1990				set_page_writeback(pages[i]);
1991			if (page_ops & PAGE_SET_ERROR)
1992				SetPageError(pages[i]);
1993			if (page_ops & PAGE_END_WRITEBACK)
1994				end_page_writeback(pages[i]);
1995			if (page_ops & PAGE_UNLOCK)
1996				unlock_page(pages[i]);
1997			if (page_ops & PAGE_LOCK) {
1998				lock_page(pages[i]);
1999				if (!PageDirty(pages[i]) ||
2000				    pages[i]->mapping != mapping) {
2001					unlock_page(pages[i]);
2002					for (; i < ret; i++)
2003						put_page(pages[i]);
2004					err = -EAGAIN;
2005					goto out;
2006				}
2007			}
2008			put_page(pages[i]);
2009			pages_locked++;
2010		}
2011		nr_pages -= ret;
2012		index += ret;
2013		cond_resched();
2014	}
2015out:
2016	if (err && index_ret)
2017		*index_ret = start_index + pages_locked - 1;
2018	return err;
2019}
2020
2021void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2022				  struct page *locked_page,
2023				  unsigned clear_bits,
2024				  unsigned long page_ops)
2025{
2026	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2027
2028	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2029			       start >> PAGE_SHIFT, end >> PAGE_SHIFT,
2030			       page_ops, NULL);
2031}
2032
2033/*
2034 * count the number of bytes in the tree that have a given bit(s)
2035 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
2036 * cached.  The total number found is returned.
2037 */
2038u64 count_range_bits(struct extent_io_tree *tree,
2039		     u64 *start, u64 search_end, u64 max_bytes,
2040		     unsigned bits, int contig)
2041{
2042	struct rb_node *node;
2043	struct extent_state *state;
2044	u64 cur_start = *start;
2045	u64 total_bytes = 0;
2046	u64 last = 0;
2047	int found = 0;
2048
2049	if (WARN_ON(search_end <= cur_start))
2050		return 0;
2051
2052	spin_lock(&tree->lock);
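	/*
	 * Fast path: when counting EXTENT_DIRTY from offset 0, use the
	 * cached dirty_bytes counter instead of walking the tree.
	 */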
2053	if (cur_start == 0 && bits == EXTENT_DIRTY) {
2054		total_bytes = tree->dirty_bytes;
2055		goto out;
2056	}
2057	/*
2058	 * this search will find all the extents that end after
2059	 * our range starts.
2060	 */
2061	node = tree_search(tree, cur_start);
2062	if (!node)
2063		goto out;
2064
2065	while (1) {
2066		state = rb_entry(node, struct extent_state, rb_node);
2067		if (state->start > search_end)
2068			break;
2069		if (contig && found && state->start > last + 1)
2070			break;
2071		if (state->end >= cur_start && (state->state & bits) == bits) {
2072			total_bytes += min(search_end, state->end) + 1 -
2073				       max(cur_start, state->start);
2074			if (total_bytes >= max_bytes)
2075				break;
2076			if (!found) {
2077				*start = max(cur_start, state->start);
2078				found = 1;
2079			}
2080			last = state->end;
2081		} else if (contig && found) {
2082			break;
2083		}
2084		node = rb_next(node);
2085		if (!node)
2086			break;
2087	}
2088out:
2089	spin_unlock(&tree->lock);
2090	return total_bytes;
2091}
2092
2093/*
2094 * set the failure record for a given byte offset in the tree.  If there isn't
2095 * an extent_state starting at that offset, -ENOENT is returned.
2096 */
2097int set_state_failrec(struct extent_io_tree *tree, u64 start,
2098		      struct io_failure_record *failrec)
2099{
2100	struct rb_node *node;
2101	struct extent_state *state;
2102	int ret = 0;
2103
2104	spin_lock(&tree->lock);
2105	/*
2106	 * this search will find all the extents that end after
2107	 * our range starts.
2108	 */
2109	node = tree_search(tree, start);
2110	if (!node) {
2111		ret = -ENOENT;
2112		goto out;
2113	}
2114	state = rb_entry(node, struct extent_state, rb_node);
2115	if (state->start != start) {
2116		ret = -ENOENT;
2117		goto out;
2118	}
2119	state->failrec = failrec;
2120out:
2121	spin_unlock(&tree->lock);
2122	return ret;
2123}
2124
2125struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2126{
2127	struct rb_node *node;
2128	struct extent_state *state;
2129	struct io_failure_record *failrec;
2130
2131	spin_lock(&tree->lock);
2132	/*
2133	 * this search will find all the extents that end after
2134	 * our range starts.
2135	 */
2136	node = tree_search(tree, start);
2137	if (!node) {
2138		failrec = ERR_PTR(-ENOENT);
2139		goto out;
2140	}
2141	state = rb_entry(node, struct extent_state, rb_node);
2142	if (state->start != start) {
2143		failrec = ERR_PTR(-ENOENT);
2144		goto out;
2145	}
2146
2147	failrec = state->failrec;
2148out:
2149	spin_unlock(&tree->lock);
2150	return failrec;
2151}
2152
2153/*
2154 * searches a range in the state tree for a given mask.
2155 * If 'filled' == 1, this returns 1 only if every extent in the range
2156 * has the bits set.  Otherwise, 1 is returned if any bit in the
2157 * range is found set.
2158 */
2159int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2160		   unsigned bits, int filled, struct extent_state *cached)
2161{
2162	struct extent_state *state = NULL;
2163	struct rb_node *node;
2164	int bitset = 0;
2165
2166	spin_lock(&tree->lock);
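	/*
	 * Start from the cached state if it is still in the tree and covers
	 * 'start', otherwise fall back to a full tree search.
	 */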
2167	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2168	    cached->end > start)
2169		node = &cached->rb_node;
2170	else
2171		node = tree_search(tree, start);
2172	while (node && start <= end) {
2173		state = rb_entry(node, struct extent_state, rb_node);
2174
2175		if (filled && state->start > start) {
2176			bitset = 0;
2177			break;
2178		}
2179
2180		if (state->start > end)
2181			break;
2182
2183		if (state->state & bits) {
2184			bitset = 1;
2185			if (!filled)
2186				break;
2187		} else if (filled) {
2188			bitset = 0;
2189			break;
2190		}
2191
2192		if (state->end == (u64)-1)
2193			break;
2194
2195		start = state->end + 1;
2196		if (start > end)
2197			break;
2198		node = rb_next(node);
2199		if (!node) {
2200			if (filled)
2201				bitset = 0;
2202			break;
2203		}
2204	}
2205	spin_unlock(&tree->lock);
2206	return bitset;
2207}
2208
2209/*
2210 * helper function to set a given page up to date if all the
2211 * extents in the tree for that page are up to date
2212 */
2213static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2214{
2215	u64 start = page_offset(page);
2216	u64 end = start + PAGE_SIZE - 1;
2217	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2218		SetPageUptodate(page);
2219}
2220
2221int free_io_failure(struct extent_io_tree *failure_tree,
2222		    struct extent_io_tree *io_tree,
2223		    struct io_failure_record *rec)
2224{
2225	int ret;
2226	int err = 0;
 
2227
2228	set_state_failrec(failure_tree, rec->start, NULL);
2229	ret = clear_extent_bits(failure_tree, rec->start,
2230				rec->start + rec->len - 1,
2231				EXTENT_LOCKED | EXTENT_DIRTY);
2232	if (ret)
2233		err = ret;
2234
2235	ret = clear_extent_bits(io_tree, rec->start,
2236				rec->start + rec->len - 1,
2237				EXTENT_DAMAGED);
2238	if (ret && !err)
2239		err = ret;
2240
2241	kfree(rec);
2242	return err;
2243}
2244
2245/*
2246 * this bypasses the standard btrfs submit functions deliberately, as
2247 * the standard behavior is to write all copies in a raid setup. here we only
2248 * want to write the one bad copy. so we do the mapping for ourselves and issue
2249 * submit_bio directly.
2250 * to avoid any synchronization issues, wait for the data after writing, which
2251 * actually prevents the read that triggered the error from finishing.
2252 * currently, there can be no more than two copies of every data bit. thus,
2253 * exactly one rewrite is required.
2254 */
2255int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2256		      u64 length, u64 logical, struct page *page,
2257		      unsigned int pg_offset, int mirror_num)
2258{
 
2259	struct bio *bio;
2260	struct btrfs_device *dev;
2261	u64 map_length = 0;
2262	u64 sector;
2263	struct btrfs_bio *bbio = NULL;
 
2264	int ret;
2265
2266	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2267	BUG_ON(!mirror_num);
2268
2269	bio = btrfs_io_bio_alloc(1);
2270	bio->bi_iter.bi_size = 0;
2271	map_length = length;
2272
2273	/*
2274	 * Avoid races with device replace and make sure our bbio has devices
2275	 * associated to its stripes that don't go away while we are doing the
2276	 * read repair operation.
2277	 */
2278	btrfs_bio_counter_inc_blocked(fs_info);
2279	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2280		/*
2281		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2282		 * to update all raid stripes, but here we just want to correct
2283		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2284		 * stripe's dev and sector.
2285		 */
2286		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2287				      &map_length, &bbio, 0);
2288		if (ret) {
2289			btrfs_bio_counter_dec(fs_info);
2290			bio_put(bio);
2291			return -EIO;
2292		}
2293		ASSERT(bbio->mirror_num == 1);
2294	} else {
2295		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2296				      &map_length, &bbio, mirror_num);
2297		if (ret) {
2298			btrfs_bio_counter_dec(fs_info);
2299			bio_put(bio);
2300			return -EIO;
2301		}
2302		BUG_ON(mirror_num != bbio->mirror_num);
2303	}
2304
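	/*
	 * mirror_num is 1-based, so the stripe for this mirror lives at index
	 * mirror_num - 1; convert its physical byte offset to a 512b sector.
	 */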
2305	sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2306	bio->bi_iter.bi_sector = sector;
2307	dev = bbio->stripes[bbio->mirror_num - 1].dev;
2308	btrfs_put_bbio(bbio);
2309	if (!dev || !dev->bdev ||
2310	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2311		btrfs_bio_counter_dec(fs_info);
2312		bio_put(bio);
2313		return -EIO;
2314	}
2315	bio_set_dev(bio, dev->bdev);
2316	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2317	bio_add_page(bio, page, length, pg_offset);
2318
2319	if (btrfsic_submit_bio_wait(bio)) {
2320		/* try to remap that extent elsewhere? */
2321		btrfs_bio_counter_dec(fs_info);
2322		bio_put(bio);
2323		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2324		return -EIO;
2325	}
2326
2327	btrfs_info_rl_in_rcu(fs_info,
2328		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2329				  ino, start,
2330				  rcu_str_deref(dev->name), sector);
2331	btrfs_bio_counter_dec(fs_info);
2332	bio_put(bio);
2333	return 0;
2334}
2335
2336int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2337{
2338	struct btrfs_fs_info *fs_info = eb->fs_info;
2339	u64 start = eb->start;
2340	int i, num_pages = num_extent_pages(eb);
2341	int ret = 0;
2342
2343	if (sb_rdonly(fs_info->sb))
2344		return -EROFS;
2345
2346	for (i = 0; i < num_pages; i++) {
2347		struct page *p = eb->pages[i];
2348
2349		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2350					start - page_offset(p), mirror_num);
2351		if (ret)
2352			break;
2353		start += PAGE_SIZE;
2354	}
2355
2356	return ret;
2357}
2358
2359/*
2360 * each time an IO finishes, we do a fast check in the IO failure tree
2361 * to see if we need to process or clean up an io_failure_record
2362 */
2363int clean_io_failure(struct btrfs_fs_info *fs_info,
2364		     struct extent_io_tree *failure_tree,
2365		     struct extent_io_tree *io_tree, u64 start,
2366		     struct page *page, u64 ino, unsigned int pg_offset)
2367{
2368	u64 private;
2369	struct io_failure_record *failrec;
 
2370	struct extent_state *state;
2371	int num_copies;
2372	int ret;
2373
2374	private = 0;
2375	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2376			       EXTENT_DIRTY, 0);
2377	if (!ret)
2378		return 0;
2379
2380	failrec = get_state_failrec(failure_tree, start);
2381	if (IS_ERR(failrec))
2382		return 0;
2383
2384	BUG_ON(!failrec->this_mirror);
2385
2386	if (failrec->in_validation) {
2387		/* there was no real error, just free the record */
2388		btrfs_debug(fs_info,
2389			"clean_io_failure: freeing dummy error at %llu",
2390			failrec->start);
2391		goto out;
2392	}
2393	if (sb_rdonly(fs_info->sb))
2394		goto out;
2395
2396	spin_lock(&io_tree->lock);
2397	state = find_first_extent_bit_state(io_tree,
2398					    failrec->start,
2399					    EXTENT_LOCKED);
2400	spin_unlock(&io_tree->lock);
2401
2402	if (state && state->start <= failrec->start &&
2403	    state->end >= failrec->start + failrec->len - 1) {
2404		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2405					      failrec->len);
2406		if (num_copies > 1)  {
2407			repair_io_failure(fs_info, ino, start, failrec->len,
2408					  failrec->logical, page, pg_offset,
2409					  failrec->failed_mirror);
2410		}
2411	}
2412
2413out:
2414	free_io_failure(failure_tree, io_tree, failrec);
2415
2416	return 0;
2417}
2418
2419/*
2420 * Can be called when
2421 * - hold extent lock
2422 * - under ordered extent
2423 * - the inode is freeing
2424 */
2425void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2426{
2427	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2428	struct io_failure_record *failrec;
2429	struct extent_state *state, *next;
2430
2431	if (RB_EMPTY_ROOT(&failure_tree->state))
2432		return;
2433
2434	spin_lock(&failure_tree->lock);
2435	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2436	while (state) {
2437		if (state->start > end)
2438			break;
2439
2440		ASSERT(state->end <= end);
2441
2442		next = next_state(state);
2443
2444		failrec = state->failrec;
2445		free_extent_state(state);
2446		kfree(failrec);
2447
2448		state = next;
2449	}
2450	spin_unlock(&failure_tree->lock);
2451}
2452
2453static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2454							     u64 start, u64 end)
2455{
2456	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2457	struct io_failure_record *failrec;
2458	struct extent_map *em;
2459	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2460	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2461	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2462	int ret;
2463	u64 logical;
2464
2465	failrec = get_state_failrec(failure_tree, start);
2466	if (!IS_ERR(failrec)) {
2467		btrfs_debug(fs_info,
2468			"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2469			failrec->logical, failrec->start, failrec->len,
2470			failrec->in_validation);
2471		/*
2472		 * when data can be on disk more than twice, add to failrec here
2473		 * (e.g. with a list for failed_mirror) to make
2474		 * clean_io_failure() clean all those errors at once.
2475		 */
2476
2477		return failrec;
2478	}
2479
2480	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2481	if (!failrec)
2482		return ERR_PTR(-ENOMEM);
2483
2484	failrec->start = start;
2485	failrec->len = end - start + 1;
2486	failrec->this_mirror = 0;
2487	failrec->bio_flags = 0;
2488	failrec->in_validation = 0;
2489
2490	read_lock(&em_tree->lock);
2491	em = lookup_extent_mapping(em_tree, start, failrec->len);
2492	if (!em) {
2493		read_unlock(&em_tree->lock);
2494		kfree(failrec);
2495		return ERR_PTR(-EIO);
2496	}
2497
2498	if (em->start > start || em->start + em->len <= start) {
2499		free_extent_map(em);
2500		em = NULL;
2501	}
2502	read_unlock(&em_tree->lock);
2503	if (!em) {
2504		kfree(failrec);
2505		return ERR_PTR(-EIO);
2506	}
2507
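	/*
	 * Translate the file offset into a logical disk address.  For
	 * compressed extents the whole extent is read, so use its start.
	 */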
2508	logical = start - em->start;
2509	logical = em->block_start + logical;
2510	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2511		logical = em->block_start;
2512		failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2513		extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2514	}
2515
2516	btrfs_debug(fs_info,
2517		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2518		    logical, start, failrec->len);
2519
2520	failrec->logical = logical;
2521	free_extent_map(em);
2522
2523	/* Set the bits in the private failure tree */
2524	ret = set_extent_bits(failure_tree, start, end,
2525			      EXTENT_LOCKED | EXTENT_DIRTY);
2526	if (ret >= 0) {
2527		ret = set_state_failrec(failure_tree, start, failrec);
2528		/* Set the bits in the inode's tree */
2529		ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2530	} else if (ret < 0) {
2531		kfree(failrec);
2532		return ERR_PTR(ret);
2533	}
2534
2535	return failrec;
2536}
2537
2538static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
2539				   struct io_failure_record *failrec,
2540				   int failed_mirror)
2541{
2542	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2543	int num_copies;
2544
2545	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
 
2546	if (num_copies == 1) {
2547		/*
2548		 * we only have a single copy of the data, so don't bother with
2549		 * all the retry and error correction code that follows. no
2550		 * matter what the error is, it is very likely to persist.
2551		 */
2552		btrfs_debug(fs_info,
2553			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2554			num_copies, failrec->this_mirror, failed_mirror);
2555		return false;
2556	}
2557
2558	/*
2559	 * there are two premises:
2560	 *	a) deliver good data to the caller
2561	 *	b) correct the bad sectors on disk
2562	 */
2563	if (needs_validation) {
2564		/*
2565		 * to fulfill b), we need to know the exact failing sectors, as
2566		 * we don't want to rewrite any more than the failed ones. thus,
2567		 * we need separate read requests for the failed bio
2568		 *
2569		 * if the following BUG_ON triggers, our validation request got
2570		 * merged. we need separate requests for our algorithm to work.
2571		 */
2572		BUG_ON(failrec->in_validation);
2573		failrec->in_validation = 1;
2574		failrec->this_mirror = failed_mirror;
2575	} else {
2576		/*
2577		 * we're ready to fulfill a) and b) alongside. get a good copy
2578		 * of the failed sector and if we succeed, we have setup
2579		 * everything for repair_io_failure to do the rest for us.
2580		 */
2581		if (failrec->in_validation) {
2582			BUG_ON(failrec->this_mirror != failed_mirror);
2583			failrec->in_validation = 0;
2584			failrec->this_mirror = 0;
2585		}
2586		failrec->failed_mirror = failed_mirror;
2587		failrec->this_mirror++;
2588		if (failrec->this_mirror == failed_mirror)
2589			failrec->this_mirror++;
2590	}
2591
2592	if (failrec->this_mirror > num_copies) {
2593		btrfs_debug(fs_info,
2594			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2595			num_copies, failrec->this_mirror, failed_mirror);
2596		return false;
2597	}
2598
2599	return true;
2600}
2601
2602static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
2603{
2604	u64 len = 0;
2605	const u32 blocksize = inode->i_sb->s_blocksize;
2606
2607	/*
2608	 * If bi_status is BLK_STS_OK, then this was a checksum error, not an
2609	 * I/O error. In this case, we already know exactly which sector was
2610	 * bad, so we don't need to validate.
2611	 */
2612	if (bio->bi_status == BLK_STS_OK)
2613		return false;
2614
2615	/*
2616	 * We need to validate each sector individually if the failed I/O was
2617	 * for multiple sectors.
2618	 *
2619	 * There are a few possible bios that can end up here:
2620	 * 1. A buffered read bio, which is not cloned.
2621	 * 2. A direct I/O read bio, which is cloned.
2622	 * 3. A (buffered or direct) repair bio, which is not cloned.
2623	 *
2624	 * For cloned bios (case 2), we can get the size from
2625	 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
2626	 * it from the bvecs.
2627	 */
2628	if (bio_flagged(bio, BIO_CLONED)) {
2629		if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
2630			return true;
2631	} else {
2632		struct bio_vec *bvec;
2633		int i;
2634
2635		bio_for_each_bvec_all(bvec, bio, i) {
2636			len += bvec->bv_len;
2637			if (len > blocksize)
2638				return true;
2639		}
2640	}
2641	return false;
2642}
2643
2644blk_status_t btrfs_submit_read_repair(struct inode *inode,
2645				      struct bio *failed_bio, u64 phy_offset,
2646				      struct page *page, unsigned int pgoff,
2647				      u64 start, u64 end, int failed_mirror,
2648				      submit_bio_hook_t *submit_bio_hook)
2649{
2650	struct io_failure_record *failrec;
2651	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2652	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2653	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2654	struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
2655	const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits;
2656	bool need_validation;
2657	struct bio *repair_bio;
2658	struct btrfs_io_bio *repair_io_bio;
2659	blk_status_t status;
2660
2661	btrfs_debug(fs_info,
2662		   "repair read error: read error at %llu", start);
2663
2664	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2665
2666	failrec = btrfs_get_io_failure_record(inode, start, end);
2667	if (IS_ERR(failrec))
2668		return errno_to_blk_status(PTR_ERR(failrec));
2669
2670	need_validation = btrfs_io_needs_validation(inode, failed_bio);
2671
2672	if (!btrfs_check_repairable(inode, need_validation, failrec,
2673				    failed_mirror)) {
2674		free_io_failure(failure_tree, tree, failrec);
2675		return BLK_STS_IOERR;
2676	}
2677
2678	repair_bio = btrfs_io_bio_alloc(1);
2679	repair_io_bio = btrfs_io_bio(repair_bio);
2680	repair_bio->bi_opf = REQ_OP_READ;
2681	if (need_validation)
2682		repair_bio->bi_opf |= REQ_FAILFAST_DEV;
2683	repair_bio->bi_end_io = failed_bio->bi_end_io;
2684	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2685	repair_bio->bi_private = failed_bio->bi_private;
2686
2687	if (failed_io_bio->csum) {
2688		const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2689
2690		repair_io_bio->csum = repair_io_bio->csum_inline;
2691		memcpy(repair_io_bio->csum,
2692		       failed_io_bio->csum + csum_size * icsum, csum_size);
2693	}
2694
2695	bio_add_page(repair_bio, page, failrec->len, pgoff);
2696	repair_io_bio->logical = failrec->start;
2697	repair_io_bio->iter = repair_bio->bi_iter;
2698
2699	btrfs_debug(btrfs_sb(inode->i_sb),
2700"repair read error: submitting new read to mirror %d, in_validation=%d",
2701		    failrec->this_mirror, failrec->in_validation);
2702
2703	status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2704				 failrec->bio_flags);
2705	if (status) {
2706		free_io_failure(failure_tree, tree, failrec);
2707		bio_put(repair_bio);
2708	}
2709	return status;
 
2710}
2711
2712/* lots and lots of room for performance fixes in the end_bio funcs */
2713
2714void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2715{
2716	int uptodate = (err == 0);
 
2717	int ret = 0;
2718
2719	btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2720
2721	if (!uptodate) {
2722		ClearPageUptodate(page);
2723		SetPageError(page);
2724		ret = err < 0 ? err : -EIO;
2725		mapping_set_error(page->mapping, ret);
2726	}
2727}
2728
2729/*
2730 * after a writepage IO is done, we need to:
2731 * clear the uptodate bits on error
2732 * clear the writeback bits in the extent tree for this IO
2733 * end_page_writeback if the page has no more pending IO
2734 *
2735 * Scheduling is not allowed, so the extent state tree is expected
2736 * to have one and only one object corresponding to this IO.
2737 */
2738static void end_bio_extent_writepage(struct bio *bio)
2739{
2740	int error = blk_status_to_errno(bio->bi_status);
2741	struct bio_vec *bvec;
2742	u64 start;
2743	u64 end;
2744	struct bvec_iter_all iter_all;
2745
2746	ASSERT(!bio_flagged(bio, BIO_CLONED));
2747	bio_for_each_segment_all(bvec, bio, iter_all) {
2748		struct page *page = bvec->bv_page;
2749		struct inode *inode = page->mapping->host;
2750		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2751
2752		/* We always issue full-page reads, but if some block
2753		 * in a page fails to read, blk_update_request() will
2754		 * advance bv_offset and adjust bv_len to compensate.
2755		 * Print a warning for nonzero offsets, and an error
2756		 * if they don't add up to a full page.  */
2757		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2758			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2759				btrfs_err(fs_info,
2760				   "partial page write in btrfs with offset %u and length %u",
2761					bvec->bv_offset, bvec->bv_len);
2762			else
2763				btrfs_info(fs_info,
2764				   "incomplete page write in btrfs with offset %u and length %u",
2765					bvec->bv_offset, bvec->bv_len);
2766		}
2767
2768		start = page_offset(page);
2769		end = start + bvec->bv_offset + bvec->bv_len - 1;
2770
2771		end_extent_writepage(page, error, start, end);
2772		end_page_writeback(page);
2773	}
2774
2775	bio_put(bio);
2776}
2777
2778static void
2779endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2780			      int uptodate)
2781{
2782	struct extent_state *cached = NULL;
2783	u64 end = start + len - 1;
2784
2785	if (uptodate && tree->track_uptodate)
2786		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2787	unlock_extent_cached_atomic(tree, start, end, &cached);
2788}
2789
2790/*
2791 * after a readpage IO is done, we need to:
2792 * clear the uptodate bits on error
2793 * set the uptodate bits if things worked
2794 * set the page up to date if all extents in the tree are uptodate
2795 * clear the lock bit in the extent tree
2796 * unlock the page if there are no other extents locked for it
2797 *
2798 * Scheduling is not allowed, so the extent state tree is expected
2799 * to have one and only one object corresponding to this IO.
2800 */
2801static void end_bio_extent_readpage(struct bio *bio)
2802{
2803	struct bio_vec *bvec;
2804	int uptodate = !bio->bi_status;
2805	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2806	struct extent_io_tree *tree, *failure_tree;
2807	u64 offset = 0;
2808	u64 start;
2809	u64 end;
2810	u64 len;
2811	u64 extent_start = 0;
2812	u64 extent_len = 0;
2813	int mirror;
2814	int ret;
2815	struct bvec_iter_all iter_all;
2816
2817	ASSERT(!bio_flagged(bio, BIO_CLONED));
2818	bio_for_each_segment_all(bvec, bio, iter_all) {
2819		struct page *page = bvec->bv_page;
2820		struct inode *inode = page->mapping->host;
2821		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2822		bool data_inode = btrfs_ino(BTRFS_I(inode))
2823			!= BTRFS_BTREE_INODE_OBJECTID;
2824
2825		btrfs_debug(fs_info,
2826			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2827			(u64)bio->bi_iter.bi_sector, bio->bi_status,
2828			io_bio->mirror_num);
2829		tree = &BTRFS_I(inode)->io_tree;
2830		failure_tree = &BTRFS_I(inode)->io_failure_tree;
2831
2832		/* We always issue full-page reads, but if some block
2833		 * in a page fails to read, blk_update_request() will
2834		 * advance bv_offset and adjust bv_len to compensate.
2835		 * Print a warning for nonzero offsets, and an error
2836		 * if they don't add up to a full page.  */
2837		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2838			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2839				btrfs_err(fs_info,
2840					"partial page read in btrfs with offset %u and length %u",
2841					bvec->bv_offset, bvec->bv_len);
2842			else
2843				btrfs_info(fs_info,
2844					"incomplete page read in btrfs with offset %u and length %u",
2845					bvec->bv_offset, bvec->bv_len);
2846		}
2847
2848		start = page_offset(page);
2849		end = start + bvec->bv_offset + bvec->bv_len - 1;
2850		len = bvec->bv_len;
2851
2852		mirror = io_bio->mirror_num;
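		/*
		 * For successful reads, let the owner's end_io hook verify
		 * checksums; if it fails, 'uptodate' is cleared and the block
		 * goes through the repair path below.
		 */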
2853		if (likely(uptodate)) {
 
2854			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2855							      page, start, end,
2856							      mirror);
2857			if (ret)
2858				uptodate = 0;
2859			else
2860				clean_io_failure(BTRFS_I(inode)->root->fs_info,
2861						 failure_tree, tree, start,
2862						 page,
2863						 btrfs_ino(BTRFS_I(inode)), 0);
2864		}
2865
2866		if (likely(uptodate))
2867			goto readpage_ok;
2868
2869		if (data_inode) {
2870
2871			/*
2872			 * The generic bio_readpage_error handles errors the
2873			 * following way: If possible, new read requests are
2874			 * created and submitted and will end up in
2875			 * end_bio_extent_readpage as well (if we're lucky,
2876			 * not in the !uptodate case). In that case it returns
2877			 * 0 and we just go on with the next page in our bio.
2878			 * If it can't handle the error it will return -EIO and
2879			 * we remain responsible for that page.
2880			 */
2881			if (!btrfs_submit_read_repair(inode, bio, offset, page,
2882						start - page_offset(page),
2883						start, end, mirror,
2884						tree->ops->submit_bio_hook)) {
2885				uptodate = !bio->bi_status;
2886				offset += len;
2887				continue;
2888			}
2889		} else {
2890			struct extent_buffer *eb;
2891
2892			eb = (struct extent_buffer *)page->private;
2893			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2894			eb->read_mirror = mirror;
2895			atomic_dec(&eb->io_pages);
2896			if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2897					       &eb->bflags))
2898				btree_readahead_hook(eb, -EIO);
2899		}
2900readpage_ok:
2901		if (likely(uptodate)) {
2902			loff_t i_size = i_size_read(inode);
2903			pgoff_t end_index = i_size >> PAGE_SHIFT;
2904			unsigned off;
2905
2906			/* Zero out the end if this page straddles i_size */
2907			off = offset_in_page(i_size);
2908			if (page->index == end_index && off)
2909				zero_user_segment(page, off, PAGE_SIZE);
2910			SetPageUptodate(page);
2911		} else {
2912			ClearPageUptodate(page);
2913			SetPageError(page);
2914		}
2915		unlock_page(page);
2916		offset += len;
2917
2918		if (unlikely(!uptodate)) {
2919			if (extent_len) {
2920				endio_readpage_release_extent(tree,
2921							      extent_start,
2922							      extent_len, 1);
2923				extent_start = 0;
2924				extent_len = 0;
2925			}
2926			endio_readpage_release_extent(tree, start,
2927						      end - start + 1, 0);
2928		} else if (!extent_len) {
2929			extent_start = start;
2930			extent_len = end + 1 - start;
2931		} else if (extent_start + extent_len == start) {
2932			extent_len += end + 1 - start;
2933		} else {
2934			endio_readpage_release_extent(tree, extent_start,
2935						      extent_len, uptodate);
2936			extent_start = start;
2937			extent_len = end + 1 - start;
2938		}
2939	}
2940
2941	if (extent_len)
2942		endio_readpage_release_extent(tree, extent_start, extent_len,
2943					      uptodate);
2944	btrfs_io_bio_free_csum(io_bio);
 
2945	bio_put(bio);
2946}
2947
2948/*
2949 * Initialize the members up to but not including 'bio'. Use after allocating a
2950 * new bio with bio_alloc_bioset, which does not initialize the bytes outside of
2951 * 'bio' since __GFP_ZERO is not supported.
2952 */
2953static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2954{
2955	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2956}
2957
2958/*
2959 * The following helpers allocate a bio. As it's backed by a bioset, it'll
2960 * never fail.  We're returning a bio right now but you can call btrfs_io_bio
2961 * for the appropriate container_of magic
2962 */
2963struct bio *btrfs_bio_alloc(u64 first_byte)
2964{
2965	struct bio *bio;
2966
2967	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2968	bio->bi_iter.bi_sector = first_byte >> 9;
2969	btrfs_io_bio_init(btrfs_io_bio(bio));
2970	return bio;
2971}
2972
2973struct bio *btrfs_bio_clone(struct bio *bio)
2974{
2975	struct btrfs_io_bio *btrfs_bio;
2976	struct bio *new;
2977
2978	/* Bio allocation backed by a bioset does not fail */
2979	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2980	btrfs_bio = btrfs_io_bio(new);
2981	btrfs_io_bio_init(btrfs_bio);
2982	btrfs_bio->iter = bio->bi_iter;
2983	return new;
2984}
2985
2986struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2987{
 
2988	struct bio *bio;
2989
2990	/* Bio allocation backed by a bioset does not fail */
2991	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2992	btrfs_io_bio_init(btrfs_io_bio(bio));
2993	return bio;
2994}
2995
2996struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2997{
2998	struct bio *bio;
2999	struct btrfs_io_bio *btrfs_bio;
3000
3001	/* this will never fail when it's backed by a bioset */
3002	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3003	ASSERT(bio);
3004
3005	btrfs_bio = btrfs_io_bio(bio);
3006	btrfs_io_bio_init(btrfs_bio);
3007
3008	bio_trim(bio, offset >> 9, size >> 9);
3009	btrfs_bio->iter = bio->bi_iter;
3010	return bio;
3011}
3012
3013/*
3014 * @opf:	bio REQ_OP_* and REQ_* flags as one value
3015 * @wbc:	optional writeback control for io accounting
3016 * @page:	page to add to the bio
3017 * @pg_offset:	offset within @page where the data starts
3018 * @size:	portion of page that we want to write
3019 * @offset:	logical (disk) byte offset where the IO starts; used to check
3020 *              whether we are adding a page contiguous to the previous bio
3021 * @bio_ret:	must be valid pointer, newly allocated bio will be stored there
3022 * @end_io_func:     end_io callback for new bio
3023 * @mirror_num:	     desired mirror to read/write
3024 * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
3025 * @bio_flags:	flags of the current bio to see if we can merge them
3026 */
3027static int submit_extent_page(unsigned int opf,
3028			      struct writeback_control *wbc,
3029			      struct page *page, u64 offset,
3030			      size_t size, unsigned long pg_offset,
3031			      struct bio **bio_ret,
3032			      bio_end_io_t end_io_func,
3033			      int mirror_num,
3034			      unsigned long prev_bio_flags,
3035			      unsigned long bio_flags,
3036			      bool force_bio_submit)
3037{
3038	int ret = 0;
3039	struct bio *bio;
3040	size_t page_size = min_t(size_t, size, PAGE_SIZE);
3041	sector_t sector = offset >> 9;
3042	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
3043
3044	ASSERT(bio_ret);
3045
3046	if (*bio_ret) {
3047		bool contig;
3048		bool can_merge = true;
3049
 
3050		bio = *bio_ret;
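		/*
		 * A compressed bio covers the whole compressed extent, so
		 * contiguity is judged against its starting sector; otherwise
		 * the new page must start at the current end of the bio.
		 */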
3051		if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
3052			contig = bio->bi_iter.bi_sector == sector;
3053		else
3054			contig = bio_end_sector(bio) == sector;
3055
3056		ASSERT(tree->ops);
3057		if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
3058			can_merge = false;
3059
3060		if (prev_bio_flags != bio_flags || !contig || !can_merge ||
3061		    force_bio_submit ||
3062		    bio_add_page(bio, page, page_size, pg_offset) < page_size) {
3063			ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
3064			if (ret < 0) {
3065				*bio_ret = NULL;
3066				return ret;
3067			}
3068			bio = NULL;
3069		} else {
3070			if (wbc)
3071				wbc_account_cgroup_owner(wbc, page, page_size);
3072			return 0;
3073		}
3074	}
3075
3076	bio = btrfs_bio_alloc(offset);
3077	bio_add_page(bio, page, page_size, pg_offset);
3078	bio->bi_end_io = end_io_func;
3079	bio->bi_private = tree;
3080	bio->bi_write_hint = page->mapping->host->i_write_hint;
3081	bio->bi_opf = opf;
3082	if (wbc) {
3083		struct block_device *bdev;
3084
3085		bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
3086		bio_set_dev(bio, bdev);
3087		wbc_init_bio(wbc, bio);
3088		wbc_account_cgroup_owner(wbc, page, page_size);
3089	}
3090
3091	*bio_ret = bio;
3092
3093	return ret;
3094}
3095
3096static void attach_extent_buffer_page(struct extent_buffer *eb,
3097				      struct page *page)
3098{
3099	if (!PagePrivate(page))
3100		attach_page_private(page, eb);
3101	else
3102		WARN_ON(page->private != (unsigned long)eb);
3103}
3104
3105void set_page_extent_mapped(struct page *page)
3106{
3107	if (!PagePrivate(page))
3108		attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3109}
3110
3111static struct extent_map *
3112__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3113		 u64 start, u64 len, get_extent_t *get_extent,
3114		 struct extent_map **em_cached)
3115{
3116	struct extent_map *em;
3117
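	/*
	 * Reuse the cached extent map if it is still in the tree and covers
	 * 'start', otherwise drop the stale reference and look it up again.
	 */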
3118	if (em_cached && *em_cached) {
3119		em = *em_cached;
3120		if (extent_map_in_tree(em) && start >= em->start &&
3121		    start < extent_map_end(em)) {
3122			refcount_inc(&em->refs);
3123			return em;
3124		}
3125
3126		free_extent_map(em);
3127		*em_cached = NULL;
3128	}
3129
3130	em = get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3131	if (em_cached && !IS_ERR_OR_NULL(em)) {
3132		BUG_ON(*em_cached);
3133		refcount_inc(&em->refs);
3134		*em_cached = em;
3135	}
3136	return em;
3137}
3138/*
3139 * basic readpage implementation.  Locked extent state structs are inserted
3140 * into the tree that are removed when the IO is done (by the end_io
3141 * handlers)
3142 * XXX JDM: This needs looking at to ensure proper page locking
3143 * return 0 on success, otherwise return error
3144 */
3145static int __do_readpage(struct page *page,
3146			 get_extent_t *get_extent,
3147			 struct extent_map **em_cached,
3148			 struct bio **bio, int mirror_num,
3149			 unsigned long *bio_flags, unsigned int read_flags,
3150			 u64 *prev_em_start)
3151{
3152	struct inode *inode = page->mapping->host;
3153	u64 start = page_offset(page);
3154	const u64 end = start + PAGE_SIZE - 1;
 
3155	u64 cur = start;
3156	u64 extent_offset;
3157	u64 last_byte = i_size_read(inode);
3158	u64 block_start;
3159	u64 cur_end;
 
3160	struct extent_map *em;
3161	int ret = 0;
 
3162	int nr = 0;
3163	size_t pg_offset = 0;
3164	size_t iosize;
3165	size_t disk_io_size;
3166	size_t blocksize = inode->i_sb->s_blocksize;
3167	unsigned long this_bio_flag = 0;
3168	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3169
3170	set_page_extent_mapped(page);
3171
 
3172	if (!PageUptodate(page)) {
3173		if (cleancache_get_page(page) == 0) {
3174			BUG_ON(blocksize != PAGE_SIZE);
3175			unlock_extent(tree, start, end);
3176			goto out;
3177		}
3178	}
3179
3180	if (page->index == last_byte >> PAGE_SHIFT) {
3181		char *userpage;
3182		size_t zero_offset = offset_in_page(last_byte);
3183
3184		if (zero_offset) {
3185			iosize = PAGE_SIZE - zero_offset;
3186			userpage = kmap_atomic(page);
3187			memset(userpage + zero_offset, 0, iosize);
3188			flush_dcache_page(page);
3189			kunmap_atomic(userpage);
3190		}
3191	}
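	/*
	 * Walk the page one extent map at a time: holes and prealloc ranges
	 * are zeroed in place, everything else is submitted as read bios.
	 */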
3192	while (cur <= end) {
 
3193		bool force_bio_submit = false;
3194		u64 offset;
3195
3196		if (cur >= last_byte) {
3197			char *userpage;
3198			struct extent_state *cached = NULL;
3199
3200			iosize = PAGE_SIZE - pg_offset;
3201			userpage = kmap_atomic(page);
3202			memset(userpage + pg_offset, 0, iosize);
3203			flush_dcache_page(page);
3204			kunmap_atomic(userpage);
3205			set_extent_uptodate(tree, cur, cur + iosize - 1,
3206					    &cached, GFP_NOFS);
3207			unlock_extent_cached(tree, cur,
3208					     cur + iosize - 1, &cached);
 
3209			break;
3210		}
3211		em = __get_extent_map(inode, page, pg_offset, cur,
3212				      end - cur + 1, get_extent, em_cached);
3213		if (IS_ERR_OR_NULL(em)) {
3214			SetPageError(page);
3215			unlock_extent(tree, cur, end);
3216			break;
3217		}
3218		extent_offset = cur - em->start;
3219		BUG_ON(extent_map_end(em) <= cur);
3220		BUG_ON(end < cur);
3221
3222		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3223			this_bio_flag |= EXTENT_BIO_COMPRESSED;
3224			extent_set_compress_type(&this_bio_flag,
3225						 em->compress_type);
3226		}
3227
3228		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3229		cur_end = min(extent_map_end(em) - 1, end);
3230		iosize = ALIGN(iosize, blocksize);
3231		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3232			disk_io_size = em->block_len;
3233			offset = em->block_start;
3234		} else {
3235			offset = em->block_start + extent_offset;
3236			disk_io_size = iosize;
3237		}
 
3238		block_start = em->block_start;
3239		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3240			block_start = EXTENT_MAP_HOLE;
3241
3242		/*
3243		 * If we have a file range that points to a compressed extent
3244		 * and it's followed by a consecutive file range that points to
3245		 * the same compressed extent (possibly with a different
3246		 * offset and/or length, so it either points to the whole extent
3247		 * or only part of it), we must make sure we do not submit a
3248		 * single bio to populate the pages for the 2 ranges because
3249		 * this makes the compressed extent read zero out the pages
3250		 * belonging to the 2nd range. Imagine the following scenario:
3251		 *
3252		 *  File layout
3253		 *  [0 - 8K]                     [8K - 24K]
3254		 *    |                               |
3255		 *    |                               |
3256		 * points to extent X,         points to extent X,
3257		 * offset 4K, length of 8K     offset 0, length 16K
3258		 *
3259		 * [extent X, compressed length = 4K uncompressed length = 16K]
3260		 *
3261		 * If the bio to read the compressed extent covers both ranges,
3262		 * it will decompress extent X into the pages belonging to the
3263		 * first range and then it will stop, zeroing out the remaining
3264		 * pages that belong to the other range that points to extent X.
3265		 * So here we make sure we submit 2 bios, one for the first
3266		 * range and another one for the second range. Both will target
3267		 * the same physical extent from disk, but we can't currently
3268		 * make the compressed bio endio callback populate the pages
3269		 * for both ranges because each compressed bio is tightly
3270		 * coupled with a single extent map, and each range can have
3271		 * an extent map with a different offset value relative to the
3272		 * uncompressed data of our extent and different lengths. This
3273		 * is a corner case so we prioritize correctness over
3274		 * non-optimal behavior (submitting 2 bios for the same extent).
3275		 */
3276		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3277		    prev_em_start && *prev_em_start != (u64)-1 &&
3278		    *prev_em_start != em->start)
3279			force_bio_submit = true;
3280
3281		if (prev_em_start)
3282			*prev_em_start = em->start;
3283
3284		free_extent_map(em);
3285		em = NULL;
3286
3287		/* we've found a hole, just zero and go on */
3288		if (block_start == EXTENT_MAP_HOLE) {
3289			char *userpage;
3290			struct extent_state *cached = NULL;
3291
3292			userpage = kmap_atomic(page);
3293			memset(userpage + pg_offset, 0, iosize);
3294			flush_dcache_page(page);
3295			kunmap_atomic(userpage);
3296
3297			set_extent_uptodate(tree, cur, cur + iosize - 1,
3298					    &cached, GFP_NOFS);
3299			unlock_extent_cached(tree, cur,
3300					     cur + iosize - 1, &cached);
 
3301			cur = cur + iosize;
3302			pg_offset += iosize;
3303			continue;
3304		}
3305		/* the get_extent function already copied into the page */
3306		if (test_range_bit(tree, cur, cur_end,
3307				   EXTENT_UPTODATE, 1, NULL)) {
3308			check_page_uptodate(tree, page);
3309			unlock_extent(tree, cur, cur + iosize - 1);
3310			cur = cur + iosize;
3311			pg_offset += iosize;
3312			continue;
3313		}
3314		/* we have an inline extent but it didn't get marked up
3315		 * to date.  Error out
3316		 */
3317		if (block_start == EXTENT_MAP_INLINE) {
3318			SetPageError(page);
3319			unlock_extent(tree, cur, cur + iosize - 1);
3320			cur = cur + iosize;
3321			pg_offset += iosize;
3322			continue;
3323		}
3324
3325		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3326					 page, offset, disk_io_size,
3327					 pg_offset, bio,
3328					 end_bio_extent_readpage, mirror_num,
3329					 *bio_flags,
3330					 this_bio_flag,
3331					 force_bio_submit);
3332		if (!ret) {
3333			nr++;
3334			*bio_flags = this_bio_flag;
3335		} else {
3336			SetPageError(page);
3337			unlock_extent(tree, cur, cur + iosize - 1);
3338			goto out;
3339		}
3340		cur = cur + iosize;
3341		pg_offset += iosize;
3342	}
3343out:
3344	if (!nr) {
3345		if (!PageError(page))
3346			SetPageUptodate(page);
3347		unlock_page(page);
3348	}
3349	return ret;
3350}
3351
3352static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3353					     u64 start, u64 end,
3354					     struct extent_map **em_cached,
3355					     struct bio **bio,
3356					     unsigned long *bio_flags,
3357					     u64 *prev_em_start)
3358{
3359	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
 
3360	int index;
3361
3362	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3363
3364	for (index = 0; index < nr_pages; index++) {
3365		__do_readpage(pages[index], btrfs_get_extent, em_cached,
3366				bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
3367		put_page(pages[index]);
3368	}
3369}
3370
3371static int __extent_read_full_page(struct page *page,
3372				   get_extent_t *get_extent,
3373				   struct bio **bio, int mirror_num,
3374				   unsigned long *bio_flags,
3375				   unsigned int read_flags)
3376{
3377	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 
3378	u64 start = page_offset(page);
3379	u64 end = start + PAGE_SIZE - 1;
3380	int ret;
3381
3382	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3383
3384	ret = __do_readpage(page, get_extent, NULL, bio, mirror_num,
3385			    bio_flags, read_flags, NULL);
3386	return ret;
3387}
3388
3389int extent_read_full_page(struct page *page, get_extent_t *get_extent,
3390			  int mirror_num)
3391{
3392	struct bio *bio = NULL;
3393	unsigned long bio_flags = 0;
3394	int ret;
3395
3396	ret = __extent_read_full_page(page, get_extent, &bio, mirror_num,
3397				      &bio_flags, 0);
3398	if (bio)
3399		ret = submit_one_bio(bio, mirror_num, bio_flags);
3400	return ret;
3401}
3402
3403static void update_nr_written(struct writeback_control *wbc,
3404			      unsigned long nr_written)
3405{
3406	wbc->nr_to_write -= nr_written;
3407}
3408
3409/*
3410 * helper for __extent_writepage, doing all of the delayed allocation setup.
3411 *
3412 * This returns 1 if btrfs_run_delalloc_range function did all the work required
3413 * to write the page (copy into inline extent).  In this case the IO has
3414 * been started and the page is already unlocked.
3415 *
3416 * This returns 0 if all went well (page still locked)
3417 * This returns < 0 if there were errors (page still locked)
3418 */
3419static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3420		struct page *page, struct writeback_control *wbc,
3421		u64 delalloc_start, unsigned long *nr_written)
3422{
 
3423	u64 page_end = delalloc_start + PAGE_SIZE - 1;
3424	bool found;
3425	u64 delalloc_to_write = 0;
3426	u64 delalloc_end = 0;
3427	int ret;
3428	int page_started = 0;
3429
3430
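	/*
	 * Find and lock each delalloc range that overlaps this page and kick
	 * off the actual allocation/writeout for it.
	 */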
3431	while (delalloc_end < page_end) {
3432		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3433					       &delalloc_start,
3434					       &delalloc_end);
3435		if (!found) {
 
3436			delalloc_start = delalloc_end + 1;
3437			continue;
3438		}
3439		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3440				delalloc_end, &page_started, nr_written, wbc);
3441		if (ret) {
3442			SetPageError(page);
3443			/*
3444			 * btrfs_run_delalloc_range should return < 0 for error
3445			 * but just in case, we use > 0 here meaning the IO is
3446			 * started, so we don't want to return > 0 unless
3447			 * things are going well.
3448			 */
3449			return ret < 0 ? ret : -EIO;
 
3450		}
3451		/*
3452		 * delalloc_end is already one less than the total length, so
3453		 * we don't subtract one from PAGE_SIZE
3454		 */
3455		delalloc_to_write += (delalloc_end - delalloc_start +
3456				      PAGE_SIZE) >> PAGE_SHIFT;
3457		delalloc_start = delalloc_end + 1;
3458	}
3459	if (wbc->nr_to_write < delalloc_to_write) {
3460		int thresh = 8192;
3461
3462		if (delalloc_to_write < thresh * 2)
3463			thresh = delalloc_to_write;
3464		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3465					 thresh);
3466	}
3467
3468	/* did the fill delalloc function already unlock and start
3469	 * the IO?
3470	 */
3471	if (page_started) {
3472		/*
3473		 * we've unlocked the page, so we can't update
3474		 * the mapping's writeback index, just update
3475		 * nr_to_write.
3476		 */
3477		wbc->nr_to_write -= *nr_written;
3478		return 1;
3479	}
3480
3481	return 0;
3482}
3483
3484/*
3485 * helper for __extent_writepage.  This calls the writepage start hooks,
3486 * and does the loop to map the page into extents and bios.
3487 *
3488 * We return 1 if the IO is started and the page is unlocked,
3489 * 0 if all went well (page still locked)
3490 * < 0 if there were errors (page still locked)
3491 */
3492static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3493				 struct page *page,
3494				 struct writeback_control *wbc,
3495				 struct extent_page_data *epd,
3496				 loff_t i_size,
3497				 unsigned long nr_written,
3498				 int *nr_ret)
3499{
3500	struct extent_io_tree *tree = &inode->io_tree;
3501	u64 start = page_offset(page);
3502	u64 page_end = start + PAGE_SIZE - 1;
3503	u64 end;
3504	u64 cur = start;
3505	u64 extent_offset;
3506	u64 block_start;
3507	u64 iosize;
3508	struct extent_map *em;
 
3509	size_t pg_offset = 0;
3510	size_t blocksize;
3511	int ret = 0;
3512	int nr = 0;
3513	const unsigned int write_flags = wbc_to_write_flags(wbc);
3514	bool compressed;
3515
3516	ret = btrfs_writepage_cow_fixup(page, start, page_end);
3517	if (ret) {
3518		/* Fixup worker will requeue */
3519		redirty_page_for_writepage(wbc, page);
3520		update_nr_written(wbc, nr_written);
3521		unlock_page(page);
3522		return 1;
3523	}
3524
3525	/*
3526	 * we don't want to touch the inode after unlocking the page,
3527	 * so we update the mapping writeback index now
3528	 */
3529	update_nr_written(wbc, nr_written + 1);
3530
3531	end = page_end;
3532	blocksize = inode->vfs_inode.i_sb->s_blocksize;
3533
3534	while (cur <= end) {
3535		u64 em_end;
3536		u64 offset;
3537
3538		if (cur >= i_size) {
3539			btrfs_writepage_endio_finish_ordered(page, cur,
3540							     page_end, 1);
 
3541			break;
3542		}
3543		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
 
3544		if (IS_ERR_OR_NULL(em)) {
3545			SetPageError(page);
3546			ret = PTR_ERR_OR_ZERO(em);
3547			break;
3548		}
3549
3550		extent_offset = cur - em->start;
3551		em_end = extent_map_end(em);
3552		BUG_ON(em_end <= cur);
3553		BUG_ON(end < cur);
3554		iosize = min(em_end - cur, end - cur + 1);
3555		iosize = ALIGN(iosize, blocksize);
3556		offset = em->block_start + extent_offset;
 
3557		block_start = em->block_start;
3558		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3559		free_extent_map(em);
3560		em = NULL;
3561
3562		/*
3563		 * compressed and inline extents are written through other
3564		 * paths in the FS
3565		 */
3566		if (compressed || block_start == EXTENT_MAP_HOLE ||
3567		    block_start == EXTENT_MAP_INLINE) {
3568			if (compressed)
3569				nr++;
3570			else
3571				btrfs_writepage_endio_finish_ordered(page, cur,
3572							cur + iosize - 1, 1);
3573			cur += iosize;
3574			pg_offset += iosize;
3575			continue;
3576		}
3577
3578		btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3579		if (!PageWriteback(page)) {
3580			btrfs_err(inode->root->fs_info,
3581				   "page %lu not writeback, cur %llu end %llu",
3582			       page->index, cur, end);
3583		}
3584
3585		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3586					 page, offset, iosize, pg_offset,
3587					 &epd->bio,
3588					 end_bio_extent_writepage,
3589					 0, 0, 0, false);
3590		if (ret) {
3591			SetPageError(page);
3592			if (PageWriteback(page))
3593				end_page_writeback(page);
3594		}
3595
3596		cur = cur + iosize;
3597		pg_offset += iosize;
3598		nr++;
3599	}
 
3600	*nr_ret = nr;
3601	return ret;
3602}
3603
3604/*
3605 * the writepage semantics are similar to regular writepage.  extent
3606 * records are inserted to lock ranges in the tree, and as dirty areas
3607 * are found, they are marked writeback.  Then the lock bits are removed
3608 * and the end_io handler clears the writeback ranges
3609 *
3610 * Return 0 if everything goes well.
3611 * Return <0 for error.
3612 */
3613static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3614			      struct extent_page_data *epd)
3615{
3616	struct inode *inode = page->mapping->host;
 
3617	u64 start = page_offset(page);
3618	u64 page_end = start + PAGE_SIZE - 1;
3619	int ret;
3620	int nr = 0;
3621	size_t pg_offset;
3622	loff_t i_size = i_size_read(inode);
3623	unsigned long end_index = i_size >> PAGE_SHIFT;
 
3624	unsigned long nr_written = 0;
3625
3626	trace___extent_writepage(page, inode, wbc);
3627
3628	WARN_ON(!PageLocked(page));
3629
3630	ClearPageError(page);
3631
3632	pg_offset = offset_in_page(i_size);
3633	if (page->index > end_index ||
3634	   (page->index == end_index && !pg_offset)) {
3635		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3636		unlock_page(page);
3637		return 0;
3638	}
3639
3640	if (page->index == end_index) {
3641		char *userpage;
3642
3643		userpage = kmap_atomic(page);
3644		memset(userpage + pg_offset, 0,
3645		       PAGE_SIZE - pg_offset);
3646		kunmap_atomic(userpage);
3647		flush_dcache_page(page);
3648	}
3649
 
 
3650	set_page_extent_mapped(page);
3651
3652	if (!epd->extent_locked) {
3653		ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3654					 &nr_written);
3655		if (ret == 1)
3656			return 0;
3657		if (ret)
3658			goto done;
3659	}
3660
3661	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
3662				    nr_written, &nr);
3663	if (ret == 1)
3664		return 0;
3665
3666done:
3667	if (nr == 0) {
3668		/* make sure the mapping tag for page dirty gets cleared */
3669		set_page_writeback(page);
3670		end_page_writeback(page);
3671	}
3672	if (PageError(page)) {
3673		ret = ret < 0 ? ret : -EIO;
3674		end_extent_writepage(page, ret, start, page_end);
3675	}
3676	unlock_page(page);
3677	ASSERT(ret <= 0);
3678	return ret;
 
 
 
3679}
3680
3681void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3682{
3683	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3684		       TASK_UNINTERRUPTIBLE);
3685}
3686
3687static void end_extent_buffer_writeback(struct extent_buffer *eb)
3688{
3689	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3690	smp_mb__after_atomic();
3691	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3692}
3693
3694/*
3695 * Lock eb pages and flush the bio if we can't get the locks
3696 *
3697 * Return  0 if nothing went wrong
3698 * Return >0 is the same as 0, except the bio is not submitted
3699 * Return <0 if something went wrong, no page is locked
3700 */
3701static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3702			  struct extent_page_data *epd)
3703{
3704	struct btrfs_fs_info *fs_info = eb->fs_info;
3705	int i, num_pages, failed_page_nr;
3706	int flush = 0;
3707	int ret = 0;
3708
3709	if (!btrfs_try_tree_write_lock(eb)) {
3710		ret = flush_write_bio(epd);
3711		if (ret < 0)
3712			return ret;
3713		flush = 1;
 
3714		btrfs_tree_lock(eb);
3715	}
3716
3717	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3718		btrfs_tree_unlock(eb);
3719		if (!epd->sync_io)
3720			return 0;
3721		if (!flush) {
3722			ret = flush_write_bio(epd);
3723			if (ret < 0)
3724				return ret;
3725			flush = 1;
3726		}
3727		while (1) {
3728			wait_on_extent_buffer_writeback(eb);
3729			btrfs_tree_lock(eb);
3730			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3731				break;
3732			btrfs_tree_unlock(eb);
3733		}
3734	}
3735
3736	/*
3737	 * We need to do this to prevent races with anyone who checks if the eb is
3738	 * under IO since we can end up having no IO bits set for a short period
3739	 * of time.
3740	 */
3741	spin_lock(&eb->refs_lock);
3742	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3743		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3744		spin_unlock(&eb->refs_lock);
3745		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3746		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3747					 -eb->len,
3748					 fs_info->dirty_metadata_batch);
3749		ret = 1;
3750	} else {
3751		spin_unlock(&eb->refs_lock);
3752	}
3753
3754	btrfs_tree_unlock(eb);
3755
3756	if (!ret)
3757		return ret;
3758
3759	num_pages = num_extent_pages(eb);
3760	for (i = 0; i < num_pages; i++) {
3761		struct page *p = eb->pages[i];
3762
3763		if (!trylock_page(p)) {
3764			if (!flush) {
3765				int err;
3766
3767				err = flush_write_bio(epd);
3768				if (err < 0) {
3769					ret = err;
3770					failed_page_nr = i;
3771					goto err_unlock;
3772				}
3773				flush = 1;
3774			}
3775			lock_page(p);
3776		}
3777	}
3778
3779	return ret;
3780err_unlock:
3781	/* Unlock already locked pages */
3782	for (i = 0; i < failed_page_nr; i++)
3783		unlock_page(eb->pages[i]);
3784	/*
3785	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
3786	 * Also set back EXTENT_BUFFER_DIRTY so future write attempts to this eb
3787	 * can be made, and undo everything done before.
3788	 */
3789	btrfs_tree_lock(eb);
3790	spin_lock(&eb->refs_lock);
3791	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3792	end_extent_buffer_writeback(eb);
3793	spin_unlock(&eb->refs_lock);
3794	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
3795				 fs_info->dirty_metadata_batch);
3796	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3797	btrfs_tree_unlock(eb);
3798	return ret;
3799}
3800
3801static void set_btree_ioerr(struct page *page)
3802{
3803	struct extent_buffer *eb = (struct extent_buffer *)page->private;
3804	struct btrfs_fs_info *fs_info;
3805
3806	SetPageError(page);
3807	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3808		return;
3809
3810	/*
3811	 * If we error out, we should add back the dirty_metadata_bytes
3812	 * to make it consistent.
3813	 */
3814	fs_info = eb->fs_info;
3815	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3816				 eb->len, fs_info->dirty_metadata_batch);
3817
3818	/*
3819	 * If writeback for a btree extent that doesn't belong to a log tree
3820	 * failed, increment the counter transaction->eb_write_errors.
3821	 * We do this because while the transaction is running and before it's
3822	 * committing (when we call filemap_fdata[write|wait]_range against
3823	 * the btree inode), we might have
3824	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3825	 * returns an error or an error happens during writeback, when we're
3826	 * committing the transaction we wouldn't know about it, since the pages
3827	 * can be no longer dirty nor marked anymore for writeback (if a
3828	 * subsequent modification to the extent buffer didn't happen before the
3829	 * transaction commit), which makes filemap_fdata[write|wait]_range not
3830	 * able to find the pages tagged with SetPageError at transaction
3831	 * commit time. So if this happens we must abort the transaction,
3832	 * otherwise we commit a super block with btree roots that point to
3833	 * btree nodes/leafs whose content on disk is invalid - either garbage
3834	 * or the content of some node/leaf from a past generation that got
3835	 * cowed or deleted and is no longer valid.
3836	 *
3837	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3838	 * not be enough - we need to distinguish between log tree extents vs
3839	 * non-log tree extents, and the next filemap_fdatawait_range() call
3840	 * will catch and clear such errors in the mapping - and that call might
3841	 * be from a log sync and not from a transaction commit. Also, checking
3842	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3843	 * not done and would not be reliable - the eb might have been released
3844	 * from memory and reading it back again means that flag would not be
3845	 * set (since it's a runtime flag, not persisted on disk).
3846	 *
3847	 * Using the flags below in the btree inode also covers the case where
3848	 * writepages() returns success after having started writeback for all
3849	 * the dirty pages, but by the time filemap_fdatawait_range() is called
3850	 * the writeback for all those pages has already finished with errors -
3851	 * because we were not using AS_EIO/AS_ENOSPC,
3852	 * filemap_fdatawait_range() would return success, as it could not know
3853	 * that writeback errors happened (the pages were no longer tagged for
3854	 * writeback).
3855	 */
3856	switch (eb->log_index) {
3857	case -1:
3858		set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3859		break;
3860	case 0:
3861		set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3862		break;
3863	case 1:
3864		set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3865		break;
3866	default:
3867		BUG(); /* unexpected, logic error */
3868	}
3869}
3870
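/*
 * Bio end_io callback for extent buffer (btree) writeback.  For each page in
 * the bio, record any write error on the owning eb via set_btree_ioerr() and
 * end the page writeback; once the last page of an eb completes, clear
 * EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
 */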
3871static void end_bio_extent_buffer_writepage(struct bio *bio)
3872{
3873	struct bio_vec *bvec;
3874	struct extent_buffer *eb;
3875	int done;
3876	struct bvec_iter_all iter_all;
3877
3878	ASSERT(!bio_flagged(bio, BIO_CLONED));
3879	bio_for_each_segment_all(bvec, bio, iter_all) {
3880		struct page *page = bvec->bv_page;
3881
3882		eb = (struct extent_buffer *)page->private;
3883		BUG_ON(!eb);
3884		done = atomic_dec_and_test(&eb->io_pages);
3885
3886		if (bio->bi_status ||
3887		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3888			ClearPageUptodate(page);
3889			set_btree_ioerr(page);
3890		}
3891
3892		end_page_writeback(page);
3893
3894		if (!done)
3895			continue;
3896
3897		end_extent_buffer_writeback(eb);
3898	}
3899
3900	bio_put(bio);
3901}
3902
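/*
 * Write a single extent buffer.  Zero the area of the eb beyond the last
 * key/item so no stale content leaks to disk, then submit a write bio for
 * each of the eb's pages.  If submission fails, the remaining pages are
 * cleaned and unlocked and -EIO is returned.
 */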
3903static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 
3904			struct writeback_control *wbc,
3905			struct extent_page_data *epd)
3906{
 
 
3907	u64 offset = eb->start;
3908	u32 nritems;
3909	int i, num_pages;
3910	unsigned long start, end;
3911	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3912	int ret = 0;
3913
3914	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3915	num_pages = num_extent_pages(eb);
3916	atomic_set(&eb->io_pages, num_pages);
3917
3918	/* set btree blocks beyond nritems with 0 to avoid stale content. */
3919	nritems = btrfs_header_nritems(eb);
3920	if (btrfs_header_level(eb) > 0) {
3921		end = btrfs_node_key_ptr_offset(nritems);
3922
3923		memzero_extent_buffer(eb, end, eb->len - end);
3924	} else {
3925		/*
3926		 * leaf:
3927		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3928		 */
3929		start = btrfs_item_nr_offset(nritems);
3930		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3931		memzero_extent_buffer(eb, start, end - start);
3932	}
3933
3934	for (i = 0; i < num_pages; i++) {
3935		struct page *p = eb->pages[i];
3936
3937		clear_page_dirty_for_io(p);
3938		set_page_writeback(p);
3939		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3940					 p, offset, PAGE_SIZE, 0,
3941					 &epd->bio,
3942					 end_bio_extent_buffer_writepage,
3943					 0, 0, 0, false);
3944		if (ret) {
3945			set_btree_ioerr(p);
3946			if (PageWriteback(p))
3947				end_page_writeback(p);
3948			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3949				end_extent_buffer_writeback(eb);
3950			ret = -EIO;
3951			break;
3952		}
3953		offset += PAGE_SIZE;
3954		update_nr_written(wbc, 1);
3955		unlock_page(p);
3956	}
3957
3958	if (unlikely(ret)) {
3959		for (; i < num_pages; i++) {
3960			struct page *p = eb->pages[i];
3961			clear_page_dirty_for_io(p);
3962			unlock_page(p);
3963		}
3964	}
3965
3966	return ret;
3967}
3968
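/*
 * Write back dirty metadata pages of the btree inode: walk the dirty pages,
 * resolve each one back to its extent buffer, lock the eb for IO and submit
 * it with write_one_eb(), honouring wbc->nr_to_write and the sync mode.
 * On error, no further metadata bios are submitted.
 */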
3969int btree_write_cache_pages(struct address_space *mapping,
3970				   struct writeback_control *wbc)
3971{
 
 
3972	struct extent_buffer *eb, *prev_eb = NULL;
3973	struct extent_page_data epd = {
3974		.bio = NULL,
 
3975		.extent_locked = 0,
3976		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 
3977	};
3978	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3979	int ret = 0;
3980	int done = 0;
3981	int nr_to_write_done = 0;
3982	struct pagevec pvec;
3983	int nr_pages;
3984	pgoff_t index;
3985	pgoff_t end;		/* Inclusive */
3986	int scanned = 0;
3987	xa_mark_t tag;
3988
3989	pagevec_init(&pvec);
3990	if (wbc->range_cyclic) {
3991		index = mapping->writeback_index; /* Start from prev offset */
3992		end = -1;
3993		/*
3994		 * Starting from the beginning does not need to cycle over the
3995		 * whole range, mark it as scanned.
3996		 */
3997		scanned = (index == 0);
3998	} else {
3999		index = wbc->range_start >> PAGE_SHIFT;
4000		end = wbc->range_end >> PAGE_SHIFT;
4001		scanned = 1;
4002	}
4003	if (wbc->sync_mode == WB_SYNC_ALL)
4004		tag = PAGECACHE_TAG_TOWRITE;
4005	else
4006		tag = PAGECACHE_TAG_DIRTY;
4007retry:
4008	if (wbc->sync_mode == WB_SYNC_ALL)
4009		tag_pages_for_writeback(mapping, index, end);
4010	while (!done && !nr_to_write_done && (index <= end) &&
4011	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4012			tag))) {
4013		unsigned i;
4014
 
4015		for (i = 0; i < nr_pages; i++) {
4016			struct page *page = pvec.pages[i];
4017
4018			if (!PagePrivate(page))
4019				continue;
4020
4021			spin_lock(&mapping->private_lock);
4022			if (!PagePrivate(page)) {
4023				spin_unlock(&mapping->private_lock);
4024				continue;
4025			}
4026
4027			eb = (struct extent_buffer *)page->private;
4028
4029			/*
4030			 * Shouldn't happen and normally this would be a BUG_ON
4031			 * but no sense in crashing the user's box for something
4032			 * we can survive anyway.
4033			 */
4034			if (WARN_ON(!eb)) {
4035				spin_unlock(&mapping->private_lock);
4036				continue;
4037			}
4038
4039			if (eb == prev_eb) {
4040				spin_unlock(&mapping->private_lock);
4041				continue;
4042			}
4043
4044			ret = atomic_inc_not_zero(&eb->refs);
4045			spin_unlock(&mapping->private_lock);
4046			if (!ret)
4047				continue;
4048
4049			prev_eb = eb;
4050			ret = lock_extent_buffer_for_io(eb, &epd);
4051			if (!ret) {
4052				free_extent_buffer(eb);
4053				continue;
4054			} else if (ret < 0) {
4055				done = 1;
4056				free_extent_buffer(eb);
4057				break;
4058			}
4059
4060			ret = write_one_eb(eb, wbc, &epd);
4061			if (ret) {
4062				done = 1;
4063				free_extent_buffer(eb);
4064				break;
4065			}
4066			free_extent_buffer(eb);
4067
4068			/*
4069			 * the filesystem may choose to bump up nr_to_write.
4070			 * We have to make sure to honor the new nr_to_write
4071			 * at any time
4072			 */
4073			nr_to_write_done = wbc->nr_to_write <= 0;
4074		}
4075		pagevec_release(&pvec);
4076		cond_resched();
4077	}
4078	if (!scanned && !done) {
4079		/*
4080		 * We hit the last page and there is more work to be done: wrap
4081		 * back to the start of the file
4082		 */
4083		scanned = 1;
4084		index = 0;
4085		goto retry;
4086	}
4087	ASSERT(ret <= 0);
4088	if (ret < 0) {
4089		end_write_bio(&epd, ret);
4090		return ret;
4091	}
4092	/*
4093	 * If something went wrong, don't allow any metadata write bio to be
4094	 * submitted.
4095	 *
4096	 * This would prevent use-after-free if we had dirty pages not
4097	 * cleaned up, which can still happen with fuzzed images.
4098	 *
4099	 * - Bad extent tree
4100	 *   Allowing existing tree block to be allocated for other trees.
4101	 *
4102	 * - Log tree operations
4103	 *   Existing tree blocks get allocated to the log tree, which bumps
4104	 *   their generation, then they get cleaned in tree re-balance.
4105	 *   Such tree block will not be written back, since it's clean,
4106	 *   thus no WRITTEN flag set.
4107	 *   And after log writes back, this tree block is not traced by
4108	 *   any dirty extent_io_tree.
4109	 *
4110	 * - Offending tree block gets re-dirtied from its original owner
4111	 *   Since it has bumped generation, no WRITTEN flag, it can be
4112	 *   reused without COWing. This tree block will not be traced
4113	 *   by btrfs_transaction::dirty_pages.
4114	 *
4115	 *   Now such dirty tree block will not be cleaned by any dirty
4116	 *   extent io tree. Thus we don't want to submit such wild eb
4117	 *   if the fs already has error.
4118	 */
4119	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4120		ret = flush_write_bio(&epd);
4121	} else {
4122		ret = -EROFS;
4123		end_write_bio(&epd, ret);
4124	}
4125	return ret;
4126}
4127
4128/**
4129 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
4130 * @mapping: address space structure to write
4131 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4132 * @epd: extent_page_data holding the bio under construction, passed down to __extent_writepage
 
4133 *
4134 * If a page is already under I/O, write_cache_pages() skips it, even
4135 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
4136 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4137 * and msync() need to guarantee that all the data which was dirty at the time
4138 * the call was made get new I/O started against them.  If wbc->sync_mode is
4139 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4140 * existing IO to complete.
4141 */
4142static int extent_write_cache_pages(struct address_space *mapping,
 
4143			     struct writeback_control *wbc,
4144			     struct extent_page_data *epd)
 
4145{
4146	struct inode *inode = mapping->host;
4147	int ret = 0;
4148	int done = 0;
 
4149	int nr_to_write_done = 0;
4150	struct pagevec pvec;
4151	int nr_pages;
4152	pgoff_t index;
4153	pgoff_t end;		/* Inclusive */
4154	pgoff_t done_index;
4155	int range_whole = 0;
4156	int scanned = 0;
4157	xa_mark_t tag;
4158
4159	/*
4160	 * We have to hold onto the inode so that ordered extents can do their
4161	 * work when the IO finishes.  The alternative to this is failing to add
4162	 * an ordered extent if the igrab() fails there and that is a huge pain
4163	 * to deal with, so instead just hold onto the inode throughout the
4164	 * writepages operation.  If it fails here we are freeing up the inode
4165	 * anyway and we'd rather not waste our time writing out stuff that is
4166	 * going to be truncated anyway.
4167	 */
4168	if (!igrab(inode))
4169		return 0;
4170
4171	pagevec_init(&pvec);
4172	if (wbc->range_cyclic) {
4173		index = mapping->writeback_index; /* Start from prev offset */
4174		end = -1;
4175		/*
4176		 * Starting from the beginning does not need to cycle over the
4177		 * whole range, mark it as scanned.
4178		 */
4179		scanned = (index == 0);
4180	} else {
4181		index = wbc->range_start >> PAGE_SHIFT;
4182		end = wbc->range_end >> PAGE_SHIFT;
4183		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4184			range_whole = 1;
4185		scanned = 1;
4186	}
4187
4188	/*
4189	 * We do the tagged writepage as long as the snapshot flush bit is set
4190	 * and we are the first one who do the filemap_flush() on this inode.
4191	 *
4192	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4193	 * not race in and drop the bit.
4194	 */
4195	if (range_whole && wbc->nr_to_write == LONG_MAX &&
4196	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4197			       &BTRFS_I(inode)->runtime_flags))
4198		wbc->tagged_writepages = 1;
4199
4200	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4201		tag = PAGECACHE_TAG_TOWRITE;
4202	else
4203		tag = PAGECACHE_TAG_DIRTY;
4204retry:
4205	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4206		tag_pages_for_writeback(mapping, index, end);
4207	done_index = index;
4208	while (!done && !nr_to_write_done && (index <= end) &&
4209			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4210						&index, end, tag))) {
4211		unsigned i;
4212
 
4213		for (i = 0; i < nr_pages; i++) {
4214			struct page *page = pvec.pages[i];
4215
4216			done_index = page->index + 1;
4217			/*
4218			 * At this point we hold neither the i_pages lock nor
4219			 * the page lock: the page may be truncated or
4220			 * invalidated (changing page->mapping to NULL),
4221			 * or even swizzled back from swapper_space to
4222			 * tmpfs file mapping
4223			 */
4224			if (!trylock_page(page)) {
4225				ret = flush_write_bio(epd);
4226				BUG_ON(ret < 0);
4227				lock_page(page);
4228			}
4229
4230			if (unlikely(page->mapping != mapping)) {
4231				unlock_page(page);
4232				continue;
4233			}
4234
4235			if (wbc->sync_mode != WB_SYNC_NONE) {
4236				if (PageWriteback(page)) {
4237					ret = flush_write_bio(epd);
4238					BUG_ON(ret < 0);
4239				}
4240				wait_on_page_writeback(page);
4241			}
4242
4243			if (PageWriteback(page) ||
4244			    !clear_page_dirty_for_io(page)) {
4245				unlock_page(page);
4246				continue;
4247			}
4248
4249			ret = __extent_writepage(page, wbc, epd);
4250			if (ret < 0) {
4251				done = 1;
4252				break;
 
4253			}
 
 
4254
4255			/*
4256			 * the filesystem may choose to bump up nr_to_write.
4257			 * We have to make sure to honor the new nr_to_write
4258			 * at any time
4259			 */
4260			nr_to_write_done = wbc->nr_to_write <= 0;
4261		}
4262		pagevec_release(&pvec);
4263		cond_resched();
4264	}
4265	if (!scanned && !done) {
4266		/*
4267		 * We hit the last page and there is more work to be done: wrap
4268		 * back to the start of the file
4269		 */
4270		scanned = 1;
4271		index = 0;
4272
4273		/*
4274		 * If we're looping we could run into a page that is locked by a
4275		 * writer and that writer could be waiting on writeback for a
4276		 * page in our current bio, and thus deadlock, so flush the
4277		 * write bio here.
4278		 */
4279		ret = flush_write_bio(epd);
4280		if (!ret)
4281			goto retry;
4282	}
 
4283
4284	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4285		mapping->writeback_index = done_index;
4286
4287	btrfs_add_delayed_iput(inode);
4288	return ret;
4289}
4290
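/*
 * Write out a single data page and flush any bio that was built up while
 * doing so; on error, end all pending writes with the error code.
 */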
4291int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 
 
4292{
4293	int ret;
4294	struct extent_page_data epd = {
4295		.bio = NULL,
 
 
4296		.extent_locked = 0,
4297		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 
4298	};
4299
4300	ret = __extent_writepage(page, wbc, &epd);
4301	ASSERT(ret <= 0);
4302	if (ret < 0) {
4303		end_write_bio(&epd, ret);
4304		return ret;
4305	}
4306
4307	ret = flush_write_bio(&epd);
4308	ASSERT(ret <= 0);
4309	return ret;
4310}
4311
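/*
 * Write out an already locked range of pages, [start, end].  The caller has
 * already run delalloc for the range (epd.extent_locked == 1), so each dirty
 * page is written directly with __extent_writepage(); pages that are no
 * longer dirty just have their ordered extent finished.
 */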
4312int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 
4313			      int mode)
4314{
4315	int ret = 0;
4316	struct address_space *mapping = inode->i_mapping;
4317	struct page *page;
4318	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4319		PAGE_SHIFT;
4320
4321	struct extent_page_data epd = {
4322		.bio = NULL,
 
 
4323		.extent_locked = 1,
4324		.sync_io = mode == WB_SYNC_ALL,
 
4325	};
4326	struct writeback_control wbc_writepages = {
4327		.sync_mode	= mode,
4328		.nr_to_write	= nr_pages * 2,
4329		.range_start	= start,
4330		.range_end	= end + 1,
4331		/* We're called from an async helper function */
4332		.punt_to_cgroup	= 1,
4333		.no_cgroup_owner = 1,
4334	};
4335
4336	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
4337	while (start <= end) {
4338		page = find_get_page(mapping, start >> PAGE_SHIFT);
4339		if (clear_page_dirty_for_io(page))
4340			ret = __extent_writepage(page, &wbc_writepages, &epd);
4341		else {
4342			btrfs_writepage_endio_finish_ordered(page, start,
4343						    start + PAGE_SIZE - 1, 1);
 
 
4344			unlock_page(page);
4345		}
4346		put_page(page);
4347		start += PAGE_SIZE;
4348	}
4349
4350	ASSERT(ret <= 0);
4351	if (ret == 0)
4352		ret = flush_write_bio(&epd);
4353	else
4354		end_write_bio(&epd, ret);
4355
4356	wbc_detach_inode(&wbc_writepages);
4357	return ret;
4358}
4359
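/*
 * Writepages helper for data pages: write the dirty pages of the mapping and
 * submit any bio built up in the process.
 */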
4360int extent_writepages(struct address_space *mapping,
 
 
4361		      struct writeback_control *wbc)
4362{
4363	int ret = 0;
4364	struct extent_page_data epd = {
4365		.bio = NULL,
 
 
4366		.extent_locked = 0,
4367		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 
4368	};
4369
4370	ret = extent_write_cache_pages(mapping, wbc, &epd);
4371	ASSERT(ret <= 0);
4372	if (ret < 0) {
4373		end_write_bio(&epd, ret);
4374		return ret;
4375	}
4376	ret = flush_write_bio(&epd);
4377	return ret;
4378}
4379
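/*
 * Readahead entry point: read contiguous batches of pages, reusing a cached
 * extent map and a single bio across the batch where possible.
 */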
4380void extent_readahead(struct readahead_control *rac)
 
 
 
4381{
4382	struct bio *bio = NULL;
 
4383	unsigned long bio_flags = 0;
4384	struct page *pagepool[16];
 
4385	struct extent_map *em_cached = NULL;
 
4386	u64 prev_em_start = (u64)-1;
4387	int nr;
4388
4389	while ((nr = readahead_page_batch(rac, pagepool))) {
4390		u64 contig_start = page_offset(pagepool[0]);
4391		u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
4392
4393		ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4394
4395		contiguous_readpages(pagepool, nr, contig_start, contig_end,
4396				&em_cached, &bio, &bio_flags, &prev_em_start);
4397	}
4398
4399	if (em_cached)
4400		free_extent_map(em_cached);
4401
4402	if (bio) {
4403		if (submit_one_bio(bio, 0, bio_flags))
4404			return;
4405	}
4406}
4407
4408/*
4409 * basic invalidatepage code, this waits on any locked or writeback
4410 * ranges corresponding to the page, and then deletes any extent state
4411 * records from the tree
4412 */
4413int extent_invalidatepage(struct extent_io_tree *tree,
4414			  struct page *page, unsigned long offset)
4415{
4416	struct extent_state *cached_state = NULL;
4417	u64 start = page_offset(page);
4418	u64 end = start + PAGE_SIZE - 1;
4419	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4420
4421	start += ALIGN(offset, blocksize);
4422	if (start > end)
4423		return 0;
4424
4425	lock_extent_bits(tree, start, end, &cached_state);
4426	wait_on_page_writeback(page);
4427	clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4428			 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
 
 
4429	return 0;
4430}
4431
4432/*
4433 * a helper for releasepage, this tests for areas of the page that
4434 * are locked or under IO and drops the related state bits if it is safe
4435 * to drop the page.
4436 */
4437static int try_release_extent_state(struct extent_io_tree *tree,
 
4438				    struct page *page, gfp_t mask)
4439{
4440	u64 start = page_offset(page);
4441	u64 end = start + PAGE_SIZE - 1;
4442	int ret = 1;
4443
4444	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
 
4445		ret = 0;
4446	} else {
 
 
4447		/*
4448		 * at this point we can safely clear everything except the
4449		 * locked bit and the nodatasum bit
4450		 */
4451		ret = __clear_extent_bit(tree, start, end,
4452				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4453				 0, 0, NULL, mask, NULL);
4454
4455		/* if clear_extent_bit failed for enomem reasons,
4456		 * we can't allow the release to continue.
4457		 */
4458		if (ret < 0)
4459			ret = 0;
4460		else
4461			ret = 1;
4462	}
4463	return ret;
4464}
4465
4466/*
4467 * a helper for releasepage.  As long as there are no locked extents
4468 * in the range corresponding to the page, both state records and extent
4469 * map records are removed
4470 */
4471int try_release_extent_mapping(struct page *page, gfp_t mask)
 
 
4472{
4473	struct extent_map *em;
4474	u64 start = page_offset(page);
4475	u64 end = start + PAGE_SIZE - 1;
4476	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4477	struct extent_io_tree *tree = &btrfs_inode->io_tree;
4478	struct extent_map_tree *map = &btrfs_inode->extent_tree;
4479
4480	if (gfpflags_allow_blocking(mask) &&
4481	    page->mapping->host->i_size > SZ_16M) {
4482		u64 len;
4483		while (start <= end) {
4484			struct btrfs_fs_info *fs_info;
4485			u64 cur_gen;
4486
4487			len = end - start + 1;
4488			write_lock(&map->lock);
4489			em = lookup_extent_mapping(map, start, len);
4490			if (!em) {
4491				write_unlock(&map->lock);
4492				break;
4493			}
4494			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4495			    em->start != start) {
4496				write_unlock(&map->lock);
4497				free_extent_map(em);
4498				break;
4499			}
4500			if (test_range_bit(tree, em->start,
4501					   extent_map_end(em) - 1,
4502					   EXTENT_LOCKED, 0, NULL))
4503				goto next;
4504			/*
4505			 * If it's not in the list of modified extents, used
4506			 * by a fast fsync, we can remove it. If it's being
4507			 * logged we can safely remove it since fsync took an
4508			 * extra reference on the em.
4509			 */
4510			if (list_empty(&em->list) ||
4511			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
4512				goto remove_em;
4513			/*
4514			 * If it's in the list of modified extents, remove it
4515			 * only if its generation is older then the current one,
4516			 * in which case we don't need it for a fast fsync.
4517			 * Otherwise don't remove it, we could be racing with an
4518			 * ongoing fast fsync that could miss the new extent.
4519			 */
4520			fs_info = btrfs_inode->root->fs_info;
4521			spin_lock(&fs_info->trans_lock);
4522			cur_gen = fs_info->generation;
4523			spin_unlock(&fs_info->trans_lock);
4524			if (em->generation >= cur_gen)
4525				goto next;
4526remove_em:
4527			/*
4528			 * We only remove extent maps that are not in the list of
4529			 * modified extents or that are in the list but with a
4530			 * generation lower than the current generation, so there
4531			 * is no need to set the full fsync flag on the inode (it
4532			 * hurts the fsync performance for workloads with a data
4533			 * size that exceeds or is close to the system's memory).
4534			 */
4535			remove_extent_mapping(map, em);
4536			/* once for the rb tree */
4537			free_extent_map(em);
4538next:
4539			start = extent_map_end(em);
4540			write_unlock(&map->lock);
4541
4542			/* once for us */
4543			free_extent_map(em);
4544
4545			cond_resched(); /* Allow large-extent preemption. */
4546		}
4547	}
4548	return try_release_extent_state(tree, page, mask);
4549}
4550
4551/*
4552 * helper function for fiemap, which doesn't want to see any holes.
4553 * This maps until we find something past 'last'
4554 */
4555static struct extent_map *get_extent_skip_holes(struct inode *inode,
4556						u64 offset, u64 last)
 
 
4557{
4558	u64 sectorsize = btrfs_inode_sectorsize(inode);
4559	struct extent_map *em;
4560	u64 len;
4561
4562	if (offset >= last)
4563		return NULL;
4564
4565	while (1) {
4566		len = last - offset;
4567		if (len == 0)
4568			break;
4569		len = ALIGN(len, sectorsize);
4570		em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
4571		if (IS_ERR_OR_NULL(em))
4572			return em;
4573
4574		/* if this isn't a hole return it */
4575		if (em->block_start != EXTENT_MAP_HOLE)
 
4576			return em;
 
4577
4578		/* this is a hole, advance to the next extent */
4579		offset = extent_map_end(em);
4580		free_extent_map(em);
4581		if (offset >= last)
4582			break;
4583	}
4584	return NULL;
4585}
4586
4587/*
4588 * To cache previous fiemap extent
4589 *
4590 * Will be used for merging fiemap extent
4591 */
4592struct fiemap_cache {
4593	u64 offset;
4594	u64 phys;
4595	u64 len;
4596	u32 flags;
4597	bool cached;
4598};
4599
4600/*
4601 * Helper to submit fiemap extent.
4602 *
4603 * Will try to merge current fiemap extent specified by @offset, @phys,
4604 * @len and @flags with cached one.
4605 * Only when we fail to merge is the cached one submitted as a
4606 * fiemap extent.
4607 *
4608 * Return value is the same as fiemap_fill_next_extent().
4609 */
4610static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4611				struct fiemap_cache *cache,
4612				u64 offset, u64 phys, u64 len, u32 flags)
4613{
4614	int ret = 0;
4615
4616	if (!cache->cached)
4617		goto assign;
4618
4619	/*
4620	 * Sanity check, extent_fiemap() should have ensured that new
4621	 * fiemap extent won't overlap with cached one.
4622	 * Not recoverable.
4623	 *
4624	 * NOTE: Physical address can overlap, due to compression
4625	 */
4626	if (cache->offset + cache->len > offset) {
4627		WARN_ON(1);
4628		return -EINVAL;
4629	}
4630
4631	/*
4632	 * Only merges fiemap extents if
4633	 * 1) Their logical addresses are continuous
4634	 *
4635	 * 2) Their physical addresses are continuous
4636	 *    So truly compressed (physical size smaller than logical size)
4637	 *    extents won't get merged with each other
4638	 *
4639	 * 3) Share same flags except FIEMAP_EXTENT_LAST
4640	 *    So regular extent won't get merged with prealloc extent
4641	 */
4642	if (cache->offset + cache->len  == offset &&
4643	    cache->phys + cache->len == phys  &&
4644	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4645			(flags & ~FIEMAP_EXTENT_LAST)) {
4646		cache->len += len;
4647		cache->flags |= flags;
4648		goto try_submit_last;
4649	}
4650
4651	/* Not mergeable, need to submit cached one */
4652	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4653				      cache->len, cache->flags);
4654	cache->cached = false;
4655	if (ret)
4656		return ret;
4657assign:
4658	cache->cached = true;
4659	cache->offset = offset;
4660	cache->phys = phys;
4661	cache->len = len;
4662	cache->flags = flags;
4663try_submit_last:
4664	if (cache->flags & FIEMAP_EXTENT_LAST) {
4665		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4666				cache->phys, cache->len, cache->flags);
4667		cache->cached = false;
4668	}
4669	return ret;
4670}
4671
4672/*
4673 * Emit last fiemap cache
4674 *
4675 * The last fiemap cache may still be cached in the following case:
4676 * 0		      4k		    8k
4677 * |<- Fiemap range ->|
4678 * |<------------  First extent ----------->|
4679 *
4680 * In this case, the first extent range will be cached but not emitted.
4681 * So we must emit it before ending extent_fiemap().
4682 */
4683static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4684				  struct fiemap_cache *cache)
4685{
4686	int ret;
4687
4688	if (!cache->cached)
4689		return 0;
4690
4691	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4692				      cache->len, cache->flags);
4693	cache->cached = false;
4694	if (ret > 0)
4695		ret = 0;
4696	return ret;
4697}
4698
4699int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4700		  u64 start, u64 len)
4701{
4702	int ret = 0;
4703	u64 off = start;
4704	u64 max = start + len;
4705	u32 flags = 0;
4706	u32 found_type;
4707	u64 last;
4708	u64 last_for_get_extent = 0;
4709	u64 disko = 0;
4710	u64 isize = i_size_read(inode);
4711	struct btrfs_key found_key;
4712	struct extent_map *em = NULL;
4713	struct extent_state *cached_state = NULL;
4714	struct btrfs_path *path;
4715	struct btrfs_root *root = BTRFS_I(inode)->root;
4716	struct fiemap_cache cache = { 0 };
4717	struct ulist *roots;
4718	struct ulist *tmp_ulist;
4719	int end = 0;
4720	u64 em_start = 0;
4721	u64 em_len = 0;
4722	u64 em_end = 0;
4723
4724	if (len == 0)
4725		return -EINVAL;
4726
4727	path = btrfs_alloc_path();
4728	if (!path)
4729		return -ENOMEM;
4730	path->leave_spinning = 1;
4731
4732	roots = ulist_alloc(GFP_KERNEL);
4733	tmp_ulist = ulist_alloc(GFP_KERNEL);
4734	if (!roots || !tmp_ulist) {
4735		ret = -ENOMEM;
4736		goto out_free_ulist;
4737	}
4738
4739	start = round_down(start, btrfs_inode_sectorsize(inode));
4740	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4741
4742	/*
4743	 * lookup the last file extent.  We're not using i_size here
4744	 * because there might be preallocation past i_size
4745	 */
4746	ret = btrfs_lookup_file_extent(NULL, root, path,
4747			btrfs_ino(BTRFS_I(inode)), -1, 0);
4748	if (ret < 0) {
4749		goto out_free_ulist;
4750	} else {
4751		WARN_ON(!ret);
4752		if (ret == 1)
4753			ret = 0;
4754	}
4755
4756	path->slots[0]--;
4757	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4758	found_type = found_key.type;
4759
4760	/* No extents, but there might be delalloc bits */
4761	if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4762	    found_type != BTRFS_EXTENT_DATA_KEY) {
4763		/* have to trust i_size as the end */
4764		last = (u64)-1;
4765		last_for_get_extent = isize;
4766	} else {
4767		/*
4768		 * remember the start of the last extent.  There are a
4769		 * bunch of different factors that go into the length of the
4770		 * extent, so it's much less complex to remember where it started
4771		 */
4772		last = found_key.offset;
4773		last_for_get_extent = last + 1;
4774	}
4775	btrfs_release_path(path);
4776
4777	/*
4778	 * we might have some extents allocated but more delalloc past those
4779	 * extents.  so, we trust isize unless the start of the last extent is
4780	 * beyond isize
4781	 */
4782	if (last < isize) {
4783		last = (u64)-1;
4784		last_for_get_extent = isize;
4785	}
4786
4787	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4788			 &cached_state);
4789
4790	em = get_extent_skip_holes(inode, start, last_for_get_extent);
 
4791	if (!em)
4792		goto out;
4793	if (IS_ERR(em)) {
4794		ret = PTR_ERR(em);
4795		goto out;
4796	}
4797
4798	while (!end) {
4799		u64 offset_in_extent = 0;
4800
4801		/* break if the extent we found is outside the range */
4802		if (em->start >= max || extent_map_end(em) < off)
4803			break;
4804
4805		/*
4806		 * get_extent may return an extent that starts before our
4807		 * requested range.  We have to make sure the ranges
4808		 * we return to fiemap always move forward and don't
4809		 * overlap, so adjust the offsets here
4810		 */
4811		em_start = max(em->start, off);
4812
4813		/*
4814		 * record the offset from the start of the extent
4815		 * for adjusting the disk offset below.  Only do this if the
4816		 * extent isn't compressed since our in ram offset may be past
4817		 * what we have actually allocated on disk.
4818		 */
4819		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4820			offset_in_extent = em_start - em->start;
4821		em_end = extent_map_end(em);
4822		em_len = em_end - em_start;
 
4823		flags = 0;
4824		if (em->block_start < EXTENT_MAP_LAST_BYTE)
4825			disko = em->block_start + offset_in_extent;
4826		else
4827			disko = 0;
4828
4829		/*
4830		 * bump off for our next call to get_extent
4831		 */
4832		off = extent_map_end(em);
4833		if (off >= max)
4834			end = 1;
4835
4836		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4837			end = 1;
4838			flags |= FIEMAP_EXTENT_LAST;
4839		} else if (em->block_start == EXTENT_MAP_INLINE) {
4840			flags |= (FIEMAP_EXTENT_DATA_INLINE |
4841				  FIEMAP_EXTENT_NOT_ALIGNED);
4842		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
4843			flags |= (FIEMAP_EXTENT_DELALLOC |
4844				  FIEMAP_EXTENT_UNKNOWN);
4845		} else if (fieinfo->fi_extents_max) {
4846			u64 bytenr = em->block_start -
4847				(em->start - em->orig_start);
4848
 
 
4849			/*
4850			 * As btrfs supports shared space, this information
4851			 * can be exported to userspace tools via
4852			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4853			 * then we're just getting a count and we can skip the
4854			 * lookup stuff.
4855			 */
4856			ret = btrfs_check_shared(root,
4857						 btrfs_ino(BTRFS_I(inode)),
4858						 bytenr, roots, tmp_ulist);
4859			if (ret < 0)
4860				goto out_free;
4861			if (ret)
4862				flags |= FIEMAP_EXTENT_SHARED;
4863			ret = 0;
4864		}
4865		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4866			flags |= FIEMAP_EXTENT_ENCODED;
4867		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4868			flags |= FIEMAP_EXTENT_UNWRITTEN;
4869
4870		free_extent_map(em);
4871		em = NULL;
4872		if ((em_start >= last) || em_len == (u64)-1 ||
4873		   (last == (u64)-1 && isize <= em_end)) {
4874			flags |= FIEMAP_EXTENT_LAST;
4875			end = 1;
4876		}
4877
4878		/* now scan forward to see if this is really the last extent. */
4879		em = get_extent_skip_holes(inode, off, last_for_get_extent);
 
4880		if (IS_ERR(em)) {
4881			ret = PTR_ERR(em);
4882			goto out;
4883		}
4884		if (!em) {
4885			flags |= FIEMAP_EXTENT_LAST;
4886			end = 1;
4887		}
4888		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4889					   em_len, flags);
4890		if (ret) {
4891			if (ret == 1)
4892				ret = 0;
4893			goto out_free;
4894		}
4895	}
4896out_free:
4897	if (!ret)
4898		ret = emit_last_fiemap_cache(fieinfo, &cache);
4899	free_extent_map(em);
4900out:
 
4901	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4902			     &cached_state);
4903
4904out_free_ulist:
4905	btrfs_free_path(path);
4906	ulist_free(roots);
4907	ulist_free(tmp_ulist);
4908	return ret;
4909}
4910
4911static void __free_extent_buffer(struct extent_buffer *eb)
4912{
 
4913	kmem_cache_free(extent_buffer_cache, eb);
4914}
4915
4916int extent_buffer_under_io(const struct extent_buffer *eb)
4917{
4918	return (atomic_read(&eb->io_pages) ||
4919		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4920		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4921}
4922
4923/*
4924 * Release all pages attached to the extent buffer.
4925 */
4926static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4927{
4928	int i;
4929	int num_pages;
4930	int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4931
4932	BUG_ON(extent_buffer_under_io(eb));
4933
4934	num_pages = num_extent_pages(eb);
4935	for (i = 0; i < num_pages; i++) {
4936		struct page *page = eb->pages[i];
4937
 
 
 
4938		if (!page)
4939			continue;
4940		if (mapped)
4941			spin_lock(&page->mapping->private_lock);
4942		/*
4943		 * We do this since we'll remove the pages after we've
4944		 * removed the eb from the radix tree, so we could race
4945		 * and have this page now attached to the new eb.  So
4946		 * only clear page_private if it's still connected to
4947		 * this eb.
4948		 */
4949		if (PagePrivate(page) &&
4950		    page->private == (unsigned long)eb) {
4951			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4952			BUG_ON(PageDirty(page));
4953			BUG_ON(PageWriteback(page));
4954			/*
4955			 * We need to make sure we haven't been attached
4956			 * to a new eb.
4957			 */
4958			detach_page_private(page);
 
 
 
4959		}
4960
4961		if (mapped)
4962			spin_unlock(&page->mapping->private_lock);
4963
4964		/* One for when we allocated the page */
4965		put_page(page);
4966	}
4967}
4968
4969/*
4970 * Helper for releasing the extent buffer.
4971 */
4972static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4973{
4974	btrfs_release_extent_buffer_pages(eb);
4975	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
4976	__free_extent_buffer(eb);
4977}
4978
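/*
 * Allocate and initialise a bare extent_buffer for the given range; the
 * caller is responsible for allocating and attaching the backing pages.
 */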
4979static struct extent_buffer *
4980__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4981		      unsigned long len)
4982{
4983	struct extent_buffer *eb = NULL;
4984
4985	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4986	eb->start = start;
4987	eb->len = len;
4988	eb->fs_info = fs_info;
4989	eb->bflags = 0;
4990	rwlock_init(&eb->lock);
 
 
4991	atomic_set(&eb->blocking_readers, 0);
4992	eb->blocking_writers = 0;
4993	eb->lock_nested = false;
 
 
4994	init_waitqueue_head(&eb->write_lock_wq);
4995	init_waitqueue_head(&eb->read_lock_wq);
4996
4997	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
4998			     &fs_info->allocated_ebs);
4999
5000	spin_lock_init(&eb->refs_lock);
5001	atomic_set(&eb->refs, 1);
5002	atomic_set(&eb->io_pages, 0);
5003
5004	/*
5005	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
5006	 */
5007	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
5008		> MAX_INLINE_EXTENT_BUFFER_SIZE);
5009	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
5010
5011#ifdef CONFIG_BTRFS_DEBUG
5012	eb->spinning_writers = 0;
5013	atomic_set(&eb->spinning_readers, 0);
5014	atomic_set(&eb->read_locks, 0);
5015	eb->write_locks = 0;
5016#endif
5017
5018	return eb;
5019}
5020
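/*
 * Create an unmapped, uptodate copy of @src backed by freshly allocated
 * pages.  Returns NULL if allocation fails.
 */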
5021struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5022{
5023	int i;
5024	struct page *p;
5025	struct extent_buffer *new;
5026	int num_pages = num_extent_pages(src);
5027
5028	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5029	if (new == NULL)
5030		return NULL;
5031
5032	for (i = 0; i < num_pages; i++) {
5033		p = alloc_page(GFP_NOFS);
5034		if (!p) {
5035			btrfs_release_extent_buffer(new);
5036			return NULL;
5037		}
5038		attach_extent_buffer_page(new, p);
5039		WARN_ON(PageDirty(p));
5040		SetPageUptodate(p);
5041		new->pages[i] = p;
5042		copy_page(page_address(p), page_address(src->pages[i]));
5043	}
5044
 
5045	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
5046	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5047
5048	return new;
5049}
5050
5051struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5052						  u64 start, unsigned long len)
5053{
5054	struct extent_buffer *eb;
5055	int num_pages;
5056	int i;
 
 
5057
5058	eb = __alloc_extent_buffer(fs_info, start, len);
5059	if (!eb)
5060		return NULL;
5061
5062	num_pages = num_extent_pages(eb);
5063	for (i = 0; i < num_pages; i++) {
5064		eb->pages[i] = alloc_page(GFP_NOFS);
5065		if (!eb->pages[i])
5066			goto err;
5067	}
5068	set_extent_buffer_uptodate(eb);
5069	btrfs_set_header_nritems(eb, 0);
5070	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5071
5072	return eb;
5073err:
5074	for (; i > 0; i--)
5075		__free_page(eb->pages[i - 1]);
5076	__free_extent_buffer(eb);
5077	return NULL;
5078}
5079
5080struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5081						u64 start)
5082{
5083	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5084}
5085
5086static void check_buffer_tree_ref(struct extent_buffer *eb)
5087{
5088	int refs;
5089	/*
5090	 * The TREE_REF bit is first set when the extent_buffer is added
5091	 * to the radix tree. It is also reset, if unset, when a new reference
5092	 * is created by find_extent_buffer.
5093	 *
5094	 * It is only cleared in two cases: freeing the last non-tree
5095	 * reference to the extent_buffer when its STALE bit is set or
5096	 * calling releasepage when the tree reference is the only reference.
 
5097	 *
5098	 * In both cases, care is taken to ensure that the extent_buffer's
5099	 * pages are not under io. However, releasepage can be concurrently
5100	 * called with creating new references, which is prone to race
5101	 * conditions between the calls to check_buffer_tree_ref in those
5102	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
 
5103	 *
5104	 * The actual lifetime of the extent_buffer in the radix tree is
5105	 * adequately protected by the refcount, but the TREE_REF bit and
5106	 * its corresponding reference are not. To protect against this
5107	 * class of races, we call check_buffer_tree_ref from the codepaths
5108	 * which trigger io after they set eb->io_pages. Note that once io is
5109	 * initiated, TREE_REF can no longer be cleared, so that is the
5110	 * moment at which any such race is best fixed.
5111	 */
5112	refs = atomic_read(&eb->refs);
5113	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5114		return;
5115
5116	spin_lock(&eb->refs_lock);
5117	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5118		atomic_inc(&eb->refs);
5119	spin_unlock(&eb->refs_lock);
5120}
5121
5122static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5123		struct page *accessed)
5124{
5125	int num_pages, i;
5126
5127	check_buffer_tree_ref(eb);
5128
5129	num_pages = num_extent_pages(eb);
5130	for (i = 0; i < num_pages; i++) {
5131		struct page *p = eb->pages[i];
5132
5133		if (p != accessed)
5134			mark_page_accessed(p);
5135	}
5136}
5137
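/*
 * Look up an extent buffer in the radix tree and return it with an extra
 * reference, or NULL if it isn't present.
 */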
5138struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5139					 u64 start)
5140{
5141	struct extent_buffer *eb;
5142
5143	rcu_read_lock();
5144	eb = radix_tree_lookup(&fs_info->buffer_radix,
5145			       start >> PAGE_SHIFT);
5146	if (eb && atomic_inc_not_zero(&eb->refs)) {
5147		rcu_read_unlock();
5148		/*
5149		 * Lock our eb's refs_lock to avoid races with
5150		 * free_extent_buffer. When we get our eb it might be flagged
5151		 * with EXTENT_BUFFER_STALE and another task running
5152		 * free_extent_buffer might have seen that flag set,
5153		 * eb->refs == 2, that the buffer isn't under IO (dirty and
5154		 * writeback flags not set) and it's still in the tree (flag
5155		 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5156		 * of decrementing the extent buffer's reference count twice.
5157		 * So here we could race and increment the eb's reference count,
5158		 * clear its stale flag, mark it as dirty and drop our reference
5159		 * before the other task finishes executing free_extent_buffer,
5160		 * which would later result in an attempt to free an extent
5161		 * buffer that is dirty.
5162		 */
5163		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5164			spin_lock(&eb->refs_lock);
5165			spin_unlock(&eb->refs_lock);
5166		}
5167		mark_extent_buffer_accessed(eb, NULL);
5168		return eb;
5169	}
5170	rcu_read_unlock();
5171
5172	return NULL;
5173}
5174
5175#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5176struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5177					u64 start)
5178{
5179	struct extent_buffer *eb, *exists = NULL;
5180	int ret;
5181
5182	eb = find_extent_buffer(fs_info, start);
5183	if (eb)
5184		return eb;
5185	eb = alloc_dummy_extent_buffer(fs_info, start);
5186	if (!eb)
5187		return ERR_PTR(-ENOMEM);
5188	eb->fs_info = fs_info;
5189again:
5190	ret = radix_tree_preload(GFP_NOFS);
5191	if (ret) {
5192		exists = ERR_PTR(ret);
5193		goto free_eb;
5194	}
5195	spin_lock(&fs_info->buffer_lock);
5196	ret = radix_tree_insert(&fs_info->buffer_radix,
5197				start >> PAGE_SHIFT, eb);
5198	spin_unlock(&fs_info->buffer_lock);
5199	radix_tree_preload_end();
5200	if (ret == -EEXIST) {
5201		exists = find_extent_buffer(fs_info, start);
5202		if (exists)
5203			goto free_eb;
5204		else
5205			goto again;
5206	}
5207	check_buffer_tree_ref(eb);
5208	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5209
5210	return eb;
5211free_eb:
5212	btrfs_release_extent_buffer(eb);
5213	return exists;
5214}
5215#endif
5216
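/*
 * Return the extent buffer for the tree block starting at @start, either by
 * finding an existing one or by allocating a new one, attaching its pages
 * from the btree inode's page cache and inserting it into the radix tree.
 */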
5217struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5218					  u64 start)
5219{
5220	unsigned long len = fs_info->nodesize;
5221	int num_pages;
5222	int i;
5223	unsigned long index = start >> PAGE_SHIFT;
5224	struct extent_buffer *eb;
5225	struct extent_buffer *exists = NULL;
5226	struct page *p;
5227	struct address_space *mapping = fs_info->btree_inode->i_mapping;
5228	int uptodate = 1;
5229	int ret;
5230
5231	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5232		btrfs_err(fs_info, "bad tree block start %llu", start);
5233		return ERR_PTR(-EINVAL);
5234	}
5235
5236	eb = find_extent_buffer(fs_info, start);
5237	if (eb)
5238		return eb;
5239
5240	eb = __alloc_extent_buffer(fs_info, start, len);
5241	if (!eb)
5242		return ERR_PTR(-ENOMEM);
5243
5244	num_pages = num_extent_pages(eb);
5245	for (i = 0; i < num_pages; i++, index++) {
5246		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5247		if (!p) {
5248			exists = ERR_PTR(-ENOMEM);
5249			goto free_eb;
5250		}
5251
5252		spin_lock(&mapping->private_lock);
5253		if (PagePrivate(p)) {
5254			/*
5255			 * We could have already allocated an eb for this page
5256			 * and attached one, so let's see if we can get a ref on
5257			 * the existing eb, and if we can we know it's good and
5258			 * we can just return that one, else we know we can just
5259			 * overwrite page->private.
5260			 */
5261			exists = (struct extent_buffer *)p->private;
5262			if (atomic_inc_not_zero(&exists->refs)) {
5263				spin_unlock(&mapping->private_lock);
5264				unlock_page(p);
5265				put_page(p);
5266				mark_extent_buffer_accessed(exists, p);
5267				goto free_eb;
5268			}
5269			exists = NULL;
5270
5271			/*
5272			 * Do this so attach doesn't complain and we need to
5273			 * drop the ref the old guy had.
5274			 */
5275			ClearPagePrivate(p);
5276			WARN_ON(PageDirty(p));
5277			put_page(p);
5278		}
5279		attach_extent_buffer_page(eb, p);
5280		spin_unlock(&mapping->private_lock);
5281		WARN_ON(PageDirty(p));
5282		eb->pages[i] = p;
5283		if (!PageUptodate(p))
5284			uptodate = 0;
5285
5286		/*
5287		 * We can't unlock the pages just yet since the extent buffer
5288		 * hasn't been properly inserted in the radix tree, this
5289		 * opens a race with btree_releasepage which can free a page
5290		 * while we are still filling in all pages for the buffer and
5291		 * we could crash.
5292		 */
5293	}
5294	if (uptodate)
5295		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5296again:
5297	ret = radix_tree_preload(GFP_NOFS);
5298	if (ret) {
5299		exists = ERR_PTR(ret);
5300		goto free_eb;
5301	}
5302
5303	spin_lock(&fs_info->buffer_lock);
5304	ret = radix_tree_insert(&fs_info->buffer_radix,
5305				start >> PAGE_SHIFT, eb);
5306	spin_unlock(&fs_info->buffer_lock);
5307	radix_tree_preload_end();
5308	if (ret == -EEXIST) {
5309		exists = find_extent_buffer(fs_info, start);
5310		if (exists)
5311			goto free_eb;
5312		else
5313			goto again;
5314	}
5315	/* add one reference for the tree */
5316	check_buffer_tree_ref(eb);
5317	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5318
5319	/*
5320	 * Now it's safe to unlock the pages because any calls to
5321	 * btree_releasepage will correctly detect that a page belongs to a
5322	 * live buffer and won't free them prematurely.
5323	 */
5324	for (i = 0; i < num_pages; i++)
5325		unlock_page(eb->pages[i]);
5326	return eb;
5327
5328free_eb:
5329	WARN_ON(!atomic_dec_and_test(&eb->refs));
5330	for (i = 0; i < num_pages; i++) {
5331		if (eb->pages[i])
5332			unlock_page(eb->pages[i]);
5333	}
5334
5335	btrfs_release_extent_buffer(eb);
5336	return exists;
5337}
5338
5339static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5340{
5341	struct extent_buffer *eb =
5342			container_of(head, struct extent_buffer, rcu_head);
5343
5344	__free_extent_buffer(eb);
5345}
5346
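/*
 * Drop one reference from the eb.  Called with eb->refs_lock held, which is
 * always released before returning.  If this was the last reference, remove
 * the eb from the radix tree, release its pages and free the eb via RCU.
 * Returns 1 if the eb was freed, 0 otherwise.
 */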
 
5347static int release_extent_buffer(struct extent_buffer *eb)
5348	__releases(&eb->refs_lock)
5349{
5350	lockdep_assert_held(&eb->refs_lock);
5351
5352	WARN_ON(atomic_read(&eb->refs) == 0);
5353	if (atomic_dec_and_test(&eb->refs)) {
5354		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5355			struct btrfs_fs_info *fs_info = eb->fs_info;
5356
5357			spin_unlock(&eb->refs_lock);
5358
5359			spin_lock(&fs_info->buffer_lock);
5360			radix_tree_delete(&fs_info->buffer_radix,
5361					  eb->start >> PAGE_SHIFT);
5362			spin_unlock(&fs_info->buffer_lock);
5363		} else {
5364			spin_unlock(&eb->refs_lock);
5365		}
5366
5367		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5368		/* Should be safe to release our pages at this point */
5369		btrfs_release_extent_buffer_pages(eb);
5370#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5371		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5372			__free_extent_buffer(eb);
5373			return 1;
5374		}
5375#endif
5376		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5377		return 1;
5378	}
5379	spin_unlock(&eb->refs_lock);
5380
5381	return 0;
5382}
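
/*
 * Note (editorial summary of the code above): once the last reference is
 * dropped the teardown order is: remove the buffer from
 * fs_info->buffer_radix, release its pages, then free the extent_buffer
 * itself via call_rcu().  The RCU grace period is what keeps the lockless
 * radix tree lookup in find_extent_buffer() safe against a concurrent
 * final release.
 */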
5383
5384void free_extent_buffer(struct extent_buffer *eb)
5385{
5386	int refs;
5387	int old;
5388	if (!eb)
5389		return;
5390
5391	while (1) {
5392		refs = atomic_read(&eb->refs);
5393		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5394		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5395			refs == 1))
5396			break;
5397		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5398		if (old == refs)
5399			return;
5400	}
5401
5402	spin_lock(&eb->refs_lock);
5403	if (atomic_read(&eb->refs) == 2 &&
5404	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5405	    !extent_buffer_under_io(eb) &&
5406	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5407		atomic_dec(&eb->refs);
5408
5409	/*
5410	 * I know this is terrible, but it's temporary until we stop tracking
5411	 * the uptodate bits and such for the extent buffers.
5412	 */
5413	release_extent_buffer(eb);
5414}
5415
5416void free_extent_buffer_stale(struct extent_buffer *eb)
5417{
5418	if (!eb)
5419		return;
5420
5421	spin_lock(&eb->refs_lock);
5422	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5423
5424	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5425	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5426		atomic_dec(&eb->refs);
5427	release_extent_buffer(eb);
5428}
5429
5430void clear_extent_buffer_dirty(const struct extent_buffer *eb)
5431{
5432	int i;
5433	int num_pages;
5434	struct page *page;
5435
5436	num_pages = num_extent_pages(eb);
5437
5438	for (i = 0; i < num_pages; i++) {
5439		page = eb->pages[i];
5440		if (!PageDirty(page))
5441			continue;
5442
5443		lock_page(page);
5444		WARN_ON(!PagePrivate(page));
5445
5446		clear_page_dirty_for_io(page);
5447		xa_lock_irq(&page->mapping->i_pages);
5448		if (!PageDirty(page))
5449			__xa_clear_mark(&page->mapping->i_pages,
5450					page_index(page), PAGECACHE_TAG_DIRTY);
5451		xa_unlock_irq(&page->mapping->i_pages);
5452		ClearPageError(page);
5453		unlock_page(page);
5454	}
5455	WARN_ON(atomic_read(&eb->refs) == 0);
5456}
5457
5458bool set_extent_buffer_dirty(struct extent_buffer *eb)
5459{
5460	int i;
5461	int num_pages;
5462	bool was_dirty;
5463
5464	check_buffer_tree_ref(eb);
5465
5466	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5467
5468	num_pages = num_extent_pages(eb);
5469	WARN_ON(atomic_read(&eb->refs) == 0);
5470	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5471
5472	if (!was_dirty)
5473		for (i = 0; i < num_pages; i++)
5474			set_page_dirty(eb->pages[i]);
5475
5476#ifdef CONFIG_BTRFS_DEBUG
5477	for (i = 0; i < num_pages; i++)
5478		ASSERT(PageDirty(eb->pages[i]));
5479#endif
5480
5481	return was_dirty;
5482}
5483
5484void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5485{
5486	int i;
5487	struct page *page;
5488	int num_pages;
5489
5490	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5491	num_pages = num_extent_pages(eb);
5492	for (i = 0; i < num_pages; i++) {
5493		page = eb->pages[i];
5494		if (page)
5495			ClearPageUptodate(page);
5496	}
5497}
5498
5499void set_extent_buffer_uptodate(struct extent_buffer *eb)
5500{
5501	int i;
5502	struct page *page;
5503	int num_pages;
5504
5505	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5506	num_pages = num_extent_pages(eb);
5507	for (i = 0; i < num_pages; i++) {
5508		page = eb->pages[i];
5509		SetPageUptodate(page);
5510	}
5511}
5512
5513int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5514{
5515	int i;
5516	struct page *page;
5517	int err;
5518	int ret = 0;
5519	int locked_pages = 0;
5520	int all_uptodate = 1;
5521	int num_pages;
5522	unsigned long num_reads = 0;
5523	struct bio *bio = NULL;
5524	unsigned long bio_flags = 0;
5525
5526	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5527		return 0;
5528
5529	num_pages = num_extent_pages(eb);
5530	for (i = 0; i < num_pages; i++) {
5531		page = eb->pages[i];
5532		if (wait == WAIT_NONE) {
5533			if (!trylock_page(page))
5534				goto unlock_exit;
5535		} else {
5536			lock_page(page);
5537		}
5538		locked_pages++;
5539	}
5540	/*
5541	 * We need to lock all pages first to make sure that
5542	 * the uptodate bit of our pages won't be affected by
5543	 * clear_extent_buffer_uptodate().
5544	 */
5545	for (i = 0; i < num_pages; i++) {
5546		page = eb->pages[i];
5547		if (!PageUptodate(page)) {
5548			num_reads++;
5549			all_uptodate = 0;
5550		}
5551	}
5552
5553	if (all_uptodate) {
5554		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5555		goto unlock_exit;
5556	}
5557
5558	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5559	eb->read_mirror = 0;
5560	atomic_set(&eb->io_pages, num_reads);
5561	/*
5562	 * It is possible for releasepage to clear the TREE_REF bit before we
5563	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
5564	 */
5565	check_buffer_tree_ref(eb);
5566	for (i = 0; i < num_pages; i++) {
5567		page = eb->pages[i];
5568
5569		if (!PageUptodate(page)) {
5570			if (ret) {
5571				atomic_dec(&eb->io_pages);
5572				unlock_page(page);
5573				continue;
5574			}
5575
5576			ClearPageError(page);
5577			err = __extent_read_full_page(page,
5578						      btree_get_extent, &bio,
5579						      mirror_num, &bio_flags,
5580						      REQ_META);
5581			if (err) {
5582				ret = err;
5583				/*
5584				 * We passed &bio to __extent_read_full_page
5585				 * above, so if it returned an error the current
5586				 * page was not added to the bio and has already
5587				 * been unlocked.
5588				 *
5589				 * We must decrement io_pages ourselves.
5590				 */
5591				atomic_dec(&eb->io_pages);
5592			}
5593		} else {
5594			unlock_page(page);
5595		}
5596	}
5597
5598	if (bio) {
5599		err = submit_one_bio(bio, mirror_num, bio_flags);
5600		if (err)
5601			return err;
5602	}
5603
5604	if (ret || wait != WAIT_COMPLETE)
5605		return ret;
5606
5607	for (i = 0; i < num_pages; i++) {
5608		page = eb->pages[i];
5609		wait_on_page_locked(page);
5610		if (!PageUptodate(page))
5611			ret = -EIO;
5612	}
5613
5614	return ret;
5615
5616unlock_exit:
5617	while (locked_pages > 0) {
5618		locked_pages--;
5619		page = eb->pages[locked_pages];
5620		unlock_page(page);
5621	}
5622	return ret;
5623}
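
/*
 * Illustrative caller sketch (not part of this file): a reader that needs
 * the contents to be valid waits for completion, e.g.
 *
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
 *	if (ret < 0)
 *		return ret;	the buffer is not uptodate
 *
 * With WAIT_NONE only the page reads that can be started without blocking
 * are submitted and the function returns immediately, so the buffer is not
 * guaranteed to be uptodate on return.
 */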
5624
5625void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5626			unsigned long start, unsigned long len)
5627{
5628	size_t cur;
5629	size_t offset;
5630	struct page *page;
5631	char *kaddr;
5632	char *dst = (char *)dstv;
5633	unsigned long i = start >> PAGE_SHIFT;
5634
5635	if (start + len > eb->len) {
5636		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5637		     eb->start, eb->len, start, len);
5638		memset(dst, 0, len);
5639		return;
5640	}
5641
5642	offset = offset_in_page(start);
5643
5644	while (len > 0) {
5645		page = eb->pages[i];
5646
5647		cur = min(len, (PAGE_SIZE - offset));
5648		kaddr = page_address(page);
5649		memcpy(dst, kaddr + offset, cur);
5650
5651		dst += cur;
5652		len -= cur;
5653		offset = 0;
5654		i++;
5655	}
5656}
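
/*
 * Illustrative sketch (not part of this file): read_extent_buffer() is the
 * primitive the on-disk field accessors are built on.  Mirroring
 * write_extent_buffer_fsid() further down, copying the fsid out of a tree
 * block header could look like:
 *
 *	u8 fsid[BTRFS_FSID_SIZE];
 *
 *	read_extent_buffer(eb, fsid,
 *			   offsetof(struct btrfs_header, fsid),
 *			   BTRFS_FSID_SIZE);
 */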
5657
5658int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
5659				       void __user *dstv,
5660				       unsigned long start, unsigned long len)
5661{
5662	size_t cur;
5663	size_t offset;
5664	struct page *page;
5665	char *kaddr;
5666	char __user *dst = (char __user *)dstv;
5667	unsigned long i = start >> PAGE_SHIFT;
5668	int ret = 0;
5669
5670	WARN_ON(start > eb->len);
5671	WARN_ON(start + len > eb->start + eb->len);
5672
5673	offset = offset_in_page(start);
5674
5675	while (len > 0) {
5676		page = eb->pages[i];
5677
5678		cur = min(len, (PAGE_SIZE - offset));
5679		kaddr = page_address(page);
5680		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
5681			ret = -EFAULT;
5682			break;
5683		}
5684
5685		dst += cur;
5686		len -= cur;
5687		offset = 0;
5688		i++;
5689	}
5690
5691	return ret;
5692}
5693
5694int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5695			 unsigned long start, unsigned long len)
5696{
5697	size_t cur;
5698	size_t offset;
5699	struct page *page;
5700	char *kaddr;
5701	char *ptr = (char *)ptrv;
5702	unsigned long i = start >> PAGE_SHIFT;
5703	int ret = 0;
5704
5705	WARN_ON(start > eb->len);
5706	WARN_ON(start + len > eb->start + eb->len);
5707
5708	offset = offset_in_page(start);
5709
5710	while (len > 0) {
5711		page = eb->pages[i];
5712
5713		cur = min(len, (PAGE_SIZE - offset));
5714
5715		kaddr = page_address(page);
5716		ret = memcmp(ptr, kaddr + offset, cur);
5717		if (ret)
5718			break;
5719
5720		ptr += cur;
5721		len -= cur;
5722		offset = 0;
5723		i++;
5724	}
5725	return ret;
5726}
5727
5728void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
5729		const void *srcv)
5730{
5731	char *kaddr;
5732
5733	WARN_ON(!PageUptodate(eb->pages[0]));
5734	kaddr = page_address(eb->pages[0]);
5735	memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5736			BTRFS_FSID_SIZE);
5737}
5738
5739void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
5740{
5741	char *kaddr;
5742
5743	WARN_ON(!PageUptodate(eb->pages[0]));
5744	kaddr = page_address(eb->pages[0]);
5745	memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5746			BTRFS_FSID_SIZE);
5747}
5748
5749void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
5750			 unsigned long start, unsigned long len)
5751{
5752	size_t cur;
5753	size_t offset;
5754	struct page *page;
5755	char *kaddr;
5756	char *src = (char *)srcv;
5757	unsigned long i = start >> PAGE_SHIFT;
5758
5759	WARN_ON(start > eb->len);
5760	WARN_ON(start + len > eb->start + eb->len);
5761
5762	offset = offset_in_page(start);
5763
5764	while (len > 0) {
5765		page = eb->pages[i];
5766		WARN_ON(!PageUptodate(page));
5767
5768		cur = min(len, PAGE_SIZE - offset);
5769		kaddr = page_address(page);
5770		memcpy(kaddr + offset, src, cur);
5771
5772		src += cur;
5773		len -= cur;
5774		offset = 0;
5775		i++;
5776	}
5777}
5778
5779void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
5780		unsigned long len)
5781{
5782	size_t cur;
5783	size_t offset;
5784	struct page *page;
5785	char *kaddr;
5786	unsigned long i = start >> PAGE_SHIFT;
5787
5788	WARN_ON(start > eb->len);
5789	WARN_ON(start + len > eb->start + eb->len);
5790
5791	offset = offset_in_page(start);
5792
5793	while (len > 0) {
5794		page = eb->pages[i];
5795		WARN_ON(!PageUptodate(page));
5796
5797		cur = min(len, PAGE_SIZE - offset);
5798		kaddr = page_address(page);
5799		memset(kaddr + offset, 0, cur);
5800
5801		len -= cur;
5802		offset = 0;
5803		i++;
5804	}
5805}
5806
5807void copy_extent_buffer_full(const struct extent_buffer *dst,
5808			     const struct extent_buffer *src)
5809{
5810	int i;
5811	int num_pages;
5812
5813	ASSERT(dst->len == src->len);
5814
5815	num_pages = num_extent_pages(dst);
5816	for (i = 0; i < num_pages; i++)
5817		copy_page(page_address(dst->pages[i]),
5818				page_address(src->pages[i]));
5819}
5820
5821void copy_extent_buffer(const struct extent_buffer *dst,
5822			const struct extent_buffer *src,
5823			unsigned long dst_offset, unsigned long src_offset,
5824			unsigned long len)
5825{
5826	u64 dst_len = dst->len;
5827	size_t cur;
5828	size_t offset;
5829	struct page *page;
5830	char *kaddr;
5831	unsigned long i = dst_offset >> PAGE_SHIFT;
5832
5833	WARN_ON(src->len != dst_len);
5834
5835	offset = offset_in_page(dst_offset);
5836
5837	while (len > 0) {
5838		page = dst->pages[i];
5839		WARN_ON(!PageUptodate(page));
5840
5841		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5842
5843		kaddr = page_address(page);
5844		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5845
5846		src_offset += cur;
5847		len -= cur;
5848		offset = 0;
5849		i++;
5850	}
5851}
5852
5853/*
5854 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5855 * given bit number
5856 * @eb: the extent buffer
5857 * @start: offset of the bitmap item in the extent buffer
5858 * @nr: bit number
5859 * @page_index: return index of the page in the extent buffer that contains the
5860 * given bit number
5861 * @page_offset: return offset into the page given by page_index
5862 *
5863 * This helper hides the ugliness of finding the byte in an extent buffer which
5864 * contains a given bit.
5865 */
5866static inline void eb_bitmap_offset(const struct extent_buffer *eb,
5867				    unsigned long start, unsigned long nr,
5868				    unsigned long *page_index,
5869				    size_t *page_offset)
5870{
5871	size_t byte_offset = BIT_BYTE(nr);
5872	size_t offset;
5873
5874	/*
5875	 * The byte we want is the offset of the extent buffer + the offset of
5876	 * the bitmap item in the extent buffer + the offset of the byte in the
5877	 * bitmap item.
5878	 */
5879	offset = start + byte_offset;
5880
5881	*page_index = offset >> PAGE_SHIFT;
5882	*page_offset = offset_in_page(offset);
5883}
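
/*
 * Worked example (illustrative): BIT_BYTE(nr) is nr / BITS_PER_BYTE, so with
 * 4K pages, start = 4090 and nr = 50 give offset = 4090 + 6 = 4096, i.e.
 * *page_index = 1 and *page_offset = 0: the bit lives in the first byte of
 * the extent buffer's second page.
 */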
5884
5885/**
5886 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5887 * @eb: the extent buffer
5888 * @start: offset of the bitmap item in the extent buffer
5889 * @nr: bit number to test
5890 */
5891int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
5892			   unsigned long nr)
5893{
5894	u8 *kaddr;
5895	struct page *page;
5896	unsigned long i;
5897	size_t offset;
5898
5899	eb_bitmap_offset(eb, start, nr, &i, &offset);
5900	page = eb->pages[i];
5901	WARN_ON(!PageUptodate(page));
5902	kaddr = page_address(page);
5903	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5904}
5905
5906/**
5907 * extent_buffer_bitmap_set - set an area of a bitmap
5908 * @eb: the extent buffer
5909 * @start: offset of the bitmap item in the extent buffer
5910 * @pos: bit number of the first bit
5911 * @len: number of bits to set
5912 */
5913void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
5914			      unsigned long pos, unsigned long len)
5915{
5916	u8 *kaddr;
5917	struct page *page;
5918	unsigned long i;
5919	size_t offset;
5920	const unsigned int size = pos + len;
5921	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5922	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5923
5924	eb_bitmap_offset(eb, start, pos, &i, &offset);
5925	page = eb->pages[i];
5926	WARN_ON(!PageUptodate(page));
5927	kaddr = page_address(page);
5928
5929	while (len >= bits_to_set) {
5930		kaddr[offset] |= mask_to_set;
5931		len -= bits_to_set;
5932		bits_to_set = BITS_PER_BYTE;
5933		mask_to_set = ~0;
5934		if (++offset >= PAGE_SIZE && len > 0) {
5935			offset = 0;
5936			page = eb->pages[++i];
5937			WARN_ON(!PageUptodate(page));
5938			kaddr = page_address(page);
5939		}
5940	}
5941	if (len) {
5942		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5943		kaddr[offset] |= mask_to_set;
5944	}
5945}
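
/*
 * Worked example (illustrative): setting len = 6 bits starting at pos = 5.
 * BITMAP_FIRST_BYTE_MASK(5) is 0xe0 and bits_to_set is 3, so the first pass
 * ORs in bits 5-7 of the starting byte.  The remaining len = 3 is less than
 * a full byte, so the tail uses 0xff & BITMAP_LAST_BYTE_MASK(11) = 0x07 and
 * sets bits 8-10 of the following byte.
 */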
5946
5947
5948/**
5949 * extent_buffer_bitmap_clear - clear an area of a bitmap
5950 * @eb: the extent buffer
5951 * @start: offset of the bitmap item in the extent buffer
5952 * @pos: bit number of the first bit
5953 * @len: number of bits to clear
5954 */
5955void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
5956				unsigned long start, unsigned long pos,
5957				unsigned long len)
5958{
5959	u8 *kaddr;
5960	struct page *page;
5961	unsigned long i;
5962	size_t offset;
5963	const unsigned int size = pos + len;
5964	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5965	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5966
5967	eb_bitmap_offset(eb, start, pos, &i, &offset);
5968	page = eb->pages[i];
5969	WARN_ON(!PageUptodate(page));
5970	kaddr = page_address(page);
5971
5972	while (len >= bits_to_clear) {
5973		kaddr[offset] &= ~mask_to_clear;
5974		len -= bits_to_clear;
5975		bits_to_clear = BITS_PER_BYTE;
5976		mask_to_clear = ~0;
5977		if (++offset >= PAGE_SIZE && len > 0) {
5978			offset = 0;
5979			page = eb->pages[++i];
5980			WARN_ON(!PageUptodate(page));
5981			kaddr = page_address(page);
5982		}
5983	}
5984	if (len) {
5985		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5986		kaddr[offset] &= ~mask_to_clear;
5987	}
5988}
5989
5990static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5991{
5992	unsigned long distance = (src > dst) ? src - dst : dst - src;
5993	return distance < len;
5994}
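
/*
 * Illustrative example: src = 100, dst = 140, len = 64 gives distance 40,
 * which is less than len, so the ranges overlap and copy_pages() below must
 * use memmove(); with len = 32 the ranges are disjoint and memcpy() is safe.
 */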
5995
5996static void copy_pages(struct page *dst_page, struct page *src_page,
5997		       unsigned long dst_off, unsigned long src_off,
5998		       unsigned long len)
5999{
6000	char *dst_kaddr = page_address(dst_page);
6001	char *src_kaddr;
6002	int must_memmove = 0;
6003
6004	if (dst_page != src_page) {
6005		src_kaddr = page_address(src_page);
6006	} else {
6007		src_kaddr = dst_kaddr;
6008		if (areas_overlap(src_off, dst_off, len))
6009			must_memmove = 1;
6010	}
6011
6012	if (must_memmove)
6013		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
6014	else
6015		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
6016}
6017
6018void memcpy_extent_buffer(const struct extent_buffer *dst,
6019			  unsigned long dst_offset, unsigned long src_offset,
6020			  unsigned long len)
6021{
6022	struct btrfs_fs_info *fs_info = dst->fs_info;
6023	size_t cur;
6024	size_t dst_off_in_page;
6025	size_t src_off_in_page;
6026	unsigned long dst_i;
6027	unsigned long src_i;
6028
6029	if (src_offset + len > dst->len) {
6030		btrfs_err(fs_info,
6031			"memcpy bogus src_offset %lu move len %lu dst len %lu",
6032			 src_offset, len, dst->len);
6033		BUG();
6034	}
6035	if (dst_offset + len > dst->len) {
6036		btrfs_err(fs_info,
6037			"memcpy bogus dst_offset %lu move len %lu dst len %lu",
6038			 dst_offset, len, dst->len);
6039		BUG();
6040	}
6041
6042	while (len > 0) {
6043		dst_off_in_page = offset_in_page(dst_offset);
6044		src_off_in_page = offset_in_page(src_offset);
6045
6046		dst_i = dst_offset >> PAGE_SHIFT;
6047		src_i = src_offset >> PAGE_SHIFT;
6048
6049		cur = min(len, (unsigned long)(PAGE_SIZE -
6050					       src_off_in_page));
6051		cur = min_t(unsigned long, cur,
6052			(unsigned long)(PAGE_SIZE - dst_off_in_page));
6053
6054		copy_pages(dst->pages[dst_i], dst->pages[src_i],
6055			   dst_off_in_page, src_off_in_page, cur);
6056
6057		src_offset += cur;
6058		dst_offset += cur;
6059		len -= cur;
6060	}
6061}
6062
6063void memmove_extent_buffer(const struct extent_buffer *dst,
6064			   unsigned long dst_offset, unsigned long src_offset,
6065			   unsigned long len)
6066{
6067	struct btrfs_fs_info *fs_info = dst->fs_info;
6068	size_t cur;
6069	size_t dst_off_in_page;
6070	size_t src_off_in_page;
6071	unsigned long dst_end = dst_offset + len - 1;
6072	unsigned long src_end = src_offset + len - 1;
6073	unsigned long dst_i;
6074	unsigned long src_i;
6075
6076	if (src_offset + len > dst->len) {
6077		btrfs_err(fs_info,
6078			  "memmove bogus src_offset %lu move len %lu len %lu",
6079			  src_offset, len, dst->len);
6080		BUG();
6081	}
6082	if (dst_offset + len > dst->len) {
6083		btrfs_err(fs_info,
6084			  "memmove bogus dst_offset %lu move len %lu len %lu",
6085			  dst_offset, len, dst->len);
6086		BUG();
6087	}
6088	if (dst_offset < src_offset) {
6089		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
6090		return;
6091	}
6092	while (len > 0) {
6093		dst_i = dst_end >> PAGE_SHIFT;
6094		src_i = src_end >> PAGE_SHIFT;
6095
6096		dst_off_in_page = offset_in_page(dst_end);
6097		src_off_in_page = offset_in_page(src_end);
6098
6099		cur = min_t(unsigned long, len, src_off_in_page + 1);
6100		cur = min(cur, dst_off_in_page + 1);
6101		copy_pages(dst->pages[dst_i], dst->pages[src_i],
6102			   dst_off_in_page - cur + 1,
6103			   src_off_in_page - cur + 1, cur);
6104
6105		dst_end -= cur;
6106		src_end -= cur;
6107		len -= cur;
6108	}
6109}
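
/*
 * Note (illustrative): when dst_offset > src_offset the two ranges may
 * overlap with the destination ahead of the source, so the loop above walks
 * the range from its last byte backwards.  E.g. moving 8 bytes from offset 0
 * to offset 4 processes the tail first; copying front to back would clobber
 * source bytes 4-7 before they are read.
 */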
6110
6111int try_release_extent_buffer(struct page *page)
6112{
6113	struct extent_buffer *eb;
6114
6115	/*
6116	 * We need to make sure nobody is attaching this page to an eb right
6117	 * now.
6118	 */
6119	spin_lock(&page->mapping->private_lock);
6120	if (!PagePrivate(page)) {
6121		spin_unlock(&page->mapping->private_lock);
6122		return 1;
6123	}
6124
6125	eb = (struct extent_buffer *)page->private;
6126	BUG_ON(!eb);
6127
6128	/*
6129	 * This is a little awful but should be ok, we need to make sure that
6130	 * the eb doesn't disappear out from under us while we're looking at
6131	 * this page.
6132	 */
6133	spin_lock(&eb->refs_lock);
6134	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6135		spin_unlock(&eb->refs_lock);
6136		spin_unlock(&page->mapping->private_lock);
6137		return 0;
6138	}
6139	spin_unlock(&page->mapping->private_lock);
6140
6141	/*
6142	 * If tree ref isn't set then we know the ref on this eb is a real ref,
6143	 * so just return, this page will likely be freed soon anyway.
6144	 */
6145	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6146		spin_unlock(&eb->refs_lock);
6147		return 0;
6148	}
6149
6150	return release_extent_buffer(eb);
6151}