#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);

	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
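
/*
 * Illustrative example (an editor's sketch, not part of the original
 * file): with extents [0,4] and [10,14] in the tree, tree_search(tree, 6)
 * finds no exact match and returns the node for [10,14], the first
 * extent that ends at or after the requested offset.  The set/clear
 * loops below depend on exactly this behaviour.
 */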

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
		        struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			    struct extent_state *state,
			    int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		if (wake)
			wake_up(&state->wq);

		set |= clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	if (state->end < end && prealloc && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	set |= clear_state_bit(tree, state, &bits, wake);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		if (state->start == start)
			goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
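
/*
 * Illustrative sketch (an editor's addition, not in the original file):
 * dropping dirty accounting for one page-sized range would look like
 *
 *	clear_extent_bit(tree, start, start + PAGE_CACHE_SIZE - 1,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
 *			 GFP_NOFS);
 *
 * which is the same shape as the clear_extent_dirty() wrapper defined
 * further down in this file.
 */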

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			free_extent_state(prealloc);
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end; use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

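/*
 * Illustrative sketch (an editor's addition, not in the original file):
 * the helpers above are used in matched pairs around work on a byte
 * range, roughly:
 *
 *	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 *	... examine or modify state covering [start, end] ...
 *	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 *
 * lock_extent() sleeps in wait_extent_bit() until any conflicting
 * EXTENT_LOCKED range has been released.
 */
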
/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}
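
/*
 * Illustrative sketch (an editor's addition, not in the original file):
 * scanning forward for the next dirty range could look like
 *
 *	u64 found_start, found_end;
 *
 *	if (find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				  EXTENT_DIRTY) == 0) {
 *		... [found_start, found_end] has EXTENT_DIRTY set ...
 *	}
 */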

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end,
			 0, &cached_state, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}

int extent_clear_unlock_delalloc(struct inode *inode,
				struct extent_io_tree *tree,
				u64 start, u64 end, struct page *locked_page,
				unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
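
/*
 * Illustrative sketch (an editor's addition, not in the original file;
 * 'limit' is a stand-in for the caller's byte budget): counting one
 * contiguous run of delalloc bytes at or after 'start' could look like
 *
 *	u64 off = start;
 *	u64 bytes = count_range_bits(tree, &off, (u64)-1, limit,
 *				     EXTENT_DELALLOC, 1);
 *
 * On return, 'off' points at the first byte that was actually counted.
 */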

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
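
/*
 * Illustrative sketch (an editor's addition, not in the original file):
 * the two helpers above form a tiny keyed store, indexed by the exact
 * start offset of an extent_state:
 *
 *	set_state_private(tree, start, val);
 *	...
 *	if (get_state_private(tree, start, &val) == -ENOENT)
 *		... no extent_state begins exactly at 'start' ...
 */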

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
			     struct page *page)
{
	end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (err)
		uptodate = 0;

	do {
		struct page *page = bvec->bv_page;
		struct extent_state *cached = NULL;
		struct extent_state *state;

		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (++bvec <= bvec_end)
			prefetchw(&bvec->bv_page->flags);

		spin_lock(&tree->lock);
		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
		if (state && state->start == start) {
			/*
			 * take a reference on the state, unlock will drop
			 * the ref
			 */
			cache_state(state, &cached);
		}
		spin_unlock(&tree->lock);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      state);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				if (err)
					uptodate = 0;
				uncache_state(&cached);
				continue;
			}
		}

		if (uptodate) {
			set_extent_uptodate(tree, start, end, &cached,
					    GFP_ATOMIC);
		}
		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec <= bvec_end);

	bio_put(bio);
}
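/*
 * allocate a bio for 'nr_vecs' pages; when the caller is in memalloc
 * context and the allocation fails, retry with half as many vecs so
 * that writeout can still make forward progress under memory pressure.
 */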
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
			  unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num, bio_flags, start);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

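/*
 * try to add this page to the bio already in flight (*bio_ret) when the
 * sectors are contiguous and the compression flags match; otherwise the
 * old bio is submitted and a fresh one is started for this page.
 */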
static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	int ret = 0;
	struct bio *bio;
	int nr;
	int contig = 0;
	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_sector == sector;
		else
			contig = bio->bi_sector + (bio->bi_size >> 9) ==
				sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
					       bio_flags)) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);
			bio = NULL;
		} else {
			return 0;
		}
	}
	if (this_compressed)
		nr = BIO_MAX_PAGES;
	else
		nr = bio_get_nr_vecs(bdev);

	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio)
		return -ENOMEM;

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

static void set_page_extent_head(struct page *page, unsigned long len)
{
	WARN_ON(!PagePrivate(page));
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

1895/*
1896 * basic readpage implementation.  Locked extent state structs are inserted
1897 * into the tree that are removed when the IO is done (by the end_io
1898 * handlers)
 
1899 */
1900static int __extent_read_full_page(struct extent_io_tree *tree,
1901				   struct page *page,
1902				   get_extent_t *get_extent,
1903				   struct bio **bio, int mirror_num,
1904				   unsigned long *bio_flags)
 
1905{
1906	struct inode *inode = page->mapping->host;
1907	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1908	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1909	u64 end;
1910	u64 cur = start;
1911	u64 extent_offset;
1912	u64 last_byte = i_size_read(inode);
1913	u64 block_start;
1914	u64 cur_end;
1915	sector_t sector;
1916	struct extent_map *em;
1917	struct block_device *bdev;
1918	struct btrfs_ordered_extent *ordered;
1919	int ret;
1920	int nr = 0;
1921	size_t pg_offset = 0;
1922	size_t iosize;
1923	size_t disk_io_size;
1924	size_t blocksize = inode->i_sb->s_blocksize;
1925	unsigned long this_bio_flag = 0;
1926
1927	set_page_extent_mapped(page);
1928
1929	if (!PageUptodate(page)) {
1930		if (cleancache_get_page(page) == 0) {
1931			BUG_ON(blocksize != PAGE_SIZE);
1932			goto out;
1933		}
1934	}
1935
1936	end = page_end;
1937	while (1) {
1938		lock_extent(tree, start, end, GFP_NOFS);
1939		ordered = btrfs_lookup_ordered_extent(inode, start);
1940		if (!ordered)
1941			break;
1942		unlock_extent(tree, start, end, GFP_NOFS);
1943		btrfs_start_ordered_extent(inode, ordered, 1);
1944		btrfs_put_ordered_extent(ordered);
1945	}
1946
1947	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1948		char *userpage;
1949		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1950
1951		if (zero_offset) {
1952			iosize = PAGE_CACHE_SIZE - zero_offset;
1953			userpage = kmap_atomic(page, KM_USER0);
1954			memset(userpage + zero_offset, 0, iosize);
1955			flush_dcache_page(page);
1956			kunmap_atomic(userpage, KM_USER0);
1957		}
1958	}
1959	while (cur <= end) {
1960		if (cur >= last_byte) {
1961			char *userpage;
1962			struct extent_state *cached = NULL;
1963
1964			iosize = PAGE_CACHE_SIZE - pg_offset;
1965			userpage = kmap_atomic(page, KM_USER0);
1966			memset(userpage + pg_offset, 0, iosize);
1967			flush_dcache_page(page);
1968			kunmap_atomic(userpage, KM_USER0);
1969			set_extent_uptodate(tree, cur, cur + iosize - 1,
1970					    &cached, GFP_NOFS);
1971			unlock_extent_cached(tree, cur, cur + iosize - 1,
1972					     &cached, GFP_NOFS);
1973			break;
1974		}
1975		em = get_extent(inode, page, pg_offset, cur,
1976				end - cur + 1, 0);
1977		if (IS_ERR_OR_NULL(em)) {
1978			SetPageError(page);
1979			unlock_extent(tree, cur, end, GFP_NOFS);
1980			break;
1981		}
1982		extent_offset = cur - em->start;
1983		BUG_ON(extent_map_end(em) <= cur);
1984		BUG_ON(end < cur);
1985
1986		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1987			this_bio_flag = EXTENT_BIO_COMPRESSED;
1988			extent_set_compress_type(&this_bio_flag,
1989						 em->compress_type);
1990		}
1991
1992		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1993		cur_end = min(extent_map_end(em) - 1, end);
1994		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1995		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
1996			disk_io_size = em->block_len;
1997			sector = em->block_start >> 9;
1998		} else {
1999			sector = (em->block_start + extent_offset) >> 9;
2000			disk_io_size = iosize;
2001		}
2002		bdev = em->bdev;
2003		block_start = em->block_start;
2004		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2005			block_start = EXTENT_MAP_HOLE;
2006		free_extent_map(em);
2007		em = NULL;
2008
2009		/* we've found a hole, just zero and go on */
2010		if (block_start == EXTENT_MAP_HOLE) {
2011			char *userpage;
2012			struct extent_state *cached = NULL;
2013
2014			userpage = kmap_atomic(page, KM_USER0);
2015			memset(userpage + pg_offset, 0, iosize);
2016			flush_dcache_page(page);
2017			kunmap_atomic(userpage, KM_USER0);
2018
2019			set_extent_uptodate(tree, cur, cur + iosize - 1,
2020					    &cached, GFP_NOFS);
2021			unlock_extent_cached(tree, cur, cur + iosize - 1,
2022			                     &cached, GFP_NOFS);
2023			cur = cur + iosize;
2024			pg_offset += iosize;
2025			continue;
2026		}
2027		/* the get_extent function already copied into the page */
2028		if (test_range_bit(tree, cur, cur_end,
2029				   EXTENT_UPTODATE, 1, NULL)) {
2030			check_page_uptodate(tree, page);
2031			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032			cur = cur + iosize;
2033			pg_offset += iosize;
2034			continue;
2035		}
2036		/* we have an inline extent but it didn't get marked up
2037		 * to date.  Error out
2038		 */
2039		if (block_start == EXTENT_MAP_INLINE) {
2040			SetPageError(page);
2041			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2042			cur = cur + iosize;
2043			pg_offset += iosize;
2044			continue;
2045		}
2046
2047		ret = 0;
2048		if (tree->ops && tree->ops->readpage_io_hook) {
2049			ret = tree->ops->readpage_io_hook(page, cur,
2050							  cur + iosize - 1);
2051		}
2052		if (!ret) {
2053			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2054			pnr -= page->index;
2055			ret = submit_extent_page(READ, tree, page,
2056					 sector, disk_io_size, pg_offset,
2057					 bdev, bio, pnr,
2058					 end_bio_extent_readpage, mirror_num,
2059					 *bio_flags,
2060					 this_bio_flag);
2061			nr++;
2062			*bio_flags = this_bio_flag;
2063		}
2064		if (ret)
2065			SetPageError(page);
2066		cur = cur + iosize;
2067		pg_offset += iosize;
2068	}
2069out:
2070	if (!nr) {
2071		if (!PageError(page))
2072			SetPageUptodate(page);
2073		unlock_page(page);
2074	}
2075	return 0;
2076}
2077
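/*
 * The read entry points below share one partially built bio across calls:
 * extent_read_full_page() reads a single page and submits whatever is
 * left over, while extent_readpages() threads the same bio pointer
 * through every page so reads of adjacent pages can coalesce into larger
 * requests before the final submit_one_bio().
 */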
2078int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2079			    get_extent_t *get_extent)
2080{
2081	struct bio *bio = NULL;
2082	unsigned long bio_flags = 0;
2083	int ret;
2084
2085	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2086				      &bio_flags);
2087	if (bio)
2088		ret = submit_one_bio(READ, bio, 0, bio_flags);
2089	return ret;
2090}
2091
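/*
 * update_nr_written() charges freshly submitted pages against
 * wbc->nr_to_write and, for sweeping writeback (range_cyclic or a
 * whole-file range), advances mapping->writeback_index so the next
 * writeback pass resumes after the pages handled here.
 */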
2092static noinline void update_nr_written(struct page *page,
2093				      struct writeback_control *wbc,
2094				      unsigned long nr_written)
2095{
2096	wbc->nr_to_write -= nr_written;
2097	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2098	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2099		page->mapping->writeback_index = page->index + nr_written;
2100}
2101
2102/*
2103 * the writepage semantics are similar to regular writepage.  extent
2104 * records are inserted to lock ranges in the tree, and as dirty areas
2105 * are found, they are marked writeback.  Then the lock bits are removed
2106 * and the end_io handler clears the writeback ranges
2107 */
2108static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2109			      void *data)
2110{
2111	struct inode *inode = page->mapping->host;
2112	struct extent_page_data *epd = data;
2113	struct extent_io_tree *tree = epd->tree;
2114	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2115	u64 delalloc_start;
2116	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2117	u64 end;
2118	u64 cur = start;
2119	u64 extent_offset;
2120	u64 last_byte = i_size_read(inode);
2121	u64 block_start;
2122	u64 iosize;
2123	sector_t sector;
2124	struct extent_state *cached_state = NULL;
2125	struct extent_map *em;
2126	struct block_device *bdev;
2127	int ret;
2128	int nr = 0;
2129	size_t pg_offset = 0;
2130	size_t blocksize;
2131	loff_t i_size = i_size_read(inode);
2132	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2133	u64 nr_delalloc;
2134	u64 delalloc_end;
2135	int page_started;
2136	int compressed;
2137	int write_flags;
2138	unsigned long nr_written = 0;
2139
2140	if (wbc->sync_mode == WB_SYNC_ALL)
2141		write_flags = WRITE_SYNC;
2142	else
2143		write_flags = WRITE;
2144
2145	trace___extent_writepage(page, inode, wbc);
2146
2147	WARN_ON(!PageLocked(page));
2148	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2149	if (page->index > end_index ||
2150	   (page->index == end_index && !pg_offset)) {
2151		page->mapping->a_ops->invalidatepage(page, 0);
2152		unlock_page(page);
2153		return 0;
2154	}
2155
2156	if (page->index == end_index) {
2157		char *userpage;
2158
2159		userpage = kmap_atomic(page, KM_USER0);
2160		memset(userpage + pg_offset, 0,
2161		       PAGE_CACHE_SIZE - pg_offset);
2162		kunmap_atomic(userpage, KM_USER0);
2163		flush_dcache_page(page);
2164	}
2165	pg_offset = 0;
2166
2167	set_page_extent_mapped(page);
2168
2169	delalloc_start = start;
2170	delalloc_end = 0;
2171	page_started = 0;
2172	if (!epd->extent_locked) {
2173		u64 delalloc_to_write = 0;
2174		/*
2175		 * make sure the wbc mapping index is at least updated
2176		 * to this page.
2177		 */
2178		update_nr_written(page, wbc, 0);
2179
2180		while (delalloc_end < page_end) {
2181			nr_delalloc = find_lock_delalloc_range(inode, tree,
2182						       page,
2183						       &delalloc_start,
2184						       &delalloc_end,
2185						       128 * 1024 * 1024);
2186			if (nr_delalloc == 0) {
2187				delalloc_start = delalloc_end + 1;
2188				continue;
2189			}
2190			tree->ops->fill_delalloc(inode, page, delalloc_start,
2191						 delalloc_end, &page_started,
2192						 &nr_written);
2193			/*
2194			 * delalloc_end is already one less than the total
2195			 * length, so we don't subtract one from
2196			 * PAGE_CACHE_SIZE
2197			 */
2198			delalloc_to_write += (delalloc_end - delalloc_start +
2199					      PAGE_CACHE_SIZE) >>
2200					      PAGE_CACHE_SHIFT;
2201			delalloc_start = delalloc_end + 1;
2202		}
2203		if (wbc->nr_to_write < delalloc_to_write) {
2204			int thresh = 8192;
2205
2206			if (delalloc_to_write < thresh * 2)
2207				thresh = delalloc_to_write;
2208			wbc->nr_to_write = min_t(u64, delalloc_to_write,
2209						 thresh);
2210		}
2211
2212		/* did the fill delalloc function already unlock and start
2213		 * the IO?
2214		 */
2215		if (page_started) {
2216			ret = 0;
2217			/*
2218			 * we've unlocked the page, so we can't update
2219			 * the mapping's writeback index, just update
2220			 * nr_to_write.
2221			 */
2222			wbc->nr_to_write -= nr_written;
2223			goto done_unlocked;
2224		}
2225	}
2226	if (tree->ops && tree->ops->writepage_start_hook) {
2227		ret = tree->ops->writepage_start_hook(page, start,
2228						      page_end);
2229		if (ret == -EAGAIN) {
2230			redirty_page_for_writepage(wbc, page);
2231			update_nr_written(page, wbc, nr_written);
2232			unlock_page(page);
2233			ret = 0;
2234			goto done_unlocked;
2235		}
2236	}
2237
2238	/*
2239	 * we don't want to touch the inode after unlocking the page,
2240	 * so we update the mapping writeback index now
2241	 */
2242	update_nr_written(page, wbc, nr_written + 1);
2243
2244	end = page_end;
2245	if (last_byte <= start) {
2246		if (tree->ops && tree->ops->writepage_end_io_hook)
2247			tree->ops->writepage_end_io_hook(page, start,
2248							 page_end, NULL, 1);
2249		goto done;
2250	}
2251
2252	blocksize = inode->i_sb->s_blocksize;
2253
2254	while (cur <= end) {
2255		if (cur >= last_byte) {
2256			if (tree->ops && tree->ops->writepage_end_io_hook)
2257				tree->ops->writepage_end_io_hook(page, cur,
2258							 page_end, NULL, 1);
2259			break;
2260		}
2261		em = epd->get_extent(inode, page, pg_offset, cur,
2262				     end - cur + 1, 1);
2263		if (IS_ERR_OR_NULL(em)) {
2264			SetPageError(page);
2265			break;
2266		}
2267
2268		extent_offset = cur - em->start;
2269		BUG_ON(extent_map_end(em) <= cur);
2270		BUG_ON(end < cur);
2271		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2272		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2273		sector = (em->block_start + extent_offset) >> 9;
2274		bdev = em->bdev;
2275		block_start = em->block_start;
2276		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2277		free_extent_map(em);
2278		em = NULL;
2279
2280		/*
2281		 * compressed and inline extents are written through other
2282		 * paths in the FS
2283		 */
2284		if (compressed || block_start == EXTENT_MAP_HOLE ||
2285		    block_start == EXTENT_MAP_INLINE) {
2286			/*
2287			 * end_io notification does not happen here for
2288			 * compressed extents
2289			 */
2290			if (!compressed && tree->ops &&
2291			    tree->ops->writepage_end_io_hook)
2292				tree->ops->writepage_end_io_hook(page, cur,
2293							 cur + iosize - 1,
2294							 NULL, 1);
2295			else if (compressed) {
2296				/* we don't want to end_page_writeback on
2297				 * a compressed extent.  this happens
2298				 * elsewhere
2299				 */
2300				nr++;
2301			}
2302
2303			cur += iosize;
2304			pg_offset += iosize;
2305			continue;
2306		}
2307		/* leave this out until we have a page_mkwrite call */
2308		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2309				   EXTENT_DIRTY, 0, NULL)) {
2310			cur = cur + iosize;
2311			pg_offset += iosize;
2312			continue;
2313		}
2314
2315		if (tree->ops && tree->ops->writepage_io_hook) {
2316			ret = tree->ops->writepage_io_hook(page, cur,
2317						cur + iosize - 1);
2318		} else {
2319			ret = 0;
2320		}
2321		if (ret) {
2322			SetPageError(page);
2323		} else {
2324			unsigned long max_nr = end_index + 1;
2325
2326			set_range_writeback(tree, cur, cur + iosize - 1);
2327			if (!PageWriteback(page)) {
2328				printk(KERN_ERR "btrfs warning page %lu not "
2329				       "writeback, cur %llu end %llu\n",
2330				       page->index, (unsigned long long)cur,
2331				       (unsigned long long)end);
2332			}
2333
2334			ret = submit_extent_page(write_flags, tree, page,
2335						 sector, iosize, pg_offset,
2336						 bdev, &epd->bio, max_nr,
2337						 end_bio_extent_writepage,
2338						 0, 0, 0);
2339			if (ret)
2340				SetPageError(page);
2341		}
2342		cur = cur + iosize;
2343		pg_offset += iosize;
2344		nr++;
2345	}
2346done:
2347	if (nr == 0) {
2348		/* make sure the mapping tag for page dirty gets cleared */
2349		set_page_writeback(page);
2350		end_page_writeback(page);
2351	}
2352	unlock_page(page);
2353
2354done_unlocked:
2355
2356	/* drop our reference on any cached states */
2357	free_extent_state(cached_state);
2358	return 0;
2359}
2360
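/*
 * A worked example of the delalloc accounting above (illustrative
 * values, assuming 4K pages): finding a delalloc range of [0, 131071]
 * while writing page 0 gives delalloc_to_write +=
 * (131071 - 0 + 4096) >> 12 = 32 pages, which is then used to raise
 * wbc->nr_to_write so the whole range can be flushed in this pass.
 */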
2361/**
2362 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2363 * @mapping: address space structure to write
2364 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2365 * @writepage: function called for each page
2366 * @data: data passed to writepage function
2367 *
2368 * If a page is already under I/O, write_cache_pages() skips it, even
2369 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2370 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2371 * and msync() need to guarantee that all the data which was dirty at the time
2372 * the call was made get new I/O started against them.  If wbc->sync_mode is
2373 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2374 * existing IO to complete.
2375 */
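/*
 * For WB_SYNC_ALL the dirty pages are first re-tagged to
 * PAGECACHE_TAG_TOWRITE (tag_pages_for_writeback()) and the lookup below
 * scans that tag instead of PAGECACHE_TAG_DIRTY; pages dirtied after the
 * sync started are not re-tagged, which keeps this loop from livelocking
 * on a file that is being written to continuously.
 */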
2376static int extent_write_cache_pages(struct extent_io_tree *tree,
2377			     struct address_space *mapping,
2378			     struct writeback_control *wbc,
2379			     writepage_t writepage, void *data,
2380			     void (*flush_fn)(void *))
2381{
2382	int ret = 0;
2383	int done = 0;
2384	int nr_to_write_done = 0;
2385	struct pagevec pvec;
2386	int nr_pages;
2387	pgoff_t index;
2388	pgoff_t end;		/* Inclusive */
2389	int scanned = 0;
2390	int tag;
2391
2392	pagevec_init(&pvec, 0);
2393	if (wbc->range_cyclic) {
2394		index = mapping->writeback_index; /* Start from prev offset */
2395		end = -1;
2396	} else {
2397		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2398		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2399		scanned = 1;
2400	}
2401	if (wbc->sync_mode == WB_SYNC_ALL)
2402		tag = PAGECACHE_TAG_TOWRITE;
2403	else
2404		tag = PAGECACHE_TAG_DIRTY;
2405retry:
2406	if (wbc->sync_mode == WB_SYNC_ALL)
2407		tag_pages_for_writeback(mapping, index, end);
2408	while (!done && !nr_to_write_done && (index <= end) &&
2409	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2410			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2411		unsigned i;
2412
2413		scanned = 1;
2414		for (i = 0; i < nr_pages; i++) {
2415			struct page *page = pvec.pages[i];
2416
2417			/*
2418			 * At this point we hold neither mapping->tree_lock nor
2419			 * lock on the page itself: the page may be truncated or
2420			 * invalidated (changing page->mapping to NULL), or even
2421			 * swizzled back from swapper_space to tmpfs file
2422			 * mapping
2423			 */
2424			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2425				tree->ops->write_cache_pages_lock_hook(page);
2426			else
2427				lock_page(page);
2428
2429			if (unlikely(page->mapping != mapping)) {
2430				unlock_page(page);
2431				continue;
2432			}
2433
2434			if (!wbc->range_cyclic && page->index > end) {
2435				done = 1;
2436				unlock_page(page);
2437				continue;
2438			}
2439
2440			if (wbc->sync_mode != WB_SYNC_NONE) {
2441				if (PageWriteback(page))
2442					flush_fn(data);
2443				wait_on_page_writeback(page);
2444			}
2445
2446			if (PageWriteback(page) ||
2447			    !clear_page_dirty_for_io(page)) {
2448				unlock_page(page);
2449				continue;
2450			}
2451
2452			ret = (*writepage)(page, wbc, data);
2453
2454			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2455				unlock_page(page);
2456				ret = 0;
2457			}
2458			if (ret)
2459				done = 1;
2460
2461			/*
2462			 * the filesystem may choose to bump up nr_to_write.
2463			 * We have to make sure to honor the new nr_to_write
2464			 * at any time
2465			 */
2466			nr_to_write_done = wbc->nr_to_write <= 0;
2467		}
2468		pagevec_release(&pvec);
2469		cond_resched();
2470	}
2471	if (!scanned && !done) {
2472		/*
2473		 * We hit the last page and there is more work to be done: wrap
2474		 * back to the start of the file
2475		 */
2476		scanned = 1;
2477		index = 0;
2478		goto retry;
2479	}
2480	return ret;
2481}
2482
2483static void flush_epd_write_bio(struct extent_page_data *epd)
2484{
2485	if (epd->bio) {
2486		if (epd->sync_io)
2487			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2488		else
2489			submit_one_bio(WRITE, epd->bio, 0, 0);
2490		epd->bio = NULL;
2491	}
2492}
2493
2494static noinline void flush_write_bio(void *data)
2495{
2496	struct extent_page_data *epd = data;
2497	flush_epd_write_bio(epd);
2498}
2499
2500int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2501			  get_extent_t *get_extent,
2502			  struct writeback_control *wbc)
2503{
2504	int ret;
2505	struct extent_page_data epd = {
2506		.bio = NULL,
2507		.tree = tree,
2508		.get_extent = get_extent,
2509		.extent_locked = 0,
2510		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2511	};
2512
2513	ret = __extent_writepage(page, wbc, &epd);
2514
2515	flush_epd_write_bio(&epd);
2516	return ret;
2517}
2518
2519int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2520			      u64 start, u64 end, get_extent_t *get_extent,
2521			      int mode)
2522{
2523	int ret = 0;
2524	struct address_space *mapping = inode->i_mapping;
2525	struct page *page;
2526	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2527		PAGE_CACHE_SHIFT;
2528
2529	struct extent_page_data epd = {
2530		.bio = NULL,
2531		.tree = tree,
2532		.get_extent = get_extent,
2533		.extent_locked = 1,
2534		.sync_io = mode == WB_SYNC_ALL,
2535	};
2536	struct writeback_control wbc_writepages = {
2537		.sync_mode	= mode,
2538		.nr_to_write	= nr_pages * 2,
2539		.range_start	= start,
2540		.range_end	= end + 1,
2541	};
2542
2543	while (start <= end) {
2544		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2545		if (clear_page_dirty_for_io(page))
2546			ret = __extent_writepage(page, &wbc_writepages, &epd);
2547		else {
2548			if (tree->ops && tree->ops->writepage_end_io_hook)
2549				tree->ops->writepage_end_io_hook(page, start,
2550						 start + PAGE_CACHE_SIZE - 1,
2551						 NULL, 1);
2552			unlock_page(page);
2553		}
2554		page_cache_release(page);
2555		start += PAGE_CACHE_SIZE;
2556	}
2557
2558	flush_epd_write_bio(&epd);
2559	return ret;
2560}
2561
2562int extent_writepages(struct extent_io_tree *tree,
2563		      struct address_space *mapping,
2564		      get_extent_t *get_extent,
2565		      struct writeback_control *wbc)
2566{
2567	int ret = 0;
2568	struct extent_page_data epd = {
2569		.bio = NULL,
2570		.tree = tree,
2571		.get_extent = get_extent,
2572		.extent_locked = 0,
2573		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2574	};
2575
2576	ret = extent_write_cache_pages(tree, mapping, wbc,
2577				       __extent_writepage, &epd,
2578				       flush_write_bio);
2579	flush_epd_write_bio(&epd);
2580	return ret;
2581}
2582
2583int extent_readpages(struct extent_io_tree *tree,
2584		     struct address_space *mapping,
2585		     struct list_head *pages, unsigned nr_pages,
2586		     get_extent_t get_extent)
2587{
2588	struct bio *bio = NULL;
2589	unsigned page_idx;
2590	unsigned long bio_flags = 0;
2591
2592	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2593		struct page *page = list_entry(pages->prev, struct page, lru);
2594
2595		prefetchw(&page->flags);
2596		list_del(&page->lru);
2597		if (!add_to_page_cache_lru(page, mapping,
2598					page->index, GFP_NOFS)) {
2599			__extent_read_full_page(tree, page, get_extent,
2600						&bio, 0, &bio_flags);
2601		}
2602		page_cache_release(page);
2603	}
2604	BUG_ON(!list_empty(pages));
2605	if (bio)
2606		submit_one_bio(READ, bio, 0, bio_flags);
2607	return 0;
2608}
2609
2610/*
2611 * basic invalidatepage code, this waits on any locked or writeback
2612 * ranges corresponding to the page, and then deletes any extent state
2613 * records from the tree
2614 */
2615int extent_invalidatepage(struct extent_io_tree *tree,
2616			  struct page *page, unsigned long offset)
2617{
2618	struct extent_state *cached_state = NULL;
2619	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2620	u64 end = start + PAGE_CACHE_SIZE - 1;
2621	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2622
2623	start += (offset + blocksize - 1) & ~(blocksize - 1);
2624	if (start > end)
2625		return 0;
2626
2627	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2628	wait_on_page_writeback(page);
2629	clear_extent_bit(tree, start, end,
2630			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2631			 EXTENT_DO_ACCOUNTING,
2632			 1, 1, &cached_state, GFP_NOFS);
2633	return 0;
2634}
2635
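/*
 * The rounding in extent_invalidatepage() above, with illustrative
 * values: for a 4K blocksize, offset 1 gives
 * start += (1 + 4095) & ~4095 = 4096, so a partially invalidated first
 * block keeps its extent state and only fully invalidated blocks have
 * their state bits cleared.
 */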
2636/*
2637 * a helper for releasepage, this tests for areas of the page that
2638 * are locked or under IO and drops the related state bits if it is safe
2639 * to drop the page.
2640 */
2641int try_release_extent_state(struct extent_map_tree *map,
2642			     struct extent_io_tree *tree, struct page *page,
2643			     gfp_t mask)
2644{
2645	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2646	u64 end = start + PAGE_CACHE_SIZE - 1;
2647	int ret = 1;
2648
2649	if (test_range_bit(tree, start, end,
2650			   EXTENT_IOBITS, 0, NULL))
2651		ret = 0;
2652	else {
2653		if ((mask & GFP_NOFS) == GFP_NOFS)
2654			mask = GFP_NOFS;
2655		/*
2656		 * at this point we can safely clear everything except the
2657		 * locked bit and the nodatasum bit
2658		 */
2659		ret = clear_extent_bit(tree, start, end,
2660				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2661				 0, 0, NULL, mask);
2662
2663		/* if clear_extent_bit failed for enomem reasons,
2664		 * we can't allow the release to continue.
2665		 */
2666		if (ret < 0)
2667			ret = 0;
2668		else
2669			ret = 1;
2670	}
2671	return ret;
2672}
2673
2674/*
2675 * a helper for releasepage.  As long as there are no locked extents
2676 * in the range corresponding to the page, both state records and extent
2677 * map records are removed
2678 */
2679int try_release_extent_mapping(struct extent_map_tree *map,
2680			       struct extent_io_tree *tree, struct page *page,
2681			       gfp_t mask)
2682{
2683	struct extent_map *em;
2684	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2685	u64 end = start + PAGE_CACHE_SIZE - 1;
2686
2687	if ((mask & __GFP_WAIT) &&
2688	    page->mapping->host->i_size > 16 * 1024 * 1024) {
2689		u64 len;
2690		while (start <= end) {
2691			len = end - start + 1;
2692			write_lock(&map->lock);
2693			em = lookup_extent_mapping(map, start, len);
2694			if (IS_ERR_OR_NULL(em)) {
2695				write_unlock(&map->lock);
2696				break;
2697			}
2698			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2699			    em->start != start) {
2700				write_unlock(&map->lock);
2701				free_extent_map(em);
2702				break;
2703			}
2704			if (!test_range_bit(tree, em->start,
2705					    extent_map_end(em) - 1,
2706					    EXTENT_LOCKED | EXTENT_WRITEBACK,
2707					    0, NULL)) {
2708				remove_extent_mapping(map, em);
2709				/* once for the rb tree */
2710				free_extent_map(em);
2711			}
2712			start = extent_map_end(em);
2713			write_unlock(&map->lock);
2714
2715			/* once for us */
2716			free_extent_map(em);
2717		}
2718	}
2719	return try_release_extent_state(map, tree, page, mask);
2720}
2721
2722/*
2723 * helper function for fiemap, which doesn't want to see any holes.
2724 * This maps until we find something past 'last'
2725 */
2726static struct extent_map *get_extent_skip_holes(struct inode *inode,
2727						u64 offset,
2728						u64 last,
2729						get_extent_t *get_extent)
2730{
2731	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2732	struct extent_map *em;
2733	u64 len;
2734
2735	if (offset >= last)
2736		return NULL;
2737
2739	while (1) {
2739		len = last - offset;
2740		if (len == 0)
2741			break;
2742		len = (len + sectorsize - 1) & ~(sectorsize - 1);
2743		em = get_extent(inode, NULL, 0, offset, len, 0);
2744		if (IS_ERR_OR_NULL(em))
2745			return em;
2746
2747		/* if this isn't a hole return it */
2748		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2749		    em->block_start != EXTENT_MAP_HOLE) {
2750			return em;
2751		}
2752
2753		/* this is a hole, advance to the next extent */
2754		offset = extent_map_end(em);
2755		free_extent_map(em);
2756		if (offset >= last)
2757			break;
2758	}
2759	return NULL;
2760}
2761
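/*
 * extent_fiemap() below works in two phases: it first looks up the last
 * file extent item in the btree to learn where the file's data really
 * ends (preallocated extents may reach past i_size), then walks extent
 * maps with get_extent_skip_holes() and reports each one through
 * fiemap_fill_next_extent() until the requested range is covered.
 */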
2762int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2763		__u64 start, __u64 len, get_extent_t *get_extent)
2764{
2765	int ret = 0;
2766	u64 off = start;
2767	u64 max = start + len;
2768	u32 flags = 0;
2769	u32 found_type;
2770	u64 last;
2771	u64 last_for_get_extent = 0;
2772	u64 disko = 0;
2773	u64 isize = i_size_read(inode);
2774	struct btrfs_key found_key;
2775	struct extent_map *em = NULL;
2776	struct extent_state *cached_state = NULL;
2777	struct btrfs_path *path;
2778	struct btrfs_file_extent_item *item;
2779	int end = 0;
2780	u64 em_start = 0;
2781	u64 em_len = 0;
2782	u64 em_end = 0;
2783	unsigned long emflags;
2784
2785	if (len == 0)
2786		return -EINVAL;
2787
2788	path = btrfs_alloc_path();
2789	if (!path)
2790		return -ENOMEM;
2791	path->leave_spinning = 1;
2792
2793	/*
2794	 * lookup the last file extent.  We're not using i_size here
2795	 * because there might be preallocation past i_size
2796	 */
2797	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2798				       path, btrfs_ino(inode), -1, 0);
2799	if (ret < 0) {
2800		btrfs_free_path(path);
2801		return ret;
2802	}
2803	WARN_ON(!ret);
2804	path->slots[0]--;
2805	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2806			      struct btrfs_file_extent_item);
2807	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2808	found_type = btrfs_key_type(&found_key);
2809
2810	/* No extents, but there might be delalloc bits */
2811	if (found_key.objectid != btrfs_ino(inode) ||
2812	    found_type != BTRFS_EXTENT_DATA_KEY) {
2813		/* have to trust i_size as the end */
2814		last = (u64)-1;
2815		last_for_get_extent = isize;
2816	} else {
2817		/*
2818		 * remember the start of the last extent.  There are a
2819		 * bunch of different factors that go into the length of the
2820		 * extent, so its much less complex to remember where it started
2821		 */
2822		last = found_key.offset;
2823		last_for_get_extent = last + 1;
2824	}
2825	btrfs_free_path(path);
2826
2827	/*
2828	 * we might have some extents allocated but more delalloc past those
2829	 * extents.  so, we trust isize unless the start of the last extent is
2830	 * beyond isize
2831	 */
2832	if (last < isize) {
2833		last = (u64)-1;
2834		last_for_get_extent = isize;
2835	}
2836
2837	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2838			 &cached_state, GFP_NOFS);
2839
2840	em = get_extent_skip_holes(inode, off, last_for_get_extent,
2841				   get_extent);
2842	if (!em)
2843		goto out;
2844	if (IS_ERR(em)) {
2845		ret = PTR_ERR(em);
2846		goto out;
2847	}
2848
2849	while (!end) {
2850		u64 offset_in_extent;
2851
2852		/* break if the extent we found is outside the range */
2853		if (em->start >= max || extent_map_end(em) < off)
2854			break;
2855
2856		/*
2857		 * get_extent may return an extent that starts before our
2858		 * requested range.  We have to make sure the ranges
2859		 * we return to fiemap always move forward and don't
2860		 * overlap, so adjust the offsets here
2861		 */
2862		em_start = max(em->start, off);
2863
2864		/*
2865		 * record the offset from the start of the extent
2866		 * for adjusting the disk offset below
2867		 */
2868		offset_in_extent = em_start - em->start;
2869		em_end = extent_map_end(em);
2870		em_len = em_end - em_start;
2871		emflags = em->flags;
2872		disko = 0;
2873		flags = 0;
2874
2875		/*
2876		 * bump off for our next call to get_extent
2877		 */
2878		off = extent_map_end(em);
2879		if (off >= max)
2880			end = 1;
2881
2882		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2883			end = 1;
2884			flags |= FIEMAP_EXTENT_LAST;
2885		} else if (em->block_start == EXTENT_MAP_INLINE) {
2886			flags |= (FIEMAP_EXTENT_DATA_INLINE |
2887				  FIEMAP_EXTENT_NOT_ALIGNED);
2888		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
2889			flags |= (FIEMAP_EXTENT_DELALLOC |
2890				  FIEMAP_EXTENT_UNKNOWN);
2891		} else {
2892			disko = em->block_start + offset_in_extent;
2893		}
2894		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2895			flags |= FIEMAP_EXTENT_ENCODED;
2896
2897		free_extent_map(em);
2898		em = NULL;
2899		if ((em_start >= last) || em_len == (u64)-1 ||
2900		   (last == (u64)-1 && isize <= em_end)) {
2901			flags |= FIEMAP_EXTENT_LAST;
2902			end = 1;
2903		}
2904
2905		/* now scan forward to see if this is really the last extent. */
2906		em = get_extent_skip_holes(inode, off, last_for_get_extent,
2907					   get_extent);
2908		if (IS_ERR(em)) {
2909			ret = PTR_ERR(em);
2910			goto out;
2911		}
2912		if (!em) {
2913			flags |= FIEMAP_EXTENT_LAST;
2914			end = 1;
2915		}
2916		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2917					      em_len, flags);
2918		if (ret)
2919			goto out_free;
2920	}
2921out_free:
2922	free_extent_map(em);
2923out:
2924	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
2925			     &cached_state, GFP_NOFS);
2926	return ret;
2927}
2928
2929static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2930					      unsigned long i)
2931{
2932	struct page *p;
2933	struct address_space *mapping;
2934
2935	if (i == 0)
2936		return eb->first_page;
2937	i += eb->start >> PAGE_CACHE_SHIFT;
2938	mapping = eb->first_page->mapping;
2939	if (!mapping)
2940		return NULL;
2941
2942	/*
2943	 * extent_buffer_page is only called after pinning the page
2944	 * by increasing the reference count.  So we know the page must
2945	 * be in the radix tree.
2946	 */
2947	rcu_read_lock();
2948	p = radix_tree_lookup(&mapping->page_tree, i);
2949	rcu_read_unlock();
2950
2951	return p;
2952}
2953
2954static inline unsigned long num_extent_pages(u64 start, u64 len)
2955{
2956	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2957		(start >> PAGE_CACHE_SHIFT);
2958}
2959
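/*
 * num_extent_pages() with illustrative values, assuming 4K pages: a
 * buffer at start 16K with len 16K covers
 * ((16384 + 16384 + 4095) >> 12) - (16384 >> 12) = 8 - 4 = 4 pages,
 * while an unaligned buffer at 14K with len 4K covers
 * ((14336 + 4096 + 4095) >> 12) - (14336 >> 12) = 5 - 3 = 2 pages.
 */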
2960static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2961						   u64 start,
2962						   unsigned long len,
2963						   gfp_t mask)
2964{
2965	struct extent_buffer *eb = NULL;
2966#if LEAK_DEBUG
2967	unsigned long flags;
2968#endif
2969
2970	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2971	if (eb == NULL)
2972		return NULL;
2973	eb->start = start;
2974	eb->len = len;
2975	rwlock_init(&eb->lock);
2976	atomic_set(&eb->write_locks, 0);
2977	atomic_set(&eb->read_locks, 0);
2978	atomic_set(&eb->blocking_readers, 0);
2979	atomic_set(&eb->blocking_writers, 0);
2980	atomic_set(&eb->spinning_readers, 0);
2981	atomic_set(&eb->spinning_writers, 0);
2982	init_waitqueue_head(&eb->write_lock_wq);
2983	init_waitqueue_head(&eb->read_lock_wq);
2984
2985#if LEAK_DEBUG
2986	spin_lock_irqsave(&leak_lock, flags);
2987	list_add(&eb->leak_list, &buffers);
2988	spin_unlock_irqrestore(&leak_lock, flags);
2989#endif
2990	atomic_set(&eb->refs, 1);
2991
2992	return eb;
2993}
2994
2995static void __free_extent_buffer(struct extent_buffer *eb)
2996{
2997#if LEAK_DEBUG
2998	unsigned long flags;
2999	spin_lock_irqsave(&leak_lock, flags);
3000	list_del(&eb->leak_list);
3001	spin_unlock_irqrestore(&leak_lock, flags);
3002#endif
3003	kmem_cache_free(extent_buffer_cache, eb);
3004}
3005
3006/*
3007 * Helper for releasing extent buffer page.
3008 */
3009static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3010						unsigned long start_idx)
3011{
3012	unsigned long index;
3013	struct page *page;
3014
3015	if (!eb->first_page)
3016		return;
3017
3018	index = num_extent_pages(eb->start, eb->len);
3019	if (start_idx >= index)
3020		return;
3021
3022	do {
3023		index--;
3024		page = extent_buffer_page(eb, index);
3025		if (page)
3026			page_cache_release(page);
3027	} while (index != start_idx);
3028}
3029
3030/*
3031 * Helper for releasing the extent buffer.
3032 */
3033static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3034{
3035	btrfs_release_extent_buffer_page(eb, 0);
3036	__free_extent_buffer(eb);
3037}
3038
3039struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3040					  u64 start, unsigned long len,
3041					  struct page *page0)
3042{
3043	unsigned long num_pages = num_extent_pages(start, len);
3044	unsigned long i;
3045	unsigned long index = start >> PAGE_CACHE_SHIFT;
3046	struct extent_buffer *eb;
3047	struct extent_buffer *exists = NULL;
3048	struct page *p;
3049	struct address_space *mapping = tree->mapping;
3050	int uptodate = 1;
3051	int ret;
3052
3053	rcu_read_lock();
3054	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3055	if (eb && atomic_inc_not_zero(&eb->refs)) {
3056		rcu_read_unlock();
3057		mark_page_accessed(eb->first_page);
3058		return eb;
3059	}
3060	rcu_read_unlock();
3061
3062	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3063	if (!eb)
3064		return NULL;
3065
3066	if (page0) {
3067		eb->first_page = page0;
3068		i = 1;
3069		index++;
3070		page_cache_get(page0);
3071		mark_page_accessed(page0);
3072		set_page_extent_mapped(page0);
3073		set_page_extent_head(page0, len);
3074		uptodate = PageUptodate(page0);
3075	} else {
3076		i = 0;
3077	}
3078	for (; i < num_pages; i++, index++) {
3079		p = find_or_create_page(mapping, index, GFP_NOFS);
3080		if (!p) {
3081			WARN_ON(1);
3082			goto free_eb;
3083		}
3084		set_page_extent_mapped(p);
3085		mark_page_accessed(p);
3086		if (i == 0) {
3087			eb->first_page = p;
3088			set_page_extent_head(p, len);
3089		} else {
3090			set_page_private(p, EXTENT_PAGE_PRIVATE);
3091		}
3092		if (!PageUptodate(p))
3093			uptodate = 0;
3094
3095		/*
3096		 * see below about how we avoid a nasty race with release page
3097		 * and why we unlock later
3098		 */
3099		if (i != 0)
3100			unlock_page(p);
3101	}
3102	if (uptodate)
3103		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3104
3105	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3106	if (ret)
3107		goto free_eb;
3108
3109	spin_lock(&tree->buffer_lock);
3110	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3111	if (ret == -EEXIST) {
3112		exists = radix_tree_lookup(&tree->buffer,
3113						start >> PAGE_CACHE_SHIFT);
3114		/* add one reference for the caller */
3115		atomic_inc(&exists->refs);
3116		spin_unlock(&tree->buffer_lock);
3117		radix_tree_preload_end();
3118		goto free_eb;
3119	}
3120	/* add one reference for the tree */
3121	atomic_inc(&eb->refs);
3122	spin_unlock(&tree->buffer_lock);
3123	radix_tree_preload_end();
3124
3125	/*
3126	 * there is a race where release page may have
3127	 * tried to find this extent buffer in the radix
3128	 * but failed.  It will tell the VM it is safe to
3129	 * reclaim the page, and it will clear the page private bit.
3130	 * We must make sure to set the page private bit properly
3131	 * after the extent buffer is in the radix tree so
3132	 * it doesn't get lost
3133	 */
3134	set_page_extent_mapped(eb->first_page);
3135	set_page_extent_head(eb->first_page, eb->len);
3136	if (!page0)
3137		unlock_page(eb->first_page);
3138	return eb;
3139
3140free_eb:
3141	if (eb->first_page && !page0)
3142		unlock_page(eb->first_page);
3143
3144	if (!atomic_dec_and_test(&eb->refs))
3145		return exists;
3146	btrfs_release_extent_buffer(eb);
3147	return exists;
3148}
3149
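/*
 * Lockless lookup: find_extent_buffer() below (like the fast path of
 * alloc_extent_buffer() above) finds the buffer in the radix tree under
 * rcu_read_lock() and takes its reference with atomic_inc_not_zero(), so
 * a buffer whose refcount already hit zero is treated as missing rather
 * than resurrected while it is being freed.
 */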
3150struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3151					 u64 start, unsigned long len)
3152{
3153	struct extent_buffer *eb;
3154
3155	rcu_read_lock();
3156	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3157	if (eb && atomic_inc_not_zero(&eb->refs)) {
3158		rcu_read_unlock();
3159		mark_page_accessed(eb->first_page);
3160		return eb;
3161	}
3162	rcu_read_unlock();
3163
3164	return NULL;
3165}
3166
3167void free_extent_buffer(struct extent_buffer *eb)
3168{
3169	if (!eb)
3170		return;
3171
3172	if (!atomic_dec_and_test(&eb->refs))
3173		return;
3174
3175	WARN_ON(1);
3176}
3177
3178int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3179			      struct extent_buffer *eb)
3180{
3181	unsigned long i;
3182	unsigned long num_pages;
3183	struct page *page;
3184
3185	num_pages = num_extent_pages(eb->start, eb->len);
3186
3187	for (i = 0; i < num_pages; i++) {
3188		page = extent_buffer_page(eb, i);
3189		if (!PageDirty(page))
3190			continue;
3191
3192		lock_page(page);
3193		WARN_ON(!PagePrivate(page));
3194
3195		set_page_extent_mapped(page);
3196		if (i == 0)
3197			set_page_extent_head(page, eb->len);
3198
3199		clear_page_dirty_for_io(page);
3200		spin_lock_irq(&page->mapping->tree_lock);
3201		if (!PageDirty(page)) {
3202			radix_tree_tag_clear(&page->mapping->page_tree,
3203						page_index(page),
3204						PAGECACHE_TAG_DIRTY);
3205		}
3206		spin_unlock_irq(&page->mapping->tree_lock);
3207		unlock_page(page);
3208	}
3209	return 0;
3210}
3211
3212int set_extent_buffer_dirty(struct extent_io_tree *tree,
3213			     struct extent_buffer *eb)
3214{
3215	unsigned long i;
3216	unsigned long num_pages;
3217	int was_dirty = 0;
3218
3219	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3220	num_pages = num_extent_pages(eb->start, eb->len);
3221	for (i = 0; i < num_pages; i++)
3222		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3223	return was_dirty;
3224}
3225
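/*
 * A buffer "straddles" pages when it is smaller than a page or not page
 * aligned, i.e. when it may share pages with neighbouring buffers.  In
 * that case PageUptodate alone is not precise enough, so the helpers
 * below also track per-range EXTENT_UPTODATE bits in the io tree.
 */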
3226static int __eb_straddles_pages(u64 start, u64 len)
3227{
3228	if (len < PAGE_CACHE_SIZE)
3229		return 1;
3230	if (start & (PAGE_CACHE_SIZE - 1))
3231		return 1;
3232	if ((start + len) & (PAGE_CACHE_SIZE - 1))
3233		return 1;
3234	return 0;
3235}
3236
3237static int eb_straddles_pages(struct extent_buffer *eb)
3238{
3239	return __eb_straddles_pages(eb->start, eb->len);
3240}
3241
3242int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3243				struct extent_buffer *eb,
3244				struct extent_state **cached_state)
3245{
3246	unsigned long i;
3247	struct page *page;
3248	unsigned long num_pages;
3249
3250	num_pages = num_extent_pages(eb->start, eb->len);
3251	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3252
3253	if (eb_straddles_pages(eb)) {
3254		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3255				      cached_state, GFP_NOFS);
3256	}
3257	for (i = 0; i < num_pages; i++) {
3258		page = extent_buffer_page(eb, i);
3259		if (page)
3260			ClearPageUptodate(page);
3261	}
3262	return 0;
3263}
3264
3265int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3266				struct extent_buffer *eb)
3267{
3268	unsigned long i;
3269	struct page *page;
3270	unsigned long num_pages;
3271
3272	num_pages = num_extent_pages(eb->start, eb->len);
3273
3274	if (eb_straddles_pages(eb)) {
3275		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3276				    NULL, GFP_NOFS);
3277	}
3278	for (i = 0; i < num_pages; i++) {
3279		page = extent_buffer_page(eb, i);
3280		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3281		    ((i == num_pages - 1) &&
3282		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3283			check_page_uptodate(tree, page);
3284			continue;
3285		}
3286		SetPageUptodate(page);
3287	}
3288	return 0;
3289}
3290
3291int extent_range_uptodate(struct extent_io_tree *tree,
3292			  u64 start, u64 end)
3293{
3294	struct page *page;
3295	int ret;
3296	int pg_uptodate = 1;
3297	int uptodate;
3298	unsigned long index;
3299
3300	if (__eb_straddles_pages(start, end - start + 1)) {
3301		ret = test_range_bit(tree, start, end,
3302				     EXTENT_UPTODATE, 1, NULL);
3303		if (ret)
3304			return 1;
3305	}
3306	while (start <= end) {
3307		index = start >> PAGE_CACHE_SHIFT;
3308		page = find_get_page(tree->mapping, index);
3309		uptodate = PageUptodate(page);
3310		page_cache_release(page);
3311		if (!uptodate) {
3312			pg_uptodate = 0;
3313			break;
3314		}
3315		start += PAGE_CACHE_SIZE;
3316	}
3317	return pg_uptodate;
3318}
3319
3320int extent_buffer_uptodate(struct extent_io_tree *tree,
3321			   struct extent_buffer *eb,
3322			   struct extent_state *cached_state)
3323{
3324	int ret = 0;
3325	unsigned long num_pages;
3326	unsigned long i;
3327	struct page *page;
3328	int pg_uptodate = 1;
3329
3330	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3331		return 1;
3332
3333	if (eb_straddles_pages(eb)) {
3334		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3335				   EXTENT_UPTODATE, 1, cached_state);
3336		if (ret)
3337			return ret;
3338	}
3339
3340	num_pages = num_extent_pages(eb->start, eb->len);
3341	for (i = 0; i < num_pages; i++) {
3342		page = extent_buffer_page(eb, i);
3343		if (!PageUptodate(page)) {
3344			pg_uptodate = 0;
3345			break;
3346		}
3347	}
3348	return pg_uptodate;
3349}
3350
3351int read_extent_buffer_pages(struct extent_io_tree *tree,
3352			     struct extent_buffer *eb,
3353			     u64 start, int wait,
3354			     get_extent_t *get_extent, int mirror_num)
3355{
3356	unsigned long i;
3357	unsigned long start_i;
3358	struct page *page;
3359	int err;
3360	int ret = 0;
3361	int locked_pages = 0;
3362	int all_uptodate = 1;
3363	int inc_all_pages = 0;
3364	unsigned long num_pages;
3365	struct bio *bio = NULL;
3366	unsigned long bio_flags = 0;
3367
3368	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3369		return 0;
3370
3371	if (eb_straddles_pages(eb)) {
3372		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3373				   EXTENT_UPTODATE, 1, NULL)) {
3374			return 0;
3375		}
3376	}
3377
3378	if (start) {
3379		WARN_ON(start < eb->start);
3380		start_i = (start >> PAGE_CACHE_SHIFT) -
3381			(eb->start >> PAGE_CACHE_SHIFT);
3382	} else {
3383		start_i = 0;
3384	}
3385
3386	num_pages = num_extent_pages(eb->start, eb->len);
3387	for (i = start_i; i < num_pages; i++) {
3388		page = extent_buffer_page(eb, i);
3389		if (!wait) {
3390			if (!trylock_page(page))
3391				goto unlock_exit;
3392		} else {
3393			lock_page(page);
3394		}
3395		locked_pages++;
3396		if (!PageUptodate(page))
3397			all_uptodate = 0;
3398	}
3399	if (all_uptodate) {
3400		if (start_i == 0)
3401			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3402		goto unlock_exit;
3403	}
3404
3405	for (i = start_i; i < num_pages; i++) {
3406		page = extent_buffer_page(eb, i);
3407
3408		WARN_ON(!PagePrivate(page));
3409
3410		set_page_extent_mapped(page);
3411		if (i == 0)
3412			set_page_extent_head(page, eb->len);
3413
3414		if (inc_all_pages)
3415			page_cache_get(page);
3416		if (!PageUptodate(page)) {
3417			if (start_i == 0)
3418				inc_all_pages = 1;
3419			ClearPageError(page);
3420			err = __extent_read_full_page(tree, page,
3421						      get_extent, &bio,
3422						      mirror_num, &bio_flags);
3423			if (err)
3424				ret = err;
3425		} else {
3426			unlock_page(page);
3427		}
3428	}
3429
3430	if (bio)
3431		submit_one_bio(READ, bio, mirror_num, bio_flags);
3432
3433	if (ret || !wait)
3434		return ret;
3435
3436	for (i = start_i; i < num_pages; i++) {
3437		page = extent_buffer_page(eb, i);
3438		wait_on_page_locked(page);
3439		if (!PageUptodate(page))
3440			ret = -EIO;
3441	}
3442
3443	if (!ret)
3444		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3445	return ret;
3446
3447unlock_exit:
3448	i = start_i;
3449	while (locked_pages > 0) {
3450		page = extent_buffer_page(eb, i);
3451		i++;
3452		unlock_page(page);
3453		locked_pages--;
3454	}
3455	return ret;
3456}
3457
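/*
 * Offset arithmetic used by the access helpers below, with illustrative
 * values assuming 4K pages: for a buffer at start 6K, start_offset is
 * 6144 & 4095 = 2048, so an access at buffer offset 3072 lands in page
 * (2048 + 3072) >> 12 = 1 at in-page offset (2048 + 3072) & 4095 = 1024.
 */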
3458void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3459			unsigned long start,
3460			unsigned long len)
3461{
3462	size_t cur;
3463	size_t offset;
3464	struct page *page;
3465	char *kaddr;
3466	char *dst = (char *)dstv;
3467	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3468	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3469
3470	WARN_ON(start > eb->len);
3471	WARN_ON(start + len > eb->start + eb->len);
3472
3473	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3474
3475	while (len > 0) {
3476		page = extent_buffer_page(eb, i);
3477
3478		cur = min(len, (PAGE_CACHE_SIZE - offset));
3479		kaddr = page_address(page);
3480		memcpy(dst, kaddr + offset, cur);
3481
3482		dst += cur;
3483		len -= cur;
3484		offset = 0;
3485		i++;
3486	}
3487}
3488
3489int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3490			       unsigned long min_len, char **map,
3491			       unsigned long *map_start,
3492			       unsigned long *map_len)
3493{
3494	size_t offset = start & (PAGE_CACHE_SIZE - 1);
3495	char *kaddr;
3496	struct page *p;
3497	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3498	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3499	unsigned long end_i = (start_offset + start + min_len - 1) >>
3500		PAGE_CACHE_SHIFT;
3501
3502	if (i != end_i)
3503		return -EINVAL;
3504
3505	if (i == 0) {
3506		offset = start_offset;
3507		*map_start = 0;
3508	} else {
3509		offset = 0;
3510		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3511	}
3512
3513	if (start + min_len > eb->len) {
3514		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3515		       "wanted %lu %lu\n", (unsigned long long)eb->start,
3516		       eb->len, start, min_len);
3517		WARN_ON(1);
3518		return -EINVAL;
3519	}
3520
3521	p = extent_buffer_page(eb, i);
3522	kaddr = page_address(p);
3523	*map = kaddr + offset;
3524	*map_len = PAGE_CACHE_SIZE - offset;
3525	return 0;
3526}
3527
3528int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3529			  unsigned long start,
3530			  unsigned long len)
3531{
3532	size_t cur;
3533	size_t offset;
3534	struct page *page;
3535	char *kaddr;
3536	char *ptr = (char *)ptrv;
3537	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3538	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3539	int ret = 0;
3540
3541	WARN_ON(start > eb->len);
3542	WARN_ON(start + len > eb->start + eb->len);
3543
3544	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3545
3546	while (len > 0) {
3547		page = extent_buffer_page(eb, i);
3548
3549		cur = min(len, (PAGE_CACHE_SIZE - offset));
3550
3551		kaddr = page_address(page);
3552		ret = memcmp(ptr, kaddr + offset, cur);
3553		if (ret)
3554			break;
3555
3556		ptr += cur;
3557		len -= cur;
3558		offset = 0;
3559		i++;
3560	}
3561	return ret;
3562}
3563
3564void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3565			 unsigned long start, unsigned long len)
3566{
3567	size_t cur;
3568	size_t offset;
3569	struct page *page;
3570	char *kaddr;
3571	char *src = (char *)srcv;
3572	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3573	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3574
3575	WARN_ON(start > eb->len);
3576	WARN_ON(start + len > eb->start + eb->len);
3577
3578	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3579
3580	while (len > 0) {
3581		page = extent_buffer_page(eb, i);
3582		WARN_ON(!PageUptodate(page));
3583
3584		cur = min(len, PAGE_CACHE_SIZE - offset);
3585		kaddr = page_address(page);
3586		memcpy(kaddr + offset, src, cur);
3587
3588		src += cur;
3589		len -= cur;
3590		offset = 0;
3591		i++;
3592	}
3593}
3594
3595void memset_extent_buffer(struct extent_buffer *eb, char c,
3596			  unsigned long start, unsigned long len)
3597{
3598	size_t cur;
3599	size_t offset;
3600	struct page *page;
3601	char *kaddr;
3602	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3603	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3604
3605	WARN_ON(start > eb->len);
3606	WARN_ON(start + len > eb->start + eb->len);
3607
3608	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610	while (len > 0) {
3611		page = extent_buffer_page(eb, i);
3612		WARN_ON(!PageUptodate(page));
3613
3614		cur = min(len, PAGE_CACHE_SIZE - offset);
3615		kaddr = page_address(page);
3616		memset(kaddr + offset, c, cur);
3617
3618		len -= cur;
3619		offset = 0;
3620		i++;
3621	}
3622}
3623
3624void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3625			unsigned long dst_offset, unsigned long src_offset,
3626			unsigned long len)
3627{
3628	u64 dst_len = dst->len;
3629	size_t cur;
3630	size_t offset;
3631	struct page *page;
3632	char *kaddr;
3633	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3634	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3635
3636	WARN_ON(src->len != dst_len);
3637
3638	offset = (start_offset + dst_offset) &
3639		((unsigned long)PAGE_CACHE_SIZE - 1);
3640
3641	while (len > 0) {
3642		page = extent_buffer_page(dst, i);
3643		WARN_ON(!PageUptodate(page));
3644
3645		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3646
3647		kaddr = page_address(page);
3648		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3649
3650		src_offset += cur;
3651		len -= cur;
3652		offset = 0;
3653		i++;
3654	}
3655}
3656
3657static void move_pages(struct page *dst_page, struct page *src_page,
3658		       unsigned long dst_off, unsigned long src_off,
3659		       unsigned long len)
3660{
3661	char *dst_kaddr = page_address(dst_page);
3662	if (dst_page == src_page) {
3663		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3664	} else {
3665		char *src_kaddr = page_address(src_page);
3666		char *p = dst_kaddr + dst_off + len;
3667		char *s = src_kaddr + src_off + len;
3668
3669		while (len--)
3670			*--p = *--s;
3671	}
3672}
3673
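/*
 * move_pages() above handles overlap: within a single page it defers to
 * memmove(), and between two distinct pages it copies byte-by-byte from
 * the end, so memmove_extent_buffer() can shift overlapping ranges.
 * copy_pages() below uses memcpy() and BUG()s on same-page overlap.
 */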
3674static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3675{
3676	unsigned long distance = (src > dst) ? src - dst : dst - src;
3677	return distance < len;
3678}
3679
3680static void copy_pages(struct page *dst_page, struct page *src_page,
3681		       unsigned long dst_off, unsigned long src_off,
3682		       unsigned long len)
3683{
3684	char *dst_kaddr = page_address(dst_page);
3685	char *src_kaddr;
3686
3687	if (dst_page != src_page) {
3688		src_kaddr = page_address(src_page);
3689	} else {
3690		src_kaddr = dst_kaddr;
3691		BUG_ON(areas_overlap(src_off, dst_off, len));
3692	}
3693
3694	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3695}
3696
3697void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3698			   unsigned long src_offset, unsigned long len)
3699{
3700	size_t cur;
3701	size_t dst_off_in_page;
3702	size_t src_off_in_page;
3703	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3704	unsigned long dst_i;
3705	unsigned long src_i;
3706
3707	if (src_offset + len > dst->len) {
3708		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3709		       "len %lu dst len %lu\n", src_offset, len, dst->len);
3710		BUG_ON(1);
3711	}
3712	if (dst_offset + len > dst->len) {
3713		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3714		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
3715		BUG_ON(1);
3716	}
3717
3718	while (len > 0) {
3719		dst_off_in_page = (start_offset + dst_offset) &
3720			((unsigned long)PAGE_CACHE_SIZE - 1);
3721		src_off_in_page = (start_offset + src_offset) &
3722			((unsigned long)PAGE_CACHE_SIZE - 1);
3723
3724		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3725		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3726
3727		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3728					       src_off_in_page));
3729		cur = min_t(unsigned long, cur,
3730			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3731
3732		copy_pages(extent_buffer_page(dst, dst_i),
3733			   extent_buffer_page(dst, src_i),
3734			   dst_off_in_page, src_off_in_page, cur);
3735
3736		src_offset += cur;
3737		dst_offset += cur;
3738		len -= cur;
3739	}
3740}
3741
3742void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3743			   unsigned long src_offset, unsigned long len)
3744{
3745	size_t cur;
3746	size_t dst_off_in_page;
3747	size_t src_off_in_page;
3748	unsigned long dst_end = dst_offset + len - 1;
3749	unsigned long src_end = src_offset + len - 1;
3750	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3751	unsigned long dst_i;
3752	unsigned long src_i;
3753
3754	if (src_offset + len > dst->len) {
3755		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3756		       "len %lu len %lu\n", src_offset, len, dst->len);
3757		BUG_ON(1);
3758	}
3759	if (dst_offset + len > dst->len) {
3760		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3761		       "len %lu len %lu\n", dst_offset, len, dst->len);
3762		BUG_ON(1);
3763	}
3764	if (!areas_overlap(src_offset, dst_offset, len)) {
3765		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3766		return;
3767	}
3768	while (len > 0) {
3769		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3770		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3771
3772		dst_off_in_page = (start_offset + dst_end) &
3773			((unsigned long)PAGE_CACHE_SIZE - 1);
3774		src_off_in_page = (start_offset + src_end) &
3775			((unsigned long)PAGE_CACHE_SIZE - 1);
3776
3777		cur = min_t(unsigned long, len, src_off_in_page + 1);
3778		cur = min(cur, dst_off_in_page + 1);
3779		move_pages(extent_buffer_page(dst, dst_i),
3780			   extent_buffer_page(dst, src_i),
3781			   dst_off_in_page - cur + 1,
3782			   src_off_in_page - cur + 1, cur);
3783
3784		dst_end -= cur;
3785		src_end -= cur;
3786		len -= cur;
3787	}
3788}
3789
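/*
 * Freeing pairs with the RCU lookups above: try_release_extent_buffer()
 * drops the final reference and deletes the buffer from the radix tree
 * under buffer_lock, but the actual free is deferred through call_rcu()
 * so a concurrent rcu_read_lock() lookup never touches freed memory.
 */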
3790static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3791{
3792	struct extent_buffer *eb =
3793			container_of(head, struct extent_buffer, rcu_head);
3794
3795	btrfs_release_extent_buffer(eb);
3796}
3797
3798int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3799{
3800	u64 start = page_offset(page);
3801	struct extent_buffer *eb;
3802	int ret = 1;
3803
3804	spin_lock(&tree->buffer_lock);
3805	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3806	if (!eb) {
3807		spin_unlock(&tree->buffer_lock);
3808		return ret;
3809	}
3810
3811	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3812		ret = 0;
3813		goto out;
3814	}
3815
3816	/*
3817	 * set @eb->refs to 0 if it is already 1, and then release the @eb.
3818	 * If anyone else still holds a reference, leave it alone.
3819	 */
3820	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
3821		ret = 0;
3822		goto out;
3823	}
3824
3825	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3826out:
3827	spin_unlock(&tree->buffer_lock);
3828
3829	/* at this point we can safely release the extent buffer */
3830	if (atomic_read(&eb->refs) == 0)
3831		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3832	return ret;
3833}
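/*
 * Editor's note (not part of the original source): the
 * atomic_cmpxchg(&eb->refs, 1, 0) above is the "last reference" test.
 * Only the caller that drops refs from 1 to 0 deletes the buffer from
 * the radix tree and frees it via call_rcu(); any concurrent lookup
 * that already bumped refs makes the cmpxchg fail, so the release
 * backs off with ret == 0 and the buffer stays alive.
 */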
v3.15
   1#include <linux/bitops.h>
   2#include <linux/slab.h>
   3#include <linux/bio.h>
   4#include <linux/mm.h>
   5#include <linux/pagemap.h>
   6#include <linux/page-flags.h>
   7#include <linux/spinlock.h>
   8#include <linux/blkdev.h>
   9#include <linux/swap.h>
  10#include <linux/writeback.h>
  11#include <linux/pagevec.h>
  12#include <linux/prefetch.h>
  13#include <linux/cleancache.h>
  14#include "extent_io.h"
  15#include "extent_map.h"
  16#include "ctree.h"
  17#include "btrfs_inode.h"
  18#include "volumes.h"
  19#include "check-integrity.h"
  20#include "locking.h"
  21#include "rcu-string.h"
  22#include "backref.h"
  23
  24static struct kmem_cache *extent_state_cache;
  25static struct kmem_cache *extent_buffer_cache;
  26static struct bio_set *btrfs_bioset;
  27
  28#ifdef CONFIG_BTRFS_DEBUG
  29static LIST_HEAD(buffers);
  30static LIST_HEAD(states);
  31
  32static DEFINE_SPINLOCK(leak_lock);
  33
  34static inline
  35void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
  36{
  37	unsigned long flags;
  38
  39	spin_lock_irqsave(&leak_lock, flags);
  40	list_add(new, head);
  41	spin_unlock_irqrestore(&leak_lock, flags);
  42}
  43
  44static inline
  45void btrfs_leak_debug_del(struct list_head *entry)
  46{
  47	unsigned long flags;
  48
  49	spin_lock_irqsave(&leak_lock, flags);
  50	list_del(entry);
  51	spin_unlock_irqrestore(&leak_lock, flags);
  52}
  53
  54static inline
  55void btrfs_leak_debug_check(void)
  56{
  57	struct extent_state *state;
  58	struct extent_buffer *eb;
  59
  60	while (!list_empty(&states)) {
  61		state = list_entry(states.next, struct extent_state, leak_list);
  62		printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
  63		       "state %lu in tree %p refs %d\n",
  64		       state->start, state->end, state->state, state->tree,
  65		       atomic_read(&state->refs));
  66		list_del(&state->leak_list);
  67		kmem_cache_free(extent_state_cache, state);
  68	}
  69
  70	while (!list_empty(&buffers)) {
  71		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
  72		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
  73		       "refs %d\n",
  74		       eb->start, eb->len, atomic_read(&eb->refs));
  75		list_del(&eb->leak_list);
  76		kmem_cache_free(extent_buffer_cache, eb);
  77	}
  78}
  79
  80#define btrfs_debug_check_extent_io_range(tree, start, end)		\
  81	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
  82static inline void __btrfs_debug_check_extent_io_range(const char *caller,
  83		struct extent_io_tree *tree, u64 start, u64 end)
  84{
  85	struct inode *inode;
  86	u64 isize;
  87
  88	if (!tree->mapping)
  89		return;
  90
  91	inode = tree->mapping->host;
  92	isize = i_size_read(inode);
  93	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
  94		printk_ratelimited(KERN_DEBUG
  95		    "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
  96				caller, btrfs_ino(inode), isize, start, end);
  97	}
  98}
  99#else
 100#define btrfs_leak_debug_add(new, head)	do {} while (0)
 101#define btrfs_leak_debug_del(entry)	do {} while (0)
 102#define btrfs_leak_debug_check()	do {} while (0)
 103#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 104#endif
 105
 106#define BUFFER_LRU_MAX 64
 107
 108struct tree_entry {
 109	u64 start;
 110	u64 end;
 111	struct rb_node rb_node;
 112};
 113
 114struct extent_page_data {
 115	struct bio *bio;
 116	struct extent_io_tree *tree;
 117	get_extent_t *get_extent;
 118	unsigned long bio_flags;
 119
 120	/* tells writepage not to lock the state bits for this range
 121	 * it still does the unlocking
 122	 */
 123	unsigned int extent_locked:1;
 124
 125	/* tells the submit_bio code to use a WRITE_SYNC */
 126	unsigned int sync_io:1;
 127};
 128
 129static noinline void flush_write_bio(void *data);
 130static inline struct btrfs_fs_info *
 131tree_fs_info(struct extent_io_tree *tree)
 132{
 133	if (!tree->mapping)
 134		return NULL;
 135	return btrfs_sb(tree->mapping->host->i_sb);
 136}
 137
 138int __init extent_io_init(void)
 139{
 140	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 141			sizeof(struct extent_state), 0,
 142			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 143	if (!extent_state_cache)
 144		return -ENOMEM;
 145
 146	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 147			sizeof(struct extent_buffer), 0,
 148			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 149	if (!extent_buffer_cache)
 150		goto free_state_cache;
 151
 152	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
 153				     offsetof(struct btrfs_io_bio, bio));
 154	if (!btrfs_bioset)
 155		goto free_buffer_cache;
 156
 157	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
 158		goto free_bioset;
 159
 160	return 0;
 161
 162free_bioset:
 163	bioset_free(btrfs_bioset);
 164	btrfs_bioset = NULL;
 165
 166free_buffer_cache:
 167	kmem_cache_destroy(extent_buffer_cache);
 168	extent_buffer_cache = NULL;
 169
 170free_state_cache:
 171	kmem_cache_destroy(extent_state_cache);
 172	extent_state_cache = NULL;
 173	return -ENOMEM;
 174}
 175
 176void extent_io_exit(void)
 177{
 178	btrfs_leak_debug_check();
 179
 180	/*
 181	 * Make sure all delayed rcu free are flushed before we
 182	 * destroy caches.
 183	 */
 184	rcu_barrier();
 185	if (extent_state_cache)
 186		kmem_cache_destroy(extent_state_cache);
 187	if (extent_buffer_cache)
 188		kmem_cache_destroy(extent_buffer_cache);
 189	if (btrfs_bioset)
 190		bioset_free(btrfs_bioset);
 191}
 192
 193void extent_io_tree_init(struct extent_io_tree *tree,
 194			 struct address_space *mapping)
 195{
 196	tree->state = RB_ROOT;
 197	tree->ops = NULL;
 198	tree->dirty_bytes = 0;
 199	spin_lock_init(&tree->lock);
 200	tree->mapping = mapping;
 201}
 202
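/*
 * Editor's sketch (not part of the original source): minimal use of the
 * tree initialized above.  A caller embeds an extent_io_tree, points it
 * at an address_space, then drives it with the set/clear helpers defined
 * further down (declared in extent_io.h).  The function and inode here
 * are hypothetical.
 */
static void __maybe_unused example_tree_setup(struct inode *inode,
					      struct extent_io_tree *tree)
{
	extent_io_tree_init(tree, inode->i_mapping);

	/* ranges are inclusive: mark the first 4K dirty, then clear it */
	set_extent_dirty(tree, 0, 4095, GFP_NOFS);
	clear_extent_dirty(tree, 0, 4095, GFP_NOFS);
}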
 203static struct extent_state *alloc_extent_state(gfp_t mask)
 204{
 205	struct extent_state *state;
 206
 207	state = kmem_cache_alloc(extent_state_cache, mask);
 208	if (!state)
 209		return state;
 210	state->state = 0;
 211	state->private = 0;
 212	state->tree = NULL;
 213	btrfs_leak_debug_add(&state->leak_list, &states);
 214	atomic_set(&state->refs, 1);
 215	init_waitqueue_head(&state->wq);
 216	trace_alloc_extent_state(state, mask, _RET_IP_);
 217	return state;
 218}
 219
 220void free_extent_state(struct extent_state *state)
 221{
 222	if (!state)
 223		return;
 224	if (atomic_dec_and_test(&state->refs)) {
 225		WARN_ON(state->tree);
 226		btrfs_leak_debug_del(&state->leak_list);
 227		trace_free_extent_state(state, _RET_IP_);
 228		kmem_cache_free(extent_state_cache, state);
 229	}
 230}
 231
 232static struct rb_node *tree_insert(struct rb_root *root,
 233				   struct rb_node *search_start,
 234				   u64 offset,
 235				   struct rb_node *node,
 236				   struct rb_node ***p_in,
 237				   struct rb_node **parent_in)
 238{
 239	struct rb_node **p;
 240	struct rb_node *parent = NULL;
 241	struct tree_entry *entry;
 242
 243	if (p_in && parent_in) {
 244		p = *p_in;
 245		parent = *parent_in;
 246		goto do_insert;
 247	}
 248
 249	p = search_start ? &search_start : &root->rb_node;
 250	while (*p) {
 251		parent = *p;
 252		entry = rb_entry(parent, struct tree_entry, rb_node);
 253
 254		if (offset < entry->start)
 255			p = &(*p)->rb_left;
 256		else if (offset > entry->end)
 257			p = &(*p)->rb_right;
 258		else
 259			return parent;
 260	}
 261
 262do_insert:
 263	rb_link_node(node, parent, p);
 264	rb_insert_color(node, root);
 265	return NULL;
 266}
 267
 268static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 269				      struct rb_node **prev_ret,
 270				      struct rb_node **next_ret,
 271				      struct rb_node ***p_ret,
 272				      struct rb_node **parent_ret)
 273{
 274	struct rb_root *root = &tree->state;
 275	struct rb_node **n = &root->rb_node;
 276	struct rb_node *prev = NULL;
 277	struct rb_node *orig_prev = NULL;
 278	struct tree_entry *entry;
 279	struct tree_entry *prev_entry = NULL;
 280
 281	while (*n) {
 282		prev = *n;
 283		entry = rb_entry(prev, struct tree_entry, rb_node);
 284		prev_entry = entry;
 285
 286		if (offset < entry->start)
 287			n = &(*n)->rb_left;
 288		else if (offset > entry->end)
 289			n = &(*n)->rb_right;
 290		else
 291			return *n;
 292	}
 293
 294	if (p_ret)
 295		*p_ret = n;
 296	if (parent_ret)
 297		*parent_ret = prev;
 298
 299	if (prev_ret) {
 300		orig_prev = prev;
 301		while (prev && offset > prev_entry->end) {
 302			prev = rb_next(prev);
 303			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 304		}
 305		*prev_ret = prev;
 306		prev = orig_prev;
 307	}
 308
 309	if (next_ret) {
 310		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 311		while (prev && offset < prev_entry->start) {
 312			prev = rb_prev(prev);
 313			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 314		}
 315		*next_ret = prev;
 316	}
 317	return NULL;
 318}
 319
 320static inline struct rb_node *
 321tree_search_for_insert(struct extent_io_tree *tree,
 322		       u64 offset,
 323		       struct rb_node ***p_ret,
 324		       struct rb_node **parent_ret)
 325{
 326	struct rb_node *prev = NULL;
 327	struct rb_node *ret;
 328
 329	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 330	if (!ret)
 331		return prev;
 332	return ret;
 333}
 334
 335static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 336					  u64 offset)
 337{
 338	return tree_search_for_insert(tree, offset, NULL, NULL);
 339}
 340
 341static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 342		     struct extent_state *other)
 343{
 344	if (tree->ops && tree->ops->merge_extent_hook)
 345		tree->ops->merge_extent_hook(tree->mapping->host, new,
 346					     other);
 347}
 348
 349/*
 350 * utility function to look for merge candidates inside a given range.
 351 * Any extents with matching state are merged together into a single
 352 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 353 * are not merged because the end_io handlers need to be able to do
 354 * operations on them without sleeping (or doing allocations/splits).
 355 *
 356 * This should be called with the tree lock held.
 357 */
 358static void merge_state(struct extent_io_tree *tree,
 359		        struct extent_state *state)
 360{
 361	struct extent_state *other;
 362	struct rb_node *other_node;
 363
 364	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 365		return;
 366
 367	other_node = rb_prev(&state->rb_node);
 368	if (other_node) {
 369		other = rb_entry(other_node, struct extent_state, rb_node);
 370		if (other->end == state->start - 1 &&
 371		    other->state == state->state) {
 372			merge_cb(tree, state, other);
 373			state->start = other->start;
 374			other->tree = NULL;
 375			rb_erase(&other->rb_node, &tree->state);
 376			free_extent_state(other);
 377		}
 378	}
 379	other_node = rb_next(&state->rb_node);
 380	if (other_node) {
 381		other = rb_entry(other_node, struct extent_state, rb_node);
 382		if (other->start == state->end + 1 &&
 383		    other->state == state->state) {
 384			merge_cb(tree, state, other);
 385			state->end = other->end;
 386			other->tree = NULL;
 387			rb_erase(&other->rb_node, &tree->state);
 388			free_extent_state(other);
 389		}
 390	}
 391}
 392
 393static void set_state_cb(struct extent_io_tree *tree,
 394			 struct extent_state *state, unsigned long *bits)
 395{
 396	if (tree->ops && tree->ops->set_bit_hook)
 397		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 398}
 399
 400static void clear_state_cb(struct extent_io_tree *tree,
 401			   struct extent_state *state, unsigned long *bits)
 402{
 403	if (tree->ops && tree->ops->clear_bit_hook)
 404		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 405}
 406
 407static void set_state_bits(struct extent_io_tree *tree,
 408			   struct extent_state *state, unsigned long *bits);
 409
 410/*
 411 * insert an extent_state struct into the tree.  'bits' are set on the
 412 * struct before it is inserted.
 413 *
 414 * This may return -EEXIST if the extent is already there, in which case the
 415 * state struct is freed.
 416 *
 417 * The tree lock is not taken internally.  This is a utility function and
 418 * probably isn't what you want to call (see set/clear_extent_bit).
 419 */
 420static int insert_state(struct extent_io_tree *tree,
 421			struct extent_state *state, u64 start, u64 end,
 422			struct rb_node ***p,
 423			struct rb_node **parent,
 424			unsigned long *bits)
 425{
 426	struct rb_node *node;
 427
 428	if (end < start)
 429		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 430		       end, start);
 431	state->start = start;
 432	state->end = end;
 433
 434	set_state_bits(tree, state, bits);
 435
 436	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
 437	if (node) {
 438		struct extent_state *found;
 439		found = rb_entry(node, struct extent_state, rb_node);
 440		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 441		       "%llu %llu\n",
 442		       found->start, found->end, start, end);
 443		return -EEXIST;
 444	}
 445	state->tree = tree;
 446	merge_state(tree, state);
 447	return 0;
 448}
 449
 450static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 451		     u64 split)
 452{
 453	if (tree->ops && tree->ops->split_extent_hook)
 454		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
 455}
 456
 457/*
 458 * split a given extent state struct in two, inserting the preallocated
 459 * struct 'prealloc' as the newly created second half.  'split' indicates an
 460 * offset inside 'orig' where it should be split.
 461 *
 462 * Before calling,
 463 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 464 * are two extent state structs in the tree:
 465 * prealloc: [orig->start, split - 1]
 466 * orig: [ split, orig->end ]
 467 *
 468 * The tree locks are not taken by this function. They need to be held
 469 * by the caller.
 470 */
 471static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 472		       struct extent_state *prealloc, u64 split)
 473{
 474	struct rb_node *node;
 475
 476	split_cb(tree, orig, split);
 477
 478	prealloc->start = orig->start;
 479	prealloc->end = split - 1;
 480	prealloc->state = orig->state;
 481	orig->start = split;
 482
 483	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
 484			   &prealloc->rb_node, NULL, NULL);
 485	if (node) {
 486		free_extent_state(prealloc);
 487		return -EEXIST;
 488	}
 489	prealloc->tree = tree;
 490	return 0;
 491}
 492
 493static struct extent_state *next_state(struct extent_state *state)
 494{
 495	struct rb_node *next = rb_next(&state->rb_node);
 496	if (next)
 497		return rb_entry(next, struct extent_state, rb_node);
 498	else
 499		return NULL;
 500}
 501
 502/*
 503 * utility function to clear some bits in an extent state struct.
 504 * it will optionally wake up anyone waiting on this state (wake == 1).
 505 *
 506 * If no bits are set on the state struct after clearing things, the
 507 * struct is freed and removed from the tree
 508 */
 509static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 510					    struct extent_state *state,
 511					    unsigned long *bits, int wake)
 512{
 513	struct extent_state *next;
 514	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
 515
 516	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 517		u64 range = state->end - state->start + 1;
 518		WARN_ON(range > tree->dirty_bytes);
 519		tree->dirty_bytes -= range;
 520	}
 521	clear_state_cb(tree, state, bits);
 522	state->state &= ~bits_to_clear;
 523	if (wake)
 524		wake_up(&state->wq);
 525	if (state->state == 0) {
 526		next = next_state(state);
 527		if (state->tree) {
 528			rb_erase(&state->rb_node, &tree->state);
 529			state->tree = NULL;
 530			free_extent_state(state);
 531		} else {
 532			WARN_ON(1);
 533		}
 534	} else {
 535		merge_state(tree, state);
 536		next = next_state(state);
 537	}
 538	return next;
 539}
 540
 541static struct extent_state *
 542alloc_extent_state_atomic(struct extent_state *prealloc)
 543{
 544	if (!prealloc)
 545		prealloc = alloc_extent_state(GFP_ATOMIC);
 546
 547	return prealloc;
 548}
 549
 550static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 551{
 552	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
 553		    "Extent tree was modified by another "
 554		    "thread while locked.");
 555}
 556
 557/*
 558 * clear some bits on a range in the tree.  This may require splitting
 559 * or inserting elements in the tree, so the gfp mask is used to
 560 * indicate which allocations or sleeping are allowed.
 561 *
 562 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 563 * the given range from the tree regardless of state (ie for truncate).
 564 *
 565 * the range [start, end] is inclusive.
 566 *
 567 * This takes the tree lock, and returns 0 on success and < 0 on error.
 568 */
 569int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 570		     unsigned long bits, int wake, int delete,
 571		     struct extent_state **cached_state,
 572		     gfp_t mask)
 573{
 574	struct extent_state *state;
 575	struct extent_state *cached;
 576	struct extent_state *prealloc = NULL;
 577	struct rb_node *node;
 578	u64 last_end;
 579	int err;
 580	int clear = 0;
 581
 582	btrfs_debug_check_extent_io_range(tree, start, end);
 583
 584	if (bits & EXTENT_DELALLOC)
 585		bits |= EXTENT_NORESERVE;
 586
 587	if (delete)
 588		bits |= ~EXTENT_CTLBITS;
 589	bits |= EXTENT_FIRST_DELALLOC;
 590
 591	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 592		clear = 1;
 593again:
 594	if (!prealloc && (mask & __GFP_WAIT)) {
 595		prealloc = alloc_extent_state(mask);
 596		if (!prealloc)
 597			return -ENOMEM;
 598	}
 599
 600	spin_lock(&tree->lock);
 601	if (cached_state) {
 602		cached = *cached_state;
 603
 604		if (clear) {
 605			*cached_state = NULL;
 606			cached_state = NULL;
 607		}
 608
 609		if (cached && cached->tree && cached->start <= start &&
 610		    cached->end > start) {
 611			if (clear)
 612				atomic_dec(&cached->refs);
 613			state = cached;
 614			goto hit_next;
 615		}
 616		if (clear)
 617			free_extent_state(cached);
 618	}
 619	/*
 620	 * this search will find the extents that end after
 621	 * our range starts
 622	 */
 623	node = tree_search(tree, start);
 624	if (!node)
 625		goto out;
 626	state = rb_entry(node, struct extent_state, rb_node);
 627hit_next:
 628	if (state->start > end)
 629		goto out;
 630	WARN_ON(state->end < start);
 631	last_end = state->end;
 632
 633	/* the state doesn't have the wanted bits, go ahead */
 634	if (!(state->state & bits)) {
 635		state = next_state(state);
 636		goto next;
 637	}
 638
 639	/*
 640	 *     | ---- desired range ---- |
 641	 *  | state | or
 642	 *  | ------------- state -------------- |
 643	 *
 644	 * We need to split the extent we found, and may flip
 645	 * bits on second half.
 646	 *
 647	 * If the extent we found extends past our range, we
 648	 * just split and search again.  It'll get split again
 649	 * the next time though.
 650	 *
 651	 * If the extent we found is inside our range, we clear
 652	 * the desired bit on it.
 653	 */
 654
 655	if (state->start < start) {
 656		prealloc = alloc_extent_state_atomic(prealloc);
 657		BUG_ON(!prealloc);
 658		err = split_state(tree, state, prealloc, start);
 659		if (err)
 660			extent_io_tree_panic(tree, err);
 661
 662		prealloc = NULL;
 663		if (err)
 664			goto out;
 665		if (state->end <= end) {
 666			state = clear_state_bit(tree, state, &bits, wake);
 667			goto next;
 668		}
 669		goto search_again;
 670	}
 671	/*
 672	 * | ---- desired range ---- |
 673	 *                        | state |
 674	 * We need to split the extent, and clear the bit
 675	 * on the first half
 676	 */
 677	if (state->start <= end && state->end > end) {
 678		prealloc = alloc_extent_state_atomic(prealloc);
 679		BUG_ON(!prealloc);
 680		err = split_state(tree, state, prealloc, end + 1);
 681		if (err)
 682			extent_io_tree_panic(tree, err);
 683
 684		if (wake)
 685			wake_up(&state->wq);
 686
 687		clear_state_bit(tree, prealloc, &bits, wake);
 688
 689		prealloc = NULL;
 690		goto out;
 691	}
 692
 693	state = clear_state_bit(tree, state, &bits, wake);
 694next:
 695	if (last_end == (u64)-1)
 696		goto out;
 697	start = last_end + 1;
 698	if (start <= end && state && !need_resched())
 699		goto hit_next;
 700	goto search_again;
 701
 702out:
 703	spin_unlock(&tree->lock);
 704	if (prealloc)
 705		free_extent_state(prealloc);
 706
 707	return 0;
 708
 709search_again:
 710	if (start > end)
 711		goto out;
 712	spin_unlock(&tree->lock);
 713	if (mask & __GFP_WAIT)
 714		cond_resched();
 715	goto again;
 716}
 717
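/*
 * Editor's sketch (not part of the original source): a truncate-style
 * call to clear_extent_bit() above.  'delete' == 1 widens 'bits' so
 * every state in the range is torn down no matter what is set, and
 * 'wake' == 1 kicks anyone sleeping in wait_on_state().  The function
 * name and bit choice are illustrative.
 */
static int __maybe_unused example_drop_range(struct extent_io_tree *tree,
					     u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_LOCKED | EXTENT_DIRTY,
				1 /* wake */, 1 /* delete */,
				NULL, GFP_NOFS);
}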
 718static void wait_on_state(struct extent_io_tree *tree,
 719			  struct extent_state *state)
 720		__releases(tree->lock)
 721		__acquires(tree->lock)
 722{
 723	DEFINE_WAIT(wait);
 724	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 725	spin_unlock(&tree->lock);
 726	schedule();
 727	spin_lock(&tree->lock);
 728	finish_wait(&state->wq, &wait);
 729}
 730
 731/*
 732 * waits for one or more bits to clear on a range in the state tree.
 733 * The range [start, end] is inclusive.
 734 * The tree lock is taken by this function
 735 */
 736static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 737			    unsigned long bits)
 738{
 739	struct extent_state *state;
 740	struct rb_node *node;
 741
 742	btrfs_debug_check_extent_io_range(tree, start, end);
 743
 744	spin_lock(&tree->lock);
 745again:
 746	while (1) {
 747		/*
 748		 * this search will find all the extents that end after
 749		 * our range starts
 750		 */
 751		node = tree_search(tree, start);
 752process_node:
 753		if (!node)
 754			break;
 755
 756		state = rb_entry(node, struct extent_state, rb_node);
 757
 758		if (state->start > end)
 759			goto out;
 760
 761		if (state->state & bits) {
 762			start = state->start;
 763			atomic_inc(&state->refs);
 764			wait_on_state(tree, state);
 765			free_extent_state(state);
 766			goto again;
 767		}
 768		start = state->end + 1;
 769
 770		if (start > end)
 771			break;
 772
 773		if (!cond_resched_lock(&tree->lock)) {
 774			node = rb_next(node);
 775			goto process_node;
 776		}
 777	}
 778out:
 779	spin_unlock(&tree->lock);
 780}
 781
 782static void set_state_bits(struct extent_io_tree *tree,
 783			   struct extent_state *state,
 784			   unsigned long *bits)
 785{
 786	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
 787
 788	set_state_cb(tree, state, bits);
 789	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 790		u64 range = state->end - state->start + 1;
 791		tree->dirty_bytes += range;
 792	}
 793	state->state |= bits_to_set;
 794}
 795
 796static void cache_state(struct extent_state *state,
 797			struct extent_state **cached_ptr)
 798{
 799	if (cached_ptr && !(*cached_ptr)) {
 800		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
 801			*cached_ptr = state;
 802			atomic_inc(&state->refs);
 803		}
 804	}
 805}
 806
 807/*
 808 * set some bits on a range in the tree.  This may require allocations or
 809 * sleeping, so the gfp mask is used to indicate what is allowed.
 810 *
 811 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 812 * part of the range already has the desired bits set.  The start of the
 813 * existing range is returned in failed_start in this case.
 814 *
 815 * [start, end] is inclusive.  This takes the tree lock.
 816 */
 817
 818static int __must_check
 819__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 820		 unsigned long bits, unsigned long exclusive_bits,
 821		 u64 *failed_start, struct extent_state **cached_state,
 822		 gfp_t mask)
 823{
 824	struct extent_state *state;
 825	struct extent_state *prealloc = NULL;
 826	struct rb_node *node;
 827	struct rb_node **p;
 828	struct rb_node *parent;
 829	int err = 0;
 830	u64 last_start;
 831	u64 last_end;
 832
 833	btrfs_debug_check_extent_io_range(tree, start, end);
 834
 835	bits |= EXTENT_FIRST_DELALLOC;
 836again:
 837	if (!prealloc && (mask & __GFP_WAIT)) {
 838		prealloc = alloc_extent_state(mask);
 839		BUG_ON(!prealloc);
 840	}
 841
 842	spin_lock(&tree->lock);
 843	if (cached_state && *cached_state) {
 844		state = *cached_state;
 845		if (state->start <= start && state->end > start &&
 846		    state->tree) {
 847			node = &state->rb_node;
 848			goto hit_next;
 849		}
 850	}
 851	/*
 852	 * this search will find all the extents that end after
 853	 * our range starts.
 854	 */
 855	node = tree_search_for_insert(tree, start, &p, &parent);
 856	if (!node) {
 857		prealloc = alloc_extent_state_atomic(prealloc);
 858		BUG_ON(!prealloc);
 859		err = insert_state(tree, prealloc, start, end,
 860				   &p, &parent, &bits);
 861		if (err)
 862			extent_io_tree_panic(tree, err);
 863
 864		cache_state(prealloc, cached_state);
 865		prealloc = NULL;
 866		goto out;
 867	}
 868	state = rb_entry(node, struct extent_state, rb_node);
 869hit_next:
 870	last_start = state->start;
 871	last_end = state->end;
 872
 873	/*
 874	 * | ---- desired range ---- |
 875	 * | state |
 876	 *
 877	 * Just lock what we found and keep going
 878	 */
 879	if (state->start == start && state->end <= end) {
 880		if (state->state & exclusive_bits) {
 881			*failed_start = state->start;
 882			err = -EEXIST;
 883			goto out;
 884		}
 885
 886		set_state_bits(tree, state, &bits);
 887		cache_state(state, cached_state);
 888		merge_state(tree, state);
 889		if (last_end == (u64)-1)
 890			goto out;
 891		start = last_end + 1;
 892		state = next_state(state);
 893		if (start < end && state && state->start == start &&
 894		    !need_resched())
 895			goto hit_next;
 896		goto search_again;
 897	}
 898
 899	/*
 900	 *     | ---- desired range ---- |
 901	 * | state |
 902	 *   or
 903	 * | ------------- state -------------- |
 904	 *
 905	 * We need to split the extent we found, and may flip bits on
 906	 * second half.
 907	 *
 908	 * If the extent we found extends past our
 909	 * range, we just split and search again.  It'll get split
 910	 * again the next time though.
 911	 *
 912	 * If the extent we found is inside our range, we set the
 913	 * desired bit on it.
 914	 */
 915	if (state->start < start) {
 916		if (state->state & exclusive_bits) {
 917			*failed_start = start;
 918			err = -EEXIST;
 919			goto out;
 920		}
 921
 922		prealloc = alloc_extent_state_atomic(prealloc);
 923		BUG_ON(!prealloc);
 924		err = split_state(tree, state, prealloc, start);
 925		if (err)
 926			extent_io_tree_panic(tree, err);
 927
 928		prealloc = NULL;
 929		if (err)
 930			goto out;
 931		if (state->end <= end) {
 932			set_state_bits(tree, state, &bits);
 933			cache_state(state, cached_state);
 934			merge_state(tree, state);
 935			if (last_end == (u64)-1)
 936				goto out;
 937			start = last_end + 1;
 938			state = next_state(state);
 939			if (start < end && state && state->start == start &&
 940			    !need_resched())
 941				goto hit_next;
 942		}
 943		goto search_again;
 944	}
 945	/*
 946	 * | ---- desired range ---- |
 947	 *     | state | or               | state |
 948	 *
 949	 * There's a hole, we need to insert something in it and
 950	 * ignore the extent we found.
 951	 */
 952	if (state->start > start) {
 953		u64 this_end;
 954		if (end < last_start)
 955			this_end = end;
 956		else
 957			this_end = last_start - 1;
 958
 959		prealloc = alloc_extent_state_atomic(prealloc);
 960		BUG_ON(!prealloc);
 961
 962		/*
 963		 * Avoid freeing 'prealloc' if it can be merged with
 964		 * the later extent.
 965		 */
 966		err = insert_state(tree, prealloc, start, this_end,
 967				   NULL, NULL, &bits);
 968		if (err)
 969			extent_io_tree_panic(tree, err);
 970
 971		cache_state(prealloc, cached_state);
 972		prealloc = NULL;
 973		start = this_end + 1;
 974		goto search_again;
 975	}
 976	/*
 977	 * | ---- desired range ---- |
 978	 *                        | state |
 979	 * We need to split the extent, and set the bit
 980	 * on the first half
 981	 */
 982	if (state->start <= end && state->end > end) {
 983		if (state->state & exclusive_bits) {
 984			*failed_start = start;
 985			err = -EEXIST;
 986			goto out;
 987		}
 988
 989		prealloc = alloc_extent_state_atomic(prealloc);
 990		BUG_ON(!prealloc);
 991		err = split_state(tree, state, prealloc, end + 1);
 992		if (err)
 993			extent_io_tree_panic(tree, err);
 994
 995		set_state_bits(tree, prealloc, &bits);
 996		cache_state(prealloc, cached_state);
 997		merge_state(tree, prealloc);
 998		prealloc = NULL;
 999		goto out;
1000	}
1001
1002	goto search_again;
1003
1004out:
1005	spin_unlock(&tree->lock);
1006	if (prealloc)
1007		free_extent_state(prealloc);
1008
1009	return err;
1010
1011search_again:
1012	if (start > end)
1013		goto out;
1014	spin_unlock(&tree->lock);
1015	if (mask & __GFP_WAIT)
1016		cond_resched();
1017	goto again;
1018}
1019
1020int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1021		   unsigned long bits, u64 * failed_start,
1022		   struct extent_state **cached_state, gfp_t mask)
1023{
1024	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1025				cached_state, mask);
1026}
1027
1028
1029/**
1030 * convert_extent_bit - convert all bits in a given range from one bit to
1031 * 			another
1032 * @tree:	the io tree to search
1033 * @start:	the start offset in bytes
1034 * @end:	the end offset in bytes (inclusive)
1035 * @bits:	the bits to set in this range
1036 * @clear_bits:	the bits to clear in this range
1037 * @cached_state:	state that we're going to cache
1038 * @mask:	the allocation mask
1039 *
1040 * This will go through and set bits for the given range.  If any states exist
1041 * already in this range they are set with the given bit and cleared of the
1042 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1043 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1044 * boundary bits like LOCK.
1045 */
1046int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1047		       unsigned long bits, unsigned long clear_bits,
1048		       struct extent_state **cached_state, gfp_t mask)
1049{
1050	struct extent_state *state;
1051	struct extent_state *prealloc = NULL;
1052	struct rb_node *node;
1053	struct rb_node **p;
1054	struct rb_node *parent;
1055	int err = 0;
1056	u64 last_start;
1057	u64 last_end;
1058
1059	btrfs_debug_check_extent_io_range(tree, start, end);
1060
1061again:
1062	if (!prealloc && (mask & __GFP_WAIT)) {
1063		prealloc = alloc_extent_state(mask);
1064		if (!prealloc)
1065			return -ENOMEM;
1066	}
1067
1068	spin_lock(&tree->lock);
1069	if (cached_state && *cached_state) {
1070		state = *cached_state;
1071		if (state->start <= start && state->end > start &&
1072		    state->tree) {
1073			node = &state->rb_node;
1074			goto hit_next;
1075		}
1076	}
1077
1078	/*
1079	 * this search will find all the extents that end after
1080	 * our range starts.
1081	 */
1082	node = tree_search_for_insert(tree, start, &p, &parent);
1083	if (!node) {
1084		prealloc = alloc_extent_state_atomic(prealloc);
1085		if (!prealloc) {
1086			err = -ENOMEM;
1087			goto out;
1088		}
1089		err = insert_state(tree, prealloc, start, end,
1090				   &p, &parent, &bits);
1091		if (err)
1092			extent_io_tree_panic(tree, err);
1093		cache_state(prealloc, cached_state);
1094		prealloc = NULL;
1095		goto out;
1096	}
1097	state = rb_entry(node, struct extent_state, rb_node);
1098hit_next:
1099	last_start = state->start;
1100	last_end = state->end;
1101
1102	/*
1103	 * | ---- desired range ---- |
1104	 * | state |
1105	 *
1106	 * Just lock what we found and keep going
1107	 */
1108	if (state->start == start && state->end <= end) {
1109		set_state_bits(tree, state, &bits);
1110		cache_state(state, cached_state);
1111		state = clear_state_bit(tree, state, &clear_bits, 0);
1112		if (last_end == (u64)-1)
1113			goto out;
1114		start = last_end + 1;
1115		if (start < end && state && state->start == start &&
1116		    !need_resched())
1117			goto hit_next;
1118		goto search_again;
1119	}
1120
1121	/*
1122	 *     | ---- desired range ---- |
1123	 * | state |
1124	 *   or
1125	 * | ------------- state -------------- |
1126	 *
1127	 * We need to split the extent we found, and may flip bits on
1128	 * second half.
1129	 *
1130	 * If the extent we found extends past our
1131	 * range, we just split and search again.  It'll get split
1132	 * again the next time though.
1133	 *
1134	 * If the extent we found is inside our range, we set the
1135	 * desired bit on it.
1136	 */
1137	if (state->start < start) {
1138		prealloc = alloc_extent_state_atomic(prealloc);
1139		if (!prealloc) {
1140			err = -ENOMEM;
1141			goto out;
1142		}
1143		err = split_state(tree, state, prealloc, start);
1144		if (err)
1145			extent_io_tree_panic(tree, err);
1146		prealloc = NULL;
1147		if (err)
1148			goto out;
1149		if (state->end <= end) {
1150			set_state_bits(tree, state, &bits);
1151			cache_state(state, cached_state);
1152			state = clear_state_bit(tree, state, &clear_bits, 0);
1153			if (last_end == (u64)-1)
1154				goto out;
1155			start = last_end + 1;
1156			if (start < end && state && state->start == start &&
1157			    !need_resched())
1158				goto hit_next;
1159		}
1160		goto search_again;
1161	}
1162	/*
1163	 * | ---- desired range ---- |
1164	 *     | state | or               | state |
1165	 *
1166	 * There's a hole, we need to insert something in it and
1167	 * ignore the extent we found.
1168	 */
1169	if (state->start > start) {
1170		u64 this_end;
1171		if (end < last_start)
1172			this_end = end;
1173		else
1174			this_end = last_start - 1;
1175
1176		prealloc = alloc_extent_state_atomic(prealloc);
1177		if (!prealloc) {
1178			err = -ENOMEM;
1179			goto out;
1180		}
1181
1182		/*
1183		 * Avoid freeing 'prealloc' if it can be merged with
1184		 * the later extent.
1185		 */
1186		err = insert_state(tree, prealloc, start, this_end,
1187				   NULL, NULL, &bits);
1188		if (err)
1189			extent_io_tree_panic(tree, err);
1190		cache_state(prealloc, cached_state);
1191		prealloc = NULL;
1192		start = this_end + 1;
1193		goto search_again;
1194	}
1195	/*
1196	 * | ---- desired range ---- |
1197	 *                        | state |
1198	 * We need to split the extent, and set the bit
1199	 * on the first half
1200	 */
1201	if (state->start <= end && state->end > end) {
1202		prealloc = alloc_extent_state_atomic(prealloc);
1203		if (!prealloc) {
1204			err = -ENOMEM;
1205			goto out;
1206		}
1207
1208		err = split_state(tree, state, prealloc, end + 1);
1209		if (err)
1210			extent_io_tree_panic(tree, err);
1211
1212		set_state_bits(tree, prealloc, &bits);
1213		cache_state(prealloc, cached_state);
1214		clear_state_bit(tree, prealloc, &clear_bits, 0);
1215		prealloc = NULL;
1216		goto out;
1217	}
1218
1219	goto search_again;
1220
1221out:
1222	spin_unlock(&tree->lock);
1223	if (prealloc)
1224		free_extent_state(prealloc);
1225
1226	return err;
1227
1228search_again:
1229	if (start > end)
1230		goto out;
1231	spin_unlock(&tree->lock);
1232	if (mask & __GFP_WAIT)
1233		cond_resched();
1234	goto again;
1235}
1236
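/*
 * Editor's sketch (not part of the original source): a one-pass
 * conversion with convert_extent_bit() above -- set 'bits' while
 * clearing 'clear_bits' on the same range, e.g. the DELALLOC -> DIRTY
 * case the comment mentions.  The function name is illustrative.
 */
static int __maybe_unused example_convert_range(struct extent_io_tree *tree,
						u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				 EXTENT_DELALLOC, &cached, GFP_NOFS);
	free_extent_state(cached);	/* safe on NULL */
	return ret;
}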
1237/* wrappers around set/clear extent bit */
1238int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1239		     gfp_t mask)
1240{
1241	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1242			      NULL, mask);
1243}
1244
1245int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1246		    unsigned long bits, gfp_t mask)
1247{
1248	return set_extent_bit(tree, start, end, bits, NULL,
1249			      NULL, mask);
1250}
1251
1252int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1253		      unsigned long bits, gfp_t mask)
1254{
1255	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1256}
1257
1258int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1259			struct extent_state **cached_state, gfp_t mask)
1260{
1261	return set_extent_bit(tree, start, end,
1262			      EXTENT_DELALLOC | EXTENT_UPTODATE,
1263			      NULL, cached_state, mask);
1264}
1265
1266int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1267		      struct extent_state **cached_state, gfp_t mask)
1268{
1269	return set_extent_bit(tree, start, end,
1270			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1271			      NULL, cached_state, mask);
1272}
1273
1274int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1275		       gfp_t mask)
1276{
1277	return clear_extent_bit(tree, start, end,
1278				EXTENT_DIRTY | EXTENT_DELALLOC |
1279				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1280}
1281
1282int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1283		     gfp_t mask)
1284{
1285	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1286			      NULL, mask);
1287}
1288
1289int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1290			struct extent_state **cached_state, gfp_t mask)
1291{
1292	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1293			      cached_state, mask);
1294}
1295
1296int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1297			  struct extent_state **cached_state, gfp_t mask)
1298{
1299	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1300				cached_state, mask);
1301}
1302
1303/*
1304 * either insert or lock the state struct between start and end; use mask
1305 * to tell us if waiting is desired.
1306 */
1307int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1308		     unsigned long bits, struct extent_state **cached_state)
1309{
1310	int err;
1311	u64 failed_start;
1312	while (1) {
1313		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1314				       EXTENT_LOCKED, &failed_start,
1315				       cached_state, GFP_NOFS);
1316		if (err == -EEXIST) {
1317			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1318			start = failed_start;
1319		} else
1320			break;
1321		WARN_ON(start > end);
1322	}
1323	return err;
1324}
1325
1326int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1327{
1328	return lock_extent_bits(tree, start, end, 0, NULL);
1329}
1330
1331int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1332{
1333	int err;
1334	u64 failed_start;
1335
1336	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1337			       &failed_start, NULL, GFP_NOFS);
1338	if (err == -EEXIST) {
1339		if (failed_start > start)
1340			clear_extent_bit(tree, start, failed_start - 1,
1341					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1342		return 0;
1343	}
1344	return 1;
1345}
1346
1347int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1348			 struct extent_state **cached, gfp_t mask)
1349{
1350	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1351				mask);
1352}
1353
1354int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1355{
1356	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1357				GFP_NOFS);
1358}
1359
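/*
 * Editor's sketch (not part of the original source): the
 * lock/query/unlock pattern the wrappers above serve.
 * lock_extent_bits() loops on -EEXIST, sleeping in wait_extent_bit()
 * until [start, end] is free, so the range is exclusively held on
 * return.  Function and variable names are illustrative.
 */
static int __maybe_unused example_locked_query(struct extent_io_tree *tree,
					       u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int uptodate;

	lock_extent_bits(tree, start, end, 0, &cached);

	/* 'filled' == 1: every extent in the range must have the bit */
	uptodate = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1,
				  cached);

	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
	return uptodate;
}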
1360int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1361{
1362	unsigned long index = start >> PAGE_CACHE_SHIFT;
1363	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1364	struct page *page;
1365
1366	while (index <= end_index) {
1367		page = find_get_page(inode->i_mapping, index);
1368		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1369		clear_page_dirty_for_io(page);
1370		page_cache_release(page);
1371		index++;
1372	}
1373	return 0;
1374}
1375
1376int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1377{
1378	unsigned long index = start >> PAGE_CACHE_SHIFT;
1379	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1380	struct page *page;
1381
1382	while (index <= end_index) {
1383		page = find_get_page(inode->i_mapping, index);
1384		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1385		account_page_redirty(page);
1386		__set_page_dirty_nobuffers(page);
1387		page_cache_release(page);
1388		index++;
1389	}
1390	return 0;
1391}
1392
1393/*
1394 * helper function to set both pages and extents in the tree writeback
1395 */
1396static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1397{
1398	unsigned long index = start >> PAGE_CACHE_SHIFT;
1399	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1400	struct page *page;
1401
1402	while (index <= end_index) {
1403		page = find_get_page(tree->mapping, index);
1404		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1405		set_page_writeback(page);
1406		page_cache_release(page);
1407		index++;
1408	}
1409	return 0;
1410}
1411
1412/* find the first state struct with 'bits' set after 'start', and
1413 * return it.  tree->lock must be held.  NULL will be returned if
1414 * nothing was found after 'start'
1415 */
1416static struct extent_state *
1417find_first_extent_bit_state(struct extent_io_tree *tree,
1418			    u64 start, unsigned long bits)
1419{
1420	struct rb_node *node;
1421	struct extent_state *state;
1422
1423	/*
1424	 * this search will find all the extents that end after
1425	 * our range starts.
1426	 */
1427	node = tree_search(tree, start);
1428	if (!node)
1429		goto out;
1430
1431	while (1) {
1432		state = rb_entry(node, struct extent_state, rb_node);
1433		if (state->end >= start && (state->state & bits))
1434			return state;
1435
1436		node = rb_next(node);
1437		if (!node)
1438			break;
1439	}
1440out:
1441	return NULL;
1442}
1443
1444/*
1445 * find the first offset in the io tree with 'bits' set. zero is
1446 * returned if we find something, and *start_ret and *end_ret are
1447 * set to reflect the state struct that was found.
1448 *
1449 * If nothing was found, 1 is returned.
1450 */
1451int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1452			  u64 *start_ret, u64 *end_ret, unsigned long bits,
1453			  struct extent_state **cached_state)
1454{
1455	struct extent_state *state;
1456	struct rb_node *n;
1457	int ret = 1;
1458
1459	spin_lock(&tree->lock);
1460	if (cached_state && *cached_state) {
1461		state = *cached_state;
1462		if (state->end == start - 1 && state->tree) {
1463			n = rb_next(&state->rb_node);
1464			while (n) {
1465				state = rb_entry(n, struct extent_state,
1466						 rb_node);
1467				if (state->state & bits)
1468					goto got_it;
1469				n = rb_next(n);
1470			}
1471			free_extent_state(*cached_state);
1472			*cached_state = NULL;
1473			goto out;
1474		}
1475		free_extent_state(*cached_state);
1476		*cached_state = NULL;
1477	}
1478
1479	state = find_first_extent_bit_state(tree, start, bits);
1480got_it:
1481	if (state) {
1482		cache_state(state, cached_state);
1483		*start_ret = state->start;
1484		*end_ret = state->end;
1485		ret = 0;
1486	}
1487out:
1488	spin_unlock(&tree->lock);
1489	return ret;
1490}
1491
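/*
 * Editor's sketch (not part of the original source): walking every
 * range with a given bit set by restarting the search past each hit.
 * find_first_extent_bit() above returns 0 on a hit and fills
 * *start_ret/*end_ret with the inclusive range.  Illustrative only.
 */
static void __maybe_unused example_walk_bits(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start, found_end;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      EXTENT_DELALLOC, NULL)) {
		/* ... process [found_start, found_end] ... */
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
}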
1492/*
1493 * find a contiguous range of bytes in the file marked as delalloc, not
1494 * more than 'max_bytes'.  start and end are used to return the range,
1495 *
1496 * 1 is returned if we find something, 0 if nothing was in the tree
1497 */
1498static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1499					u64 *start, u64 *end, u64 max_bytes,
1500					struct extent_state **cached_state)
1501{
1502	struct rb_node *node;
1503	struct extent_state *state;
1504	u64 cur_start = *start;
1505	u64 found = 0;
1506	u64 total_bytes = 0;
1507
1508	spin_lock(&tree->lock);
1509
1510	/*
1511	 * this search will find all the extents that end after
1512	 * our range starts.
1513	 */
1514	node = tree_search(tree, cur_start);
1515	if (!node) {
1516		if (!found)
1517			*end = (u64)-1;
1518		goto out;
1519	}
1520
1521	while (1) {
1522		state = rb_entry(node, struct extent_state, rb_node);
1523		if (found && (state->start != cur_start ||
1524			      (state->state & EXTENT_BOUNDARY))) {
1525			goto out;
1526		}
1527		if (!(state->state & EXTENT_DELALLOC)) {
1528			if (!found)
1529				*end = state->end;
1530			goto out;
1531		}
1532		if (!found) {
1533			*start = state->start;
1534			*cached_state = state;
1535			atomic_inc(&state->refs);
1536		}
1537		found++;
1538		*end = state->end;
1539		cur_start = state->end + 1;
1540		node = rb_next(node);
1541		total_bytes += state->end - state->start + 1;
1542		if (total_bytes >= max_bytes)
1543			break;
1544		if (!node)
1545			break;
1546	}
1547out:
1548	spin_unlock(&tree->lock);
1549	return found;
1550}
1551
1552static noinline void __unlock_for_delalloc(struct inode *inode,
1553					   struct page *locked_page,
1554					   u64 start, u64 end)
1555{
1556	int ret;
1557	struct page *pages[16];
1558	unsigned long index = start >> PAGE_CACHE_SHIFT;
1559	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1560	unsigned long nr_pages = end_index - index + 1;
1561	int i;
1562
1563	if (index == locked_page->index && end_index == index)
1564		return;
1565
1566	while (nr_pages > 0) {
1567		ret = find_get_pages_contig(inode->i_mapping, index,
1568				     min_t(unsigned long, nr_pages,
1569				     ARRAY_SIZE(pages)), pages);
1570		for (i = 0; i < ret; i++) {
1571			if (pages[i] != locked_page)
1572				unlock_page(pages[i]);
1573			page_cache_release(pages[i]);
1574		}
1575		nr_pages -= ret;
1576		index += ret;
1577		cond_resched();
1578	}
1579}
1580
1581static noinline int lock_delalloc_pages(struct inode *inode,
1582					struct page *locked_page,
1583					u64 delalloc_start,
1584					u64 delalloc_end)
1585{
1586	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1587	unsigned long start_index = index;
1588	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1589	unsigned long pages_locked = 0;
1590	struct page *pages[16];
1591	unsigned long nrpages;
1592	int ret;
1593	int i;
1594
1595	/* the caller is responsible for locking the start index */
1596	if (index == locked_page->index && index == end_index)
1597		return 0;
1598
1599	/* skip the page at the start index */
1600	nrpages = end_index - index + 1;
1601	while (nrpages > 0) {
1602		ret = find_get_pages_contig(inode->i_mapping, index,
1603				     min_t(unsigned long,
1604				     nrpages, ARRAY_SIZE(pages)), pages);
1605		if (ret == 0) {
1606			ret = -EAGAIN;
1607			goto done;
1608		}
1609		/* now we have an array of pages, lock them all */
1610		for (i = 0; i < ret; i++) {
1611			/*
1612			 * the caller is taking responsibility for
1613			 * locked_page
1614			 */
1615			if (pages[i] != locked_page) {
1616				lock_page(pages[i]);
1617				if (!PageDirty(pages[i]) ||
1618				    pages[i]->mapping != inode->i_mapping) {
1619					ret = -EAGAIN;
1620					unlock_page(pages[i]);
1621					page_cache_release(pages[i]);
1622					goto done;
1623				}
1624			}
1625			page_cache_release(pages[i]);
1626			pages_locked++;
1627		}
1628		nrpages -= ret;
1629		index += ret;
1630		cond_resched();
1631	}
1632	ret = 0;
1633done:
1634	if (ret && pages_locked) {
1635		__unlock_for_delalloc(inode, locked_page,
1636			      delalloc_start,
1637			      ((u64)(start_index + pages_locked - 1)) <<
1638			      PAGE_CACHE_SHIFT);
1639	}
1640	return ret;
1641}
1642
1643/*
1644 * find a contiguous range of bytes in the file marked as delalloc, not
1645 * more than 'max_bytes'.  start and end are used to return the range.
1646 *
1647 * 1 is returned if we find something, 0 if nothing was in the tree
1648 */
1649STATIC u64 find_lock_delalloc_range(struct inode *inode,
1650				    struct extent_io_tree *tree,
1651				    struct page *locked_page, u64 *start,
1652				    u64 *end, u64 max_bytes)
1653{
1654	u64 delalloc_start;
1655	u64 delalloc_end;
1656	u64 found;
1657	struct extent_state *cached_state = NULL;
1658	int ret;
1659	int loops = 0;
1660
1661again:
1662	/* step one, find a bunch of delalloc bytes starting at start */
1663	delalloc_start = *start;
1664	delalloc_end = 0;
1665	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1666				    max_bytes, &cached_state);
1667	if (!found || delalloc_end <= *start) {
1668		*start = delalloc_start;
1669		*end = delalloc_end;
1670		free_extent_state(cached_state);
1671		return 0;
1672	}
1673
1674	/*
1675	 * start comes from the offset of locked_page.  We have to lock
1676	 * pages in order, so we can't process delalloc bytes before
1677	 * locked_page
1678	 */
1679	if (delalloc_start < *start)
1680		delalloc_start = *start;
1681
1682	/*
1683	 * make sure to limit the number of pages we try to lock down
1684	 */
1685	if (delalloc_end + 1 - delalloc_start > max_bytes)
1686		delalloc_end = delalloc_start + max_bytes - 1;
1687
1688	/* step two, lock all the pages after the page that has start */
1689	ret = lock_delalloc_pages(inode, locked_page,
1690				  delalloc_start, delalloc_end);
1691	if (ret == -EAGAIN) {
1692		/* some of the pages are gone, let's avoid looping by
1693		 * shortening the size of the delalloc range we're searching
1694		 */
1695		free_extent_state(cached_state);
1696		if (!loops) {
1697			max_bytes = PAGE_CACHE_SIZE;
1698			loops = 1;
1699			goto again;
1700		} else {
1701			found = 0;
1702			goto out_failed;
1703		}
1704	}
1705	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1706
1707	/* step three, lock the state bits for the whole range */
1708	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1709
1710	/* then test to make sure it is all still delalloc */
1711	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1712			     EXTENT_DELALLOC, 1, cached_state);
1713	if (!ret) {
1714		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1715				     &cached_state, GFP_NOFS);
1716		__unlock_for_delalloc(inode, locked_page,
1717			      delalloc_start, delalloc_end);
1718		cond_resched();
1719		goto again;
1720	}
1721	free_extent_state(cached_state);
1722	*start = delalloc_start;
1723	*end = delalloc_end;
1724out_failed:
1725	return found;
1726}
1727
1728int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1729				 struct page *locked_page,
1730				 unsigned long clear_bits,
1731				 unsigned long page_ops)
1732{
1733	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1734	int ret;
1735	struct page *pages[16];
1736	unsigned long index = start >> PAGE_CACHE_SHIFT;
1737	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1738	unsigned long nr_pages = end_index - index + 1;
1739	int i;
1740
1741	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1742	if (page_ops == 0)
1743		return 0;
1744
1745	while (nr_pages > 0) {
1746		ret = find_get_pages_contig(inode->i_mapping, index,
1747				     min_t(unsigned long,
1748				     nr_pages, ARRAY_SIZE(pages)), pages);
1749		for (i = 0; i < ret; i++) {
1750
1751			if (page_ops & PAGE_SET_PRIVATE2)
1752				SetPagePrivate2(pages[i]);
1753
1754			if (pages[i] == locked_page) {
1755				page_cache_release(pages[i]);
1756				continue;
1757			}
1758			if (page_ops & PAGE_CLEAR_DIRTY)
1759				clear_page_dirty_for_io(pages[i]);
1760			if (page_ops & PAGE_SET_WRITEBACK)
1761				set_page_writeback(pages[i]);
1762			if (page_ops & PAGE_END_WRITEBACK)
1763				end_page_writeback(pages[i]);
1764			if (page_ops & PAGE_UNLOCK)
1765				unlock_page(pages[i]);
1766			page_cache_release(pages[i]);
1767		}
1768		nr_pages -= ret;
1769		index += ret;
1770		cond_resched();
1771	}
1772	return 0;
1773}
1774
1775/*
1776 * count the number of bytes in the tree that have a given bit(s)
1777 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1778 * cached.  The total number found is returned.
1779 */
1780u64 count_range_bits(struct extent_io_tree *tree,
1781		     u64 *start, u64 search_end, u64 max_bytes,
1782		     unsigned long bits, int contig)
1783{
1784	struct rb_node *node;
1785	struct extent_state *state;
1786	u64 cur_start = *start;
1787	u64 total_bytes = 0;
1788	u64 last = 0;
1789	int found = 0;
1790
1791	if (WARN_ON(search_end <= cur_start))
1792		return 0;
1793
1794	spin_lock(&tree->lock);
1795	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1796		total_bytes = tree->dirty_bytes;
1797		goto out;
1798	}
1799	/*
1800	 * this search will find all the extents that end after
1801	 * our range starts.
1802	 */
1803	node = tree_search(tree, cur_start);
1804	if (!node)
1805		goto out;
1806
1807	while (1) {
1808		state = rb_entry(node, struct extent_state, rb_node);
1809		if (state->start > search_end)
1810			break;
1811		if (contig && found && state->start > last + 1)
1812			break;
1813		if (state->end >= cur_start && (state->state & bits) == bits) {
1814			total_bytes += min(search_end, state->end) + 1 -
1815				       max(cur_start, state->start);
1816			if (total_bytes >= max_bytes)
1817				break;
1818			if (!found) {
1819				*start = max(cur_start, state->start);
1820				found = 1;
1821			}
1822			last = state->end;
1823		} else if (contig && found) {
1824			break;
1825		}
1826		node = rb_next(node);
1827		if (!node)
1828			break;
1829	}
1830out:
1831	spin_unlock(&tree->lock);
1832	return total_bytes;
1833}
1834
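/*
 * Editor's sketch (not part of the original source): totalling dirty
 * bytes with count_range_bits() above.  With *start == 0 and bits ==
 * EXTENT_DIRTY the fast path just returns tree->dirty_bytes; any other
 * query walks the rb-tree.  The function name is illustrative.
 */
static u64 __maybe_unused example_total_dirty(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1,
				EXTENT_DIRTY, 0 /* contig */);
}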
1835/*
1836 * set the private field for a given byte offset in the tree.  If there isn't
1837 * an extent_state there already, this does nothing.
1838 */
1839static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1840{
1841	struct rb_node *node;
1842	struct extent_state *state;
1843	int ret = 0;
1844
1845	spin_lock(&tree->lock);
1846	/*
1847	 * this search will find all the extents that end after
1848	 * our range starts.
1849	 */
1850	node = tree_search(tree, start);
1851	if (!node) {
1852		ret = -ENOENT;
1853		goto out;
1854	}
1855	state = rb_entry(node, struct extent_state, rb_node);
1856	if (state->start != start) {
1857		ret = -ENOENT;
1858		goto out;
1859	}
1860	state->private = private;
1861out:
1862	spin_unlock(&tree->lock);
1863	return ret;
1864}
1865
1866int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1867{
1868	struct rb_node *node;
1869	struct extent_state *state;
1870	int ret = 0;
1871
1872	spin_lock(&tree->lock);
1873	/*
1874	 * this search will find all the extents that end after
1875	 * our range starts.
1876	 */
1877	node = tree_search(tree, start);
1878	if (!node) {
1879		ret = -ENOENT;
1880		goto out;
1881	}
1882	state = rb_entry(node, struct extent_state, rb_node);
1883	if (state->start != start) {
1884		ret = -ENOENT;
1885		goto out;
1886	}
1887	*private = state->private;
1888out:
1889	spin_unlock(&tree->lock);
1890	return ret;
1891}
1892
1893/*
1894 * searches a range in the state tree for a given mask.
1895 * If 'filled' == 1, this returns 1 only if every extent in the tree
1896 * has the bits set.  Otherwise, 1 is returned if any bit in the
1897 * range is found set.
1898 */
1899int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1900		   unsigned long bits, int filled, struct extent_state *cached)
1901{
1902	struct extent_state *state = NULL;
1903	struct rb_node *node;
1904	int bitset = 0;
1905
1906	spin_lock(&tree->lock);
1907	if (cached && cached->tree && cached->start <= start &&
1908	    cached->end > start)
1909		node = &cached->rb_node;
1910	else
1911		node = tree_search(tree, start);
1912	while (node && start <= end) {
1913		state = rb_entry(node, struct extent_state, rb_node);
1914
1915		if (filled && state->start > start) {
1916			bitset = 0;
1917			break;
1918		}
1919
1920		if (state->start > end)
1921			break;
1922
1923		if (state->state & bits) {
1924			bitset = 1;
1925			if (!filled)
1926				break;
1927		} else if (filled) {
1928			bitset = 0;
1929			break;
1930		}
1931
1932		if (state->end == (u64)-1)
1933			break;
1934
1935		start = state->end + 1;
1936		if (start > end)
1937			break;
1938		node = rb_next(node);
1939		if (!node) {
1940			if (filled)
1941				bitset = 0;
1942			break;
1943		}
1944	}
1945	spin_unlock(&tree->lock);
1946	return bitset;
1947}
1948
1949/*
1950 * helper function to set a given page up to date if all the
1951 * extents in the tree for that page are up to date
1952 */
1953static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1954{
1955	u64 start = page_offset(page);
1956	u64 end = start + PAGE_CACHE_SIZE - 1;
1957	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1958		SetPageUptodate(page);
1959}
1960
1961/*
1962 * When IO fails, either with EIO or csum verification fails, we
1963 * try other mirrors that might have a good copy of the data.  This
1964 * io_failure_record is used to record state as we go through all the
1965 * mirrors.  If another mirror has good data, the page is set up to date
1966 * and things continue.  If a good mirror can't be found, the original
1967 * bio end_io callback is called to indicate things have failed.
1968 */
1969struct io_failure_record {
1970	struct page *page;
1971	u64 start;
1972	u64 len;
1973	u64 logical;
1974	unsigned long bio_flags;
1975	int this_mirror;
1976	int failed_mirror;
1977	int in_validation;
1978};
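/*
 * Lifecycle note (added commentary, not part of the original source): a
 * record is allocated on the first read error in a range, stored in the
 * per-inode io_failure_tree as that state's private pointer, and the range
 * is tagged EXTENT_LOCKED | EXTENT_DIRTY there (plus EXTENT_DAMAGED in the
 * regular io_tree).  free_io_failure() below undoes all of that and frees
 * the record once the error is repaired or given up on.
 */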
1979
1980static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1981				int did_repair)
1982{
1983	int ret;
1984	int err = 0;
1985	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1986
1987	set_state_private(failure_tree, rec->start, 0);
1988	ret = clear_extent_bits(failure_tree, rec->start,
1989				rec->start + rec->len - 1,
1990				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1991	if (ret)
1992		err = ret;
1993
1994	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1995				rec->start + rec->len - 1,
1996				EXTENT_DAMAGED, GFP_NOFS);
1997	if (ret && !err)
1998		err = ret;
1999
2000	kfree(rec);
2001	return err;
2002}
2003
2004/*
2005 * this bypasses the standard btrfs submit functions deliberately, as
2006 * the standard behavior is to write all copies in a raid setup. here we only
2007 * want to write the one bad copy. so we do the mapping for ourselves and issue
2008 * submit_bio directly.
2009 * to avoid any synchronization issues, wait for the data after writing; this
2010 * keeps the read that triggered the error from finishing before the repair.
2011 * currently, there can be no more than two copies of every data bit. thus,
2012 * exactly one rewrite is required.
2013 */
2014int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2015			u64 length, u64 logical, struct page *page,
2016			int mirror_num)
2017{
2018	struct bio *bio;
2019	struct btrfs_device *dev;
2020	u64 map_length = 0;
2021	u64 sector;
2022	struct btrfs_bio *bbio = NULL;
2023	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2024	int ret;
2025
2026	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2027	BUG_ON(!mirror_num);
2028
2029	/* we can't repair anything in raid56 yet */
2030	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2031		return 0;
2032
2033	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2034	if (!bio)
2035		return -EIO;
2036	bio->bi_iter.bi_size = 0;
2037	map_length = length;
2038
2039	ret = btrfs_map_block(fs_info, WRITE, logical,
2040			      &map_length, &bbio, mirror_num);
2041	if (ret) {
2042		bio_put(bio);
2043		return -EIO;
2044	}
2045	BUG_ON(mirror_num != bbio->mirror_num);
2046	sector = bbio->stripes[mirror_num-1].physical >> 9;
2047	bio->bi_iter.bi_sector = sector;
2048	dev = bbio->stripes[mirror_num-1].dev;
2049	kfree(bbio);
2050	if (!dev || !dev->bdev || !dev->writeable) {
2051		bio_put(bio);
2052		return -EIO;
2053	}
2054	bio->bi_bdev = dev->bdev;
2055	bio_add_page(bio, page, length, start - page_offset(page));
2056
2057	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2058		/* try to remap that extent elsewhere? */
2059		bio_put(bio);
2060		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2061		return -EIO;
2062	}
2063
2064	printk_ratelimited_in_rcu(KERN_INFO
2065			"BTRFS: read error corrected: ino %lu off %llu "
2066		    "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2067		    start, rcu_str_deref(dev->name), sector);
2068
2069	bio_put(bio);
2070	return 0;
2071}
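/*
 * Note (added commentary, not part of the original source): 'start' and
 * 'length' are byte offsets, and the ">> 9" above converts the physical
 * byte offset of the chosen stripe into 512-byte sectors for the bio.
 * The bio is allocated with room for a single vec, so each call repairs
 * at most one page; repair_eb_io_failure() below simply loops over the
 * buffer's pages.
 */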
2072
2073int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2074			 int mirror_num)
2075{
2076	u64 start = eb->start;
2077	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2078	int ret = 0;
2079
2080	if (root->fs_info->sb->s_flags & MS_RDONLY)
2081		return -EROFS;
2082
2083	for (i = 0; i < num_pages; i++) {
2084		struct page *p = extent_buffer_page(eb, i);
2085		ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
2086					start, p, mirror_num);
2087		if (ret)
2088			break;
2089		start += PAGE_CACHE_SIZE;
2090	}
2091
2092	return ret;
2093}
2094
2095/*
2096 * each time an IO finishes, we do a fast check in the IO failure tree
2097 * to see if we need to process or clean up an io_failure_record
2098 */
2099static int clean_io_failure(u64 start, struct page *page)
2100{
2101	u64 private;
2102	u64 private_failure;
2103	struct io_failure_record *failrec;
2104	struct inode *inode = page->mapping->host;
2105	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2106	struct extent_state *state;
2107	int num_copies;
2108	int did_repair = 0;
2109	int ret;
2110
2111	private = 0;
2112	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2113				(u64)-1, 1, EXTENT_DIRTY, 0);
2114	if (!ret)
2115		return 0;
2116
2117	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2118				&private_failure);
2119	if (ret)
2120		return 0;
2121
2122	failrec = (struct io_failure_record *)(unsigned long) private_failure;
2123	BUG_ON(!failrec->this_mirror);
2124
2125	if (failrec->in_validation) {
2126		/* there was no real error, just free the record */
2127		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2128			 failrec->start);
2129		did_repair = 1;
2130		goto out;
2131	}
2132	if (fs_info->sb->s_flags & MS_RDONLY)
2133		goto out;
2134
2135	spin_lock(&BTRFS_I(inode)->io_tree.lock);
2136	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2137					    failrec->start,
2138					    EXTENT_LOCKED);
2139	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2140
2141	if (state && state->start <= failrec->start &&
2142	    state->end >= failrec->start + failrec->len - 1) {
2143		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2144					      failrec->len);
2145		if (num_copies > 1)  {
2146			ret = repair_io_failure(fs_info, start, failrec->len,
2147						failrec->logical, page,
2148						failrec->failed_mirror);
2149			did_repair = !ret;
2150		}
2151		ret = 0;
2152	}
2153
2154out:
2155	if (!ret)
2156		ret = free_io_failure(inode, failrec, did_repair);
2157
2158	return ret;
2159}
2160
2161/*
2162 * this is a generic handler for readpage errors (default
2163 * readpage_io_failed_hook). if other copies exist, read those and write back
2164 * good data to the failed position. it does not attempt to remap the failed
2165 * extent elsewhere, hoping the device will be smart enough to do this as
2166 * needed
2167 */
2168
2169static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2170			      struct page *page, u64 start, u64 end,
2171			      int failed_mirror)
2172{
2173	struct io_failure_record *failrec = NULL;
2174	u64 private;
2175	struct extent_map *em;
2176	struct inode *inode = page->mapping->host;
2177	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2178	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2179	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2180	struct bio *bio;
2181	struct btrfs_io_bio *btrfs_failed_bio;
2182	struct btrfs_io_bio *btrfs_bio;
2183	int num_copies;
2184	int ret;
2185	int read_mode;
2186	u64 logical;
2187
2188	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2189
2190	ret = get_state_private(failure_tree, start, &private);
2191	if (ret) {
2192		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2193		if (!failrec)
2194			return -ENOMEM;
2195		failrec->start = start;
2196		failrec->len = end - start + 1;
2197		failrec->this_mirror = 0;
2198		failrec->bio_flags = 0;
2199		failrec->in_validation = 0;
2200
2201		read_lock(&em_tree->lock);
2202		em = lookup_extent_mapping(em_tree, start, failrec->len);
2203		if (!em) {
2204			read_unlock(&em_tree->lock);
2205			kfree(failrec);
2206			return -EIO;
2207		}
2208
2209		if (em->start > start || em->start + em->len <= start) {
2210			free_extent_map(em);
2211			em = NULL;
2212		}
2213		read_unlock(&em_tree->lock);
2214
2215		if (!em) {
2216			kfree(failrec);
2217			return -EIO;
2218		}
2219		logical = start - em->start;
2220		logical = em->block_start + logical;
2221		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2222			logical = em->block_start;
2223			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2224			extent_set_compress_type(&failrec->bio_flags,
2225						 em->compress_type);
2226		}
2227		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2228			 "len=%llu\n", logical, start, failrec->len);
2229		failrec->logical = logical;
2230		free_extent_map(em);
2231
2232		/* set the bits in the private failure tree */
2233		ret = set_extent_bits(failure_tree, start, end,
2234					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2235		if (ret >= 0)
2236			ret = set_state_private(failure_tree, start,
2237						(u64)(unsigned long)failrec);
2238		/* set the bits in the inode's tree */
2239		if (ret >= 0)
2240			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241						GFP_NOFS);
2242		if (ret < 0) {
2243			kfree(failrec);
2244			return ret;
2245		}
2246	} else {
2247		failrec = (struct io_failure_record *)(unsigned long)private;
2248		pr_debug("bio_readpage_error: (found) logical=%llu, "
2249			 "start=%llu, len=%llu, validation=%d\n",
2250			 failrec->logical, failrec->start, failrec->len,
2251			 failrec->in_validation);
2252		/*
2253		 * when data can be on disk more than twice, add to failrec here
2254		 * (e.g. with a list for failed_mirror) to make
2255		 * clean_io_failure() clean all those errors at once.
2256		 */
2257	}
2258	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2259				      failrec->logical, failrec->len);
2260	if (num_copies == 1) {
2261		/*
2262		 * we only have a single copy of the data, so don't bother with
2263		 * all the retry and error correction code that follows. no
2264		 * matter what the error is, it is very likely to persist.
2265		 */
2266		pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2267			 num_copies, failrec->this_mirror, failed_mirror);
2268		free_io_failure(inode, failrec, 0);
2269		return -EIO;
2270	}
2271
2272	/*
2273	 * there are two goals:
2274	 *	a) deliver good data to the caller
2275	 *	b) correct the bad sectors on disk
2276	 */
2277	if (failed_bio->bi_vcnt > 1) {
2278		/*
2279		 * to fulfill b), we need to know the exact failing sectors, as
2280		 * we don't want to rewrite any more than the failed ones. thus,
2281		 * we need separate read requests for the failed bio
2282		 *
2283		 * if the following BUG_ON triggers, our validation request got
2284		 * merged. we need separate requests for our algorithm to work.
2285		 */
2286		BUG_ON(failrec->in_validation);
2287		failrec->in_validation = 1;
2288		failrec->this_mirror = failed_mirror;
2289		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2290	} else {
2291		/*
2292		 * we're ready to fulfill a) and b) at the same time. get a good
2293		 * copy of the failed sector and if we succeed, we have set up
2294		 * everything for repair_io_failure to do the rest for us.
2295		 */
2296		if (failrec->in_validation) {
2297			BUG_ON(failrec->this_mirror != failed_mirror);
2298			failrec->in_validation = 0;
2299			failrec->this_mirror = 0;
2300		}
2301		failrec->failed_mirror = failed_mirror;
2302		failrec->this_mirror++;
2303		if (failrec->this_mirror == failed_mirror)
2304			failrec->this_mirror++;
2305		read_mode = READ_SYNC;
2306	}
2307
2308	if (failrec->this_mirror > num_copies) {
2309		pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2310			 num_copies, failrec->this_mirror, failed_mirror);
2311		free_io_failure(inode, failrec, 0);
2312		return -EIO;
2313	}
2314
2315	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2316	if (!bio) {
2317		free_io_failure(inode, failrec, 0);
2318		return -EIO;
2319	}
2320	bio->bi_end_io = failed_bio->bi_end_io;
2321	bio->bi_iter.bi_sector = failrec->logical >> 9;
2322	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2323	bio->bi_iter.bi_size = 0;
2324
2325	btrfs_failed_bio = btrfs_io_bio(failed_bio);
2326	if (btrfs_failed_bio->csum) {
2327		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2328		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2329
2330		btrfs_bio = btrfs_io_bio(bio);
2331		btrfs_bio->csum = btrfs_bio->csum_inline;
2332		phy_offset >>= inode->i_sb->s_blocksize_bits;
2333		phy_offset *= csum_size;
2334		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2335		       csum_size);
2336	}
2337
2338	bio_add_page(bio, page, failrec->len, start - page_offset(page));
2339
2340	pr_debug("bio_readpage_error: submitting new read[%#x] to "
2341		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2342		 failrec->this_mirror, num_copies, failrec->in_validation);
2343
2344	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2345					 failrec->this_mirror,
2346					 failrec->bio_flags, 0);
2347	return ret;
2348}
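/*
 * Worked example (added commentary, not part of the original source): with
 * num_copies == 2 and failed_mirror == 1, the single-vec path above picks
 * this_mirror == 2 and resubmits the read.  If mirror 2 fails as well, the
 * next call finds the existing record, advances this_mirror to 3, and
 * 3 > num_copies makes it give up with -EIO, so every alternate copy is
 * tried exactly once.
 */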
2349
2350/* lots and lots of room for performance fixes in the end_bio funcs */
2351
2352int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2353{
2354	int uptodate = (err == 0);
2355	struct extent_io_tree *tree;
2356	int ret;
2357
2358	tree = &BTRFS_I(page->mapping->host)->io_tree;
2359
2360	if (tree->ops && tree->ops->writepage_end_io_hook) {
2361		ret = tree->ops->writepage_end_io_hook(page, start,
2362					       end, NULL, uptodate);
2363		if (ret)
2364			uptodate = 0;
2365	}
2366
2367	if (!uptodate) {
2368		ClearPageUptodate(page);
2369		SetPageError(page);
2370	}
2371	return 0;
2372}
2373
2374/*
2375 * after a writepage IO is done, we need to:
2376 * clear the uptodate bits on error
2377 * clear the writeback bits in the extent tree for this IO
2378 * end_page_writeback if the page has no more pending IO
2379 *
2380 * Scheduling is not allowed, so the extent state tree is expected
2381 * to have one and only one object corresponding to this IO.
2382 */
2383static void end_bio_extent_writepage(struct bio *bio, int err)
2384{
2385	struct bio_vec *bvec;
2386	u64 start;
2387	u64 end;
2388	int i;
2389
2390	bio_for_each_segment_all(bvec, bio, i) {
2391		struct page *page = bvec->bv_page;
2392
2393		/* We always issue full-page writes, but if some block
2394		 * in a page fails to write, blk_update_request() will
2395		 * advance bv_offset and adjust bv_len to compensate.
2396		 * Print a warning for nonzero offsets, and an error
2397		 * if they don't add up to a full page.  */
2398		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2399			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2400				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2401				   "partial page write in btrfs with offset %u and length %u",
2402					bvec->bv_offset, bvec->bv_len);
2403			else
2404				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2405				   "incomplete page write in btrfs with offset %u and "
2406				   "length %u",
2407					bvec->bv_offset, bvec->bv_len);
2408		}
2409
2410		start = page_offset(page);
2411		end = start + bvec->bv_offset + bvec->bv_len - 1;
2412
2413		if (end_extent_writepage(page, err, start, end))
2414			continue;
2415
2416		end_page_writeback(page);
2417	}
2418
2419	bio_put(bio);
2420}
2421
2422static void
2423endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2424			      int uptodate)
2425{
2426	struct extent_state *cached = NULL;
2427	u64 end = start + len - 1;
2428
2429	if (uptodate && tree->track_uptodate)
2430		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2431	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2432}
2433
2434/*
2435 * after a readpage IO is done, we need to:
2436 * clear the uptodate bits on error
2437 * set the uptodate bits if things worked
2438 * set the page up to date if all extents in the tree are uptodate
2439 * clear the lock bit in the extent tree
2440 * unlock the page if there are no other extents locked for it
2441 *
2442 * Scheduling is not allowed, so the extent state tree is expected
2443 * to have one and only one object corresponding to this IO.
2444 */
2445static void end_bio_extent_readpage(struct bio *bio, int err)
2446{
2447	struct bio_vec *bvec;
2448	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2449	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2450	struct extent_io_tree *tree;
2451	u64 offset = 0;
2452	u64 start;
2453	u64 end;
2454	u64 len;
2455	u64 extent_start = 0;
2456	u64 extent_len = 0;
2457	int mirror;
2458	int ret;
2459	int i;
2460
2461	if (err)
2462		uptodate = 0;
2463
2464	bio_for_each_segment_all(bvec, bio, i) {
2465		struct page *page = bvec->bv_page;
2466		struct inode *inode = page->mapping->host;
2467
2468		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2469			 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
2470			 io_bio->mirror_num);
2471		tree = &BTRFS_I(inode)->io_tree;
2472
2473		/* We always issue full-page reads, but if some block
2474		 * in a page fails to read, blk_update_request() will
2475		 * advance bv_offset and adjust bv_len to compensate.
2476		 * Print a warning for nonzero offsets, and an error
2477		 * if they don't add up to a full page.  */
2478		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2479			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2480				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2481				   "partial page read in btrfs with offset %u and length %u",
2482					bvec->bv_offset, bvec->bv_len);
2483			else
2484				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2485				   "incomplete page read in btrfs with offset %u and "
2486				   "length %u",
2487					bvec->bv_offset, bvec->bv_len);
2488		}
2489
2490		start = page_offset(page);
2491		end = start + bvec->bv_offset + bvec->bv_len - 1;
2492		len = bvec->bv_len;
2493
2494		mirror = io_bio->mirror_num;
2495		if (likely(uptodate && tree->ops &&
2496			   tree->ops->readpage_end_io_hook)) {
2497			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2498							      page, start, end,
2499							      mirror);
2500			if (ret)
2501				uptodate = 0;
2502			else
2503				clean_io_failure(start, page);
2504		}
2505
2506		if (likely(uptodate))
2507			goto readpage_ok;
2508
2509		if (tree->ops && tree->ops->readpage_io_failed_hook) {
2510			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2511			if (!ret && !err &&
2512			    test_bit(BIO_UPTODATE, &bio->bi_flags))
2513				uptodate = 1;
2514		} else {
2515			/*
2516			 * The generic bio_readpage_error handles errors the
2517			 * following way: If possible, new read requests are
2518			 * created and submitted and will end up in
2519			 * end_bio_extent_readpage as well (if we're lucky, not
2520			 * in the !uptodate case). In that case it returns 0 and
2521			 * we just go on with the next page in our bio. If it
2522			 * can't handle the error it will return -EIO and we
2523			 * remain responsible for that page.
2524			 */
2525			ret = bio_readpage_error(bio, offset, page, start, end,
2526						 mirror);
2527			if (ret == 0) {
2528				uptodate =
2529					test_bit(BIO_UPTODATE, &bio->bi_flags);
2530				if (err)
2531					uptodate = 0;
2532				continue;
2533			}
2534		}
2535readpage_ok:
2536		if (likely(uptodate)) {
2537			loff_t i_size = i_size_read(inode);
2538			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2539			unsigned offset;
2540
2541			/* Zero out the end if this page straddles i_size */
2542			offset = i_size & (PAGE_CACHE_SIZE-1);
2543			if (page->index == end_index && offset)
2544				zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2545			SetPageUptodate(page);
2546		} else {
2547			ClearPageUptodate(page);
2548			SetPageError(page);
2549		}
2550		unlock_page(page);
2551		offset += len;
2552
2553		if (unlikely(!uptodate)) {
2554			if (extent_len) {
2555				endio_readpage_release_extent(tree,
2556							      extent_start,
2557							      extent_len, 1);
2558				extent_start = 0;
2559				extent_len = 0;
2560			}
2561			endio_readpage_release_extent(tree, start,
2562						      end - start + 1, 0);
2563		} else if (!extent_len) {
2564			extent_start = start;
2565			extent_len = end + 1 - start;
2566		} else if (extent_start + extent_len == start) {
2567			extent_len += end + 1 - start;
2568		} else {
2569			endio_readpage_release_extent(tree, extent_start,
2570						      extent_len, uptodate);
2571			extent_start = start;
2572			extent_len = end + 1 - start;
2573		}
2574	}
2575
2576	if (extent_len)
2577		endio_readpage_release_extent(tree, extent_start, extent_len,
2578					      uptodate);
2579	if (io_bio->end_io)
2580		io_bio->end_io(io_bio, err);
2581	bio_put(bio);
2582}
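/*
 * Added commentary (not part of the original source) on the
 * extent_start/extent_len bookkeeping above: the loop coalesces
 * consecutive good pages into a single range so the extent tree is
 * unlocked with as few calls as possible.  Two uptodate 4K pages covering
 * bytes 0-8191 end in one endio_readpage_release_extent(tree, 0, 8192, 1)
 * after the loop rather than one call per page; a failed page flushes any
 * pending range first and is then released on its own with uptodate == 0.
 */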
2583
2584/*
2585 * this allocates from the btrfs_bioset.  We're returning a bio right now
2586 * but you can call btrfs_io_bio for the appropriate container_of magic
2587 */
2588struct bio *
2589btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2590		gfp_t gfp_flags)
2591{
2592	struct btrfs_io_bio *btrfs_bio;
2593	struct bio *bio;
2594
2595	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2596
2597	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2598		while (!bio && (nr_vecs /= 2)) {
2599			bio = bio_alloc_bioset(gfp_flags,
2600					       nr_vecs, btrfs_bioset);
2601		}
2602	}
2603
2604	if (bio) {
2605		bio->bi_bdev = bdev;
2606		bio->bi_iter.bi_sector = first_sector;
2607		btrfs_bio = btrfs_io_bio(bio);
2608		btrfs_bio->csum = NULL;
2609		btrfs_bio->csum_allocated = NULL;
2610		btrfs_bio->end_io = NULL;
2611	}
2612	return bio;
2613}
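/*
 * Added commentary (not part of the original source): when the allocation
 * fails while we are in memory reclaim (PF_MEMALLOC), the loop above keeps
 * halving nr_vecs and retrying, on the assumption that a smaller bio is
 * more likely to fit the memory reserves than no bio at all.  Callers
 * still have to cope with a NULL return.
 */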
2614
2615struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2616{
2617	return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2618}
2619
2620
2621/* this also allocates from the btrfs_bioset */
2622struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2623{
2624	struct btrfs_io_bio *btrfs_bio;
2625	struct bio *bio;
2626
2627	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2628	if (bio) {
2629		btrfs_bio = btrfs_io_bio(bio);
2630		btrfs_bio->csum = NULL;
2631		btrfs_bio->csum_allocated = NULL;
2632		btrfs_bio->end_io = NULL;
2633	}
2634	return bio;
2635}
2636
2637
2638static int __must_check submit_one_bio(int rw, struct bio *bio,
2639				       int mirror_num, unsigned long bio_flags)
2640{
2641	int ret = 0;
2642	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2643	struct page *page = bvec->bv_page;
2644	struct extent_io_tree *tree = bio->bi_private;
2645	u64 start;
2646
2647	start = page_offset(page) + bvec->bv_offset;
2648
2649	bio->bi_private = NULL;
2650
2651	bio_get(bio);
2652
2653	if (tree->ops && tree->ops->submit_bio_hook)
2654		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2655					   mirror_num, bio_flags, start);
2656	else
2657		btrfsic_submit_bio(rw, bio);
2658
2659	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2660		ret = -EOPNOTSUPP;
2661	bio_put(bio);
2662	return ret;
2663}
2664
2665static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2666		     unsigned long offset, size_t size, struct bio *bio,
2667		     unsigned long bio_flags)
2668{
2669	int ret = 0;
2670	if (tree->ops && tree->ops->merge_bio_hook)
2671		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2672						bio_flags);
2673	BUG_ON(ret < 0);
2674	return ret;
2675
2676}
2677
2678static int submit_extent_page(int rw, struct extent_io_tree *tree,
2679			      struct page *page, sector_t sector,
2680			      size_t size, unsigned long offset,
2681			      struct block_device *bdev,
2682			      struct bio **bio_ret,
2683			      unsigned long max_pages,
2684			      bio_end_io_t end_io_func,
2685			      int mirror_num,
2686			      unsigned long prev_bio_flags,
2687			      unsigned long bio_flags)
2688{
2689	int ret = 0;
2690	struct bio *bio;
2691	int nr;
2692	int contig = 0;
2693	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2694	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2695	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2696
2697	if (bio_ret && *bio_ret) {
2698		bio = *bio_ret;
2699		if (old_compressed)
2700			contig = bio->bi_iter.bi_sector == sector;
2701		else
2702			contig = bio_end_sector(bio) == sector;
2703
2704		if (prev_bio_flags != bio_flags || !contig ||
2705		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2706		    bio_add_page(bio, page, page_size, offset) < page_size) {
2707			ret = submit_one_bio(rw, bio, mirror_num,
2708					     prev_bio_flags);
2709			if (ret < 0)
2710				return ret;
2711			bio = NULL;
2712		} else {
2713			return 0;
2714		}
2715	}
2716	if (this_compressed)
2717		nr = BIO_MAX_PAGES;
2718	else
2719		nr = bio_get_nr_vecs(bdev);
2720
2721	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2722	if (!bio)
2723		return -ENOMEM;
2724
2725	bio_add_page(bio, page, page_size, offset);
2726	bio->bi_end_io = end_io_func;
2727	bio->bi_private = tree;
2728
2729	if (bio_ret)
2730		*bio_ret = bio;
2731	else
2732		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2733
2734	return ret;
2735}
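/*
 * Added commentary (not part of the original source) on the contiguity
 * test above: every page of a compressed extent maps to the same disk
 * start, so compressed bios are matched on the bio's starting sector,
 * while regular IO requires the new page to begin exactly where the bio
 * currently ends (bio_end_sector).  If the flags differ, the pages are not
 * contiguous, the merge hook refuses, or bio_add_page() cannot take the
 * whole page, the old bio is submitted and a fresh one is started.
 */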
2736
2737static void attach_extent_buffer_page(struct extent_buffer *eb,
2738				      struct page *page)
2739{
2740	if (!PagePrivate(page)) {
2741		SetPagePrivate(page);
2742		page_cache_get(page);
2743		set_page_private(page, (unsigned long)eb);
2744	} else {
2745		WARN_ON(page->private != (unsigned long)eb);
2746	}
2747}
2748
2749void set_page_extent_mapped(struct page *page)
2750{
2751	if (!PagePrivate(page)) {
2752		SetPagePrivate(page);
2753		page_cache_get(page);
2754		set_page_private(page, EXTENT_PAGE_PRIVATE);
2755	}
2756}
2757
2758static struct extent_map *
2759__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2760		 u64 start, u64 len, get_extent_t *get_extent,
2761		 struct extent_map **em_cached)
2762{
2763	struct extent_map *em;
2764
2765	if (em_cached && *em_cached) {
2766		em = *em_cached;
2767		if (extent_map_in_tree(em) && start >= em->start &&
2768		    start < extent_map_end(em)) {
2769			atomic_inc(&em->refs);
2770			return em;
2771		}
2772
2773		free_extent_map(em);
2774		*em_cached = NULL;
2775	}
2776
2777	em = get_extent(inode, page, pg_offset, start, len, 0);
2778	if (em_cached && !IS_ERR_OR_NULL(em)) {
2779		BUG_ON(*em_cached);
2780		atomic_inc(&em->refs);
2781		*em_cached = em;
2782	}
2783	return em;
2784}
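/*
 * Added commentary (not part of the original source): __get_extent_map()
 * keeps its own reference on the cached map (the atomic_inc above), so
 * *em_cached stays valid across calls while every caller still does its
 * usual free_extent_map().  Only the readpages path passes a cache, since
 * consecutive pages there frequently fall inside the same extent map.
 */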
2785/*
2786 * basic readpage implementation.  Locked extent state structs are inserted
2787 * into the tree and removed when the IO is done (by the end_io
2788 * handlers)
2789 * XXX JDM: This needs looking at to ensure proper page locking
2790 */
2791static int __do_readpage(struct extent_io_tree *tree,
2792			 struct page *page,
2793			 get_extent_t *get_extent,
2794			 struct extent_map **em_cached,
2795			 struct bio **bio, int mirror_num,
2796			 unsigned long *bio_flags, int rw)
2797{
2798	struct inode *inode = page->mapping->host;
2799	u64 start = page_offset(page);
2800	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2801	u64 end;
2802	u64 cur = start;
2803	u64 extent_offset;
2804	u64 last_byte = i_size_read(inode);
2805	u64 block_start;
2806	u64 cur_end;
2807	sector_t sector;
2808	struct extent_map *em;
2809	struct block_device *bdev;
2810	int ret;
2811	int nr = 0;
2812	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2813	size_t pg_offset = 0;
2814	size_t iosize;
2815	size_t disk_io_size;
2816	size_t blocksize = inode->i_sb->s_blocksize;
2817	unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2818
2819	set_page_extent_mapped(page);
2820
2821	end = page_end;
2822	if (!PageUptodate(page)) {
2823		if (cleancache_get_page(page) == 0) {
2824			BUG_ON(blocksize != PAGE_SIZE);
2825			unlock_extent(tree, start, end);
2826			goto out;
2827		}
2828	}
2829
2830	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2831		char *userpage;
2832		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2833
2834		if (zero_offset) {
2835			iosize = PAGE_CACHE_SIZE - zero_offset;
2836			userpage = kmap_atomic(page);
2837			memset(userpage + zero_offset, 0, iosize);
2838			flush_dcache_page(page);
2839			kunmap_atomic(userpage);
2840		}
2841	}
2842	while (cur <= end) {
2843		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2844
2845		if (cur >= last_byte) {
2846			char *userpage;
2847			struct extent_state *cached = NULL;
2848
2849			iosize = PAGE_CACHE_SIZE - pg_offset;
2850			userpage = kmap_atomic(page);
2851			memset(userpage + pg_offset, 0, iosize);
2852			flush_dcache_page(page);
2853			kunmap_atomic(userpage);
2854			set_extent_uptodate(tree, cur, cur + iosize - 1,
2855					    &cached, GFP_NOFS);
2856			if (!parent_locked)
2857				unlock_extent_cached(tree, cur,
2858						     cur + iosize - 1,
2859						     &cached, GFP_NOFS);
2860			break;
2861		}
2862		em = __get_extent_map(inode, page, pg_offset, cur,
2863				      end - cur + 1, get_extent, em_cached);
2864		if (IS_ERR_OR_NULL(em)) {
2865			SetPageError(page);
2866			if (!parent_locked)
2867				unlock_extent(tree, cur, end);
2868			break;
2869		}
2870		extent_offset = cur - em->start;
2871		BUG_ON(extent_map_end(em) <= cur);
2872		BUG_ON(end < cur);
2873
2874		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2875			this_bio_flag |= EXTENT_BIO_COMPRESSED;
2876			extent_set_compress_type(&this_bio_flag,
2877						 em->compress_type);
2878		}
2879
2880		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2881		cur_end = min(extent_map_end(em) - 1, end);
2882		iosize = ALIGN(iosize, blocksize);
2883		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2884			disk_io_size = em->block_len;
2885			sector = em->block_start >> 9;
2886		} else {
2887			sector = (em->block_start + extent_offset) >> 9;
2888			disk_io_size = iosize;
2889		}
2890		bdev = em->bdev;
2891		block_start = em->block_start;
2892		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2893			block_start = EXTENT_MAP_HOLE;
2894		free_extent_map(em);
2895		em = NULL;
2896
2897		/* we've found a hole, just zero and go on */
2898		if (block_start == EXTENT_MAP_HOLE) {
2899			char *userpage;
2900			struct extent_state *cached = NULL;
2901
2902			userpage = kmap_atomic(page);
2903			memset(userpage + pg_offset, 0, iosize);
2904			flush_dcache_page(page);
2905			kunmap_atomic(userpage);
2906
2907			set_extent_uptodate(tree, cur, cur + iosize - 1,
2908					    &cached, GFP_NOFS);
2909			unlock_extent_cached(tree, cur, cur + iosize - 1,
2910					     &cached, GFP_NOFS);
2911			cur = cur + iosize;
2912			pg_offset += iosize;
2913			continue;
2914		}
2915		/* the get_extent function already copied into the page */
2916		if (test_range_bit(tree, cur, cur_end,
2917				   EXTENT_UPTODATE, 1, NULL)) {
2918			check_page_uptodate(tree, page);
2919			if (!parent_locked)
2920				unlock_extent(tree, cur, cur + iosize - 1);
2921			cur = cur + iosize;
2922			pg_offset += iosize;
2923			continue;
2924		}
2925		/* we have an inline extent but it didn't get marked up
2926		 * to date.  Error out
2927		 */
2928		if (block_start == EXTENT_MAP_INLINE) {
2929			SetPageError(page);
2930			if (!parent_locked)
2931				unlock_extent(tree, cur, cur + iosize - 1);
2932			cur = cur + iosize;
2933			pg_offset += iosize;
2934			continue;
2935		}
2936
2937		pnr -= page->index;
2938		ret = submit_extent_page(rw, tree, page,
2939					 sector, disk_io_size, pg_offset,
2940					 bdev, bio, pnr,
2941					 end_bio_extent_readpage, mirror_num,
2942					 *bio_flags,
2943					 this_bio_flag);
2944		if (!ret) {
2945			nr++;
2946			*bio_flags = this_bio_flag;
2947		} else {
2948			SetPageError(page);
2949			if (!parent_locked)
2950				unlock_extent(tree, cur, cur + iosize - 1);
2951		}
2952		cur = cur + iosize;
2953		pg_offset += iosize;
2954	}
2955out:
2956	if (!nr) {
2957		if (!PageError(page))
2958			SetPageUptodate(page);
2959		unlock_page(page);
2960	}
2961	return 0;
2962}
2963
2964static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2965					     struct page *pages[], int nr_pages,
2966					     u64 start, u64 end,
2967					     get_extent_t *get_extent,
2968					     struct extent_map **em_cached,
2969					     struct bio **bio, int mirror_num,
2970					     unsigned long *bio_flags, int rw)
2971{
2972	struct inode *inode;
2973	struct btrfs_ordered_extent *ordered;
2974	int index;
2975
2976	inode = pages[0]->mapping->host;
2977	while (1) {
2978		lock_extent(tree, start, end);
2979		ordered = btrfs_lookup_ordered_range(inode, start,
2980						     end - start + 1);
2981		if (!ordered)
2982			break;
2983		unlock_extent(tree, start, end);
2984		btrfs_start_ordered_extent(inode, ordered, 1);
2985		btrfs_put_ordered_extent(ordered);
2986	}
2987
2988	for (index = 0; index < nr_pages; index++) {
2989		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
2990			      mirror_num, bio_flags, rw);
2991		page_cache_release(pages[index]);
2992	}
2993}
2994
2995static void __extent_readpages(struct extent_io_tree *tree,
2996			       struct page *pages[],
2997			       int nr_pages, get_extent_t *get_extent,
2998			       struct extent_map **em_cached,
2999			       struct bio **bio, int mirror_num,
3000			       unsigned long *bio_flags, int rw)
3001{
3002	u64 start = 0;
3003	u64 end = 0;
3004	u64 page_start;
3005	int index;
3006	int first_index = 0;
3007
3008	for (index = 0; index < nr_pages; index++) {
3009		page_start = page_offset(pages[index]);
3010		if (!end) {
3011			start = page_start;
3012			end = start + PAGE_CACHE_SIZE - 1;
3013			first_index = index;
3014		} else if (end + 1 == page_start) {
3015			end += PAGE_CACHE_SIZE;
3016		} else {
3017			__do_contiguous_readpages(tree, &pages[first_index],
3018						  index - first_index, start,
3019						  end, get_extent, em_cached,
3020						  bio, mirror_num, bio_flags,
3021						  rw);
3022			start = page_start;
3023			end = start + PAGE_CACHE_SIZE - 1;
3024			first_index = index;
3025		}
3026	}
3027
3028	if (end)
3029		__do_contiguous_readpages(tree, &pages[first_index],
3030					  index - first_index, start,
3031					  end, get_extent, em_cached, bio,
3032					  mirror_num, bio_flags, rw);
3033}
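/*
 * Small example (added commentary, not part of the original source): for
 * pages at file offsets 0K, 4K, 8K and 20K, the loop above issues two
 * batches: __do_contiguous_readpages() once for [0K, 12K) covering the
 * first three pages and once for [20K, 24K).  Each ordered-extent wait
 * and extent lock thus covers a whole contiguous run instead of a single
 * page.
 */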
3034
3035static int __extent_read_full_page(struct extent_io_tree *tree,
3036				   struct page *page,
3037				   get_extent_t *get_extent,
3038				   struct bio **bio, int mirror_num,
3039				   unsigned long *bio_flags, int rw)
3040{
3041	struct inode *inode = page->mapping->host;
3042	struct btrfs_ordered_extent *ordered;
3043	u64 start = page_offset(page);
3044	u64 end = start + PAGE_CACHE_SIZE - 1;
3045	int ret;
3046
3047	while (1) {
3048		lock_extent(tree, start, end);
3049		ordered = btrfs_lookup_ordered_extent(inode, start);
3050		if (!ordered)
3051			break;
3052		unlock_extent(tree, start, end);
3053		btrfs_start_ordered_extent(inode, ordered, 1);
3054		btrfs_put_ordered_extent(ordered);
3055	}
3056
3057	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3058			    bio_flags, rw);
3059	return ret;
3060}
3061
3062int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3063			    get_extent_t *get_extent, int mirror_num)
3064{
3065	struct bio *bio = NULL;
3066	unsigned long bio_flags = 0;
3067	int ret;
3068
3069	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3070				      &bio_flags, READ);
3071	if (bio)
3072		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3073	return ret;
3074}
3075
3076int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3077				 get_extent_t *get_extent, int mirror_num)
3078{
3079	struct bio *bio = NULL;
3080	unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3081	int ret;
3082
3083	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3084				      &bio_flags, READ);
3085	if (bio)
3086		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3087	return ret;
3088}
3089
3090static noinline void update_nr_written(struct page *page,
3091				      struct writeback_control *wbc,
3092				      unsigned long nr_written)
3093{
3094	wbc->nr_to_write -= nr_written;
3095	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3096	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3097		page->mapping->writeback_index = page->index + nr_written;
3098}
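/*
 * Added commentary (not part of the original source): writeback_index is
 * only advanced for range_cyclic writeback, or for a whole-file
 * data-integrity sweep (range 0..LLONG_MAX with budget left), so that a
 * later cyclic pass resumes right after the pages just written instead of
 * rescanning them from the start.
 */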
3099
3100/*
3101 * the writepage semantics are similar to regular writepage.  extent
3102 * records are inserted to lock ranges in the tree, and as dirty areas
3103 * are found, they are marked writeback.  Then the lock bits are removed
3104 * and the end_io handler clears the writeback ranges
3105 */
3106static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3107			      void *data)
3108{
3109	struct inode *inode = page->mapping->host;
3110	struct extent_page_data *epd = data;
3111	struct extent_io_tree *tree = epd->tree;
3112	u64 start = page_offset(page);
3113	u64 delalloc_start;
3114	u64 page_end = start + PAGE_CACHE_SIZE - 1;
3115	u64 end;
3116	u64 cur = start;
3117	u64 extent_offset;
3118	u64 last_byte = i_size_read(inode);
3119	u64 block_start;
3120	u64 iosize;
3121	sector_t sector;
3122	struct extent_state *cached_state = NULL;
3123	struct extent_map *em;
3124	struct block_device *bdev;
3125	int ret;
3126	int nr = 0;
3127	size_t pg_offset = 0;
3128	size_t blocksize;
3129	loff_t i_size = i_size_read(inode);
3130	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3131	u64 nr_delalloc;
3132	u64 delalloc_end;
3133	int page_started;
3134	int compressed;
3135	int write_flags;
3136	unsigned long nr_written = 0;
3137	bool fill_delalloc = true;
3138
3139	if (wbc->sync_mode == WB_SYNC_ALL)
3140		write_flags = WRITE_SYNC;
3141	else
3142		write_flags = WRITE;
3143
3144	trace___extent_writepage(page, inode, wbc);
3145
3146	WARN_ON(!PageLocked(page));
3147
3148	ClearPageError(page);
3149
3150	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3151	if (page->index > end_index ||
3152	   (page->index == end_index && !pg_offset)) {
3153		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3154		unlock_page(page);
3155		return 0;
3156	}
3157
3158	if (page->index == end_index) {
3159		char *userpage;
3160
3161		userpage = kmap_atomic(page);
3162		memset(userpage + pg_offset, 0,
3163		       PAGE_CACHE_SIZE - pg_offset);
3164		kunmap_atomic(userpage);
3165		flush_dcache_page(page);
3166	}
3167	pg_offset = 0;
3168
3169	set_page_extent_mapped(page);
3170
3171	if (!tree->ops || !tree->ops->fill_delalloc)
3172		fill_delalloc = false;
3173
3174	delalloc_start = start;
3175	delalloc_end = 0;
3176	page_started = 0;
3177	if (!epd->extent_locked && fill_delalloc) {
3178		u64 delalloc_to_write = 0;
3179		/*
3180		 * make sure the wbc mapping index is at least updated
3181		 * to this page.
3182		 */
3183		update_nr_written(page, wbc, 0);
3184
3185		while (delalloc_end < page_end) {
3186			nr_delalloc = find_lock_delalloc_range(inode, tree,
3187						       page,
3188						       &delalloc_start,
3189						       &delalloc_end,
3190						       128 * 1024 * 1024);
3191			if (nr_delalloc == 0) {
3192				delalloc_start = delalloc_end + 1;
3193				continue;
3194			}
3195			ret = tree->ops->fill_delalloc(inode, page,
3196						       delalloc_start,
3197						       delalloc_end,
3198						       &page_started,
3199						       &nr_written);
3200			/* File system has been set read-only */
3201			if (ret) {
3202				SetPageError(page);
3203				goto done;
3204			}
3205			/*
3206			 * delalloc_end is already one less than the total
3207			 * length, so we don't subtract one from
3208			 * PAGE_CACHE_SIZE
3209			 */
3210			delalloc_to_write += (delalloc_end - delalloc_start +
3211					      PAGE_CACHE_SIZE) >>
3212					      PAGE_CACHE_SHIFT;
3213			delalloc_start = delalloc_end + 1;
3214		}
3215		if (wbc->nr_to_write < delalloc_to_write) {
3216			int thresh = 8192;
3217
3218			if (delalloc_to_write < thresh * 2)
3219				thresh = delalloc_to_write;
3220			wbc->nr_to_write = min_t(u64, delalloc_to_write,
3221						 thresh);
3222		}
3223
3224		/* did the fill delalloc function already unlock and start
3225		 * the IO?
3226		 */
3227		if (page_started) {
3228			ret = 0;
3229			/*
3230			 * we've unlocked the page, so we can't update
3231			 * the mapping's writeback index, just update
3232			 * nr_to_write.
3233			 */
3234			wbc->nr_to_write -= nr_written;
3235			goto done_unlocked;
3236		}
3237	}
3238	if (tree->ops && tree->ops->writepage_start_hook) {
3239		ret = tree->ops->writepage_start_hook(page, start,
3240						      page_end);
3241		if (ret) {
3242			/* Fixup worker will requeue */
3243			if (ret == -EBUSY)
3244				wbc->pages_skipped++;
3245			else
3246				redirty_page_for_writepage(wbc, page);
3247			update_nr_written(page, wbc, nr_written);
3248			unlock_page(page);
3249			ret = 0;
3250			goto done_unlocked;
3251		}
3252	}
3253
3254	/*
3255	 * we don't want to touch the inode after unlocking the page,
3256	 * so we update the mapping writeback index now
3257	 */
3258	update_nr_written(page, wbc, nr_written + 1);
3259
3260	end = page_end;
3261	if (last_byte <= start) {
3262		if (tree->ops && tree->ops->writepage_end_io_hook)
3263			tree->ops->writepage_end_io_hook(page, start,
3264							 page_end, NULL, 1);
3265		goto done;
3266	}
3267
3268	blocksize = inode->i_sb->s_blocksize;
3269
3270	while (cur <= end) {
3271		if (cur >= last_byte) {
3272			if (tree->ops && tree->ops->writepage_end_io_hook)
3273				tree->ops->writepage_end_io_hook(page, cur,
3274							 page_end, NULL, 1);
3275			break;
3276		}
3277		em = epd->get_extent(inode, page, pg_offset, cur,
3278				     end - cur + 1, 1);
3279		if (IS_ERR_OR_NULL(em)) {
3280			SetPageError(page);
3281			break;
3282		}
3283
3284		extent_offset = cur - em->start;
3285		BUG_ON(extent_map_end(em) <= cur);
3286		BUG_ON(end < cur);
3287		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3288		iosize = ALIGN(iosize, blocksize);
3289		sector = (em->block_start + extent_offset) >> 9;
3290		bdev = em->bdev;
3291		block_start = em->block_start;
3292		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3293		free_extent_map(em);
3294		em = NULL;
3295
3296		/*
3297		 * compressed and inline extents are written through other
3298		 * paths in the FS
3299		 */
3300		if (compressed || block_start == EXTENT_MAP_HOLE ||
3301		    block_start == EXTENT_MAP_INLINE) {
3302			/*
3303			 * end_io notification does not happen here for
3304			 * compressed extents
3305			 */
3306			if (!compressed && tree->ops &&
3307			    tree->ops->writepage_end_io_hook)
3308				tree->ops->writepage_end_io_hook(page, cur,
3309							 cur + iosize - 1,
3310							 NULL, 1);
3311			else if (compressed) {
3312				/* we don't want to end_page_writeback on
3313				 * a compressed extent.  this happens
3314				 * elsewhere
3315				 */
3316				nr++;
3317			}
3318
3319			cur += iosize;
3320			pg_offset += iosize;
3321			continue;
3322		}
3323		/* leave this out until we have a page_mkwrite call */
3324		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3325				   EXTENT_DIRTY, 0, NULL)) {
3326			cur = cur + iosize;
3327			pg_offset += iosize;
3328			continue;
3329		}
3330
3331		if (tree->ops && tree->ops->writepage_io_hook) {
3332			ret = tree->ops->writepage_io_hook(page, cur,
3333						cur + iosize - 1);
3334		} else {
3335			ret = 0;
3336		}
3337		if (ret) {
3338			SetPageError(page);
3339		} else {
3340			unsigned long max_nr = end_index + 1;
3341
3342			set_range_writeback(tree, cur, cur + iosize - 1);
3343			if (!PageWriteback(page)) {
3344				btrfs_err(BTRFS_I(inode)->root->fs_info,
3345					   "page %lu not writeback, cur %llu end %llu",
3346				       page->index, cur, end);
3347			}
3348
3349			ret = submit_extent_page(write_flags, tree, page,
3350						 sector, iosize, pg_offset,
3351						 bdev, &epd->bio, max_nr,
3352						 end_bio_extent_writepage,
3353						 0, 0, 0);
3354			if (ret)
3355				SetPageError(page);
3356		}
3357		cur = cur + iosize;
3358		pg_offset += iosize;
3359		nr++;
3360	}
3361done:
3362	if (nr == 0) {
3363		/* make sure the mapping tag for page dirty gets cleared */
3364		set_page_writeback(page);
3365		end_page_writeback(page);
3366	}
3367	unlock_page(page);
3368
3369done_unlocked:
3370
3371	/* drop our reference on any cached states */
3372	free_extent_state(cached_state);
3373	return 0;
3374}
3375
3376static int eb_wait(void *word)
3377{
3378	io_schedule();
3379	return 0;
3380}
3381
3382void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3383{
3384	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3385		    TASK_UNINTERRUPTIBLE);
3386}
3387
3388static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3389				     struct btrfs_fs_info *fs_info,
3390				     struct extent_page_data *epd)
3391{
3392	unsigned long i, num_pages;
3393	int flush = 0;
3394	int ret = 0;
3395
3396	if (!btrfs_try_tree_write_lock(eb)) {
3397		flush = 1;
3398		flush_write_bio(epd);
3399		btrfs_tree_lock(eb);
3400	}
3401
3402	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3403		btrfs_tree_unlock(eb);
3404		if (!epd->sync_io)
3405			return 0;
3406		if (!flush) {
3407			flush_write_bio(epd);
3408			flush = 1;
3409		}
3410		while (1) {
3411			wait_on_extent_buffer_writeback(eb);
3412			btrfs_tree_lock(eb);
3413			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3414				break;
3415			btrfs_tree_unlock(eb);
3416		}
3417	}
3418
3419	/*
3420	 * We need to do this to prevent races in people who check if the eb is
3421	 * under IO since we can end up having no IO bits set for a short period
3422	 * of time.
3423	 */
3424	spin_lock(&eb->refs_lock);
3425	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3426		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3427		spin_unlock(&eb->refs_lock);
3428		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3429		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
3430				     -eb->len,
3431				     fs_info->dirty_metadata_batch);
3432		ret = 1;
3433	} else {
3434		spin_unlock(&eb->refs_lock);
3435	}
3436
3437	btrfs_tree_unlock(eb);
3438
3439	if (!ret)
3440		return ret;
3441
3442	num_pages = num_extent_pages(eb->start, eb->len);
3443	for (i = 0; i < num_pages; i++) {
3444		struct page *p = extent_buffer_page(eb, i);
3445
3446		if (!trylock_page(p)) {
3447			if (!flush) {
3448				flush_write_bio(epd);
3449				flush = 1;
3450			}
3451			lock_page(p);
3452		}
3453	}
3454
3455	return ret;
3456}
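/*
 * Added commentary (not part of the original source) on the return value:
 * lock_extent_buffer_for_io() returns 1 when the buffer was dirty and has
 * been moved to writeback with all of its pages locked, and 0 when the
 * buffer was clean or someone else owns the IO, in which case the caller
 * just drops its reference and skips the buffer; see the
 * free_extent_buffer()/continue pairing in btree_write_cache_pages() below.
 */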
3457
3458static void end_extent_buffer_writeback(struct extent_buffer *eb)
3459{
3460	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3461	smp_mb__after_clear_bit();
3462	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3463}
3464
3465static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3466{
3467	struct bio_vec *bvec;
3468	struct extent_buffer *eb;
3469	int i, done;
3470
3471	bio_for_each_segment_all(bvec, bio, i) {
3472		struct page *page = bvec->bv_page;
3473
3474		eb = (struct extent_buffer *)page->private;
3475		BUG_ON(!eb);
3476		done = atomic_dec_and_test(&eb->io_pages);
3477
3478		if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3479			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3480			ClearPageUptodate(page);
3481			SetPageError(page);
3482		}
3483
3484		end_page_writeback(page);
3485
3486		if (!done)
3487			continue;
3488
3489		end_extent_buffer_writeback(eb);
3490	}
3491
3492	bio_put(bio);
3493}
3494
3495static int write_one_eb(struct extent_buffer *eb,
3496			struct btrfs_fs_info *fs_info,
3497			struct writeback_control *wbc,
3498			struct extent_page_data *epd)
3499{
3500	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3501	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3502	u64 offset = eb->start;
3503	unsigned long i, num_pages;
3504	unsigned long bio_flags = 0;
3505	int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3506	int ret = 0;
3507
3508	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3509	num_pages = num_extent_pages(eb->start, eb->len);
3510	atomic_set(&eb->io_pages, num_pages);
3511	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3512		bio_flags = EXTENT_BIO_TREE_LOG;
3513
3514	for (i = 0; i < num_pages; i++) {
3515		struct page *p = extent_buffer_page(eb, i);
3516
3517		clear_page_dirty_for_io(p);
3518		set_page_writeback(p);
3519		ret = submit_extent_page(rw, tree, p, offset >> 9,
3520					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3521					 -1, end_bio_extent_buffer_writepage,
3522					 0, epd->bio_flags, bio_flags);
3523		epd->bio_flags = bio_flags;
3524		if (ret) {
3525			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3526			SetPageError(p);
3527			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3528				end_extent_buffer_writeback(eb);
3529			ret = -EIO;
3530			break;
3531		}
3532		offset += PAGE_CACHE_SIZE;
3533		update_nr_written(p, wbc, 1);
3534		unlock_page(p);
3535	}
3536
3537	if (unlikely(ret)) {
3538		for (; i < num_pages; i++) {
3539			struct page *p = extent_buffer_page(eb, i);
3540			unlock_page(p);
3541		}
3542	}
3543
3544	return ret;
3545}
3546
3547int btree_write_cache_pages(struct address_space *mapping,
3548				   struct writeback_control *wbc)
3549{
3550	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3551	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3552	struct extent_buffer *eb, *prev_eb = NULL;
3553	struct extent_page_data epd = {
3554		.bio = NULL,
3555		.tree = tree,
3556		.extent_locked = 0,
3557		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3558		.bio_flags = 0,
3559	};
3560	int ret = 0;
3561	int done = 0;
3562	int nr_to_write_done = 0;
3563	struct pagevec pvec;
3564	int nr_pages;
3565	pgoff_t index;
3566	pgoff_t end;		/* Inclusive */
3567	int scanned = 0;
3568	int tag;
3569
3570	pagevec_init(&pvec, 0);
3571	if (wbc->range_cyclic) {
3572		index = mapping->writeback_index; /* Start from prev offset */
3573		end = -1;
3574	} else {
3575		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3576		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3577		scanned = 1;
3578	}
3579	if (wbc->sync_mode == WB_SYNC_ALL)
3580		tag = PAGECACHE_TAG_TOWRITE;
3581	else
3582		tag = PAGECACHE_TAG_DIRTY;
3583retry:
3584	if (wbc->sync_mode == WB_SYNC_ALL)
3585		tag_pages_for_writeback(mapping, index, end);
3586	while (!done && !nr_to_write_done && (index <= end) &&
3587	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3588			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3589		unsigned i;
3590
3591		scanned = 1;
3592		for (i = 0; i < nr_pages; i++) {
3593			struct page *page = pvec.pages[i];
3594
3595			if (!PagePrivate(page))
3596				continue;
3597
3598			if (!wbc->range_cyclic && page->index > end) {
3599				done = 1;
3600				break;
3601			}
3602
3603			spin_lock(&mapping->private_lock);
3604			if (!PagePrivate(page)) {
3605				spin_unlock(&mapping->private_lock);
3606				continue;
3607			}
3608
3609			eb = (struct extent_buffer *)page->private;
3610
3611			/*
3612			 * Shouldn't happen and normally this would be a BUG_ON
3613			 * but no sense in crashing the user's box for something
3614			 * we can survive anyway.
3615			 */
3616			if (WARN_ON(!eb)) {
3617				spin_unlock(&mapping->private_lock);
3618				continue;
3619			}
3620
3621			if (eb == prev_eb) {
3622				spin_unlock(&mapping->private_lock);
3623				continue;
3624			}
3625
3626			ret = atomic_inc_not_zero(&eb->refs);
3627			spin_unlock(&mapping->private_lock);
3628			if (!ret)
3629				continue;
3630
3631			prev_eb = eb;
3632			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3633			if (!ret) {
3634				free_extent_buffer(eb);
3635				continue;
3636			}
3637
3638			ret = write_one_eb(eb, fs_info, wbc, &epd);
3639			if (ret) {
3640				done = 1;
3641				free_extent_buffer(eb);
3642				break;
3643			}
3644			free_extent_buffer(eb);
3645
3646			/*
3647			 * the filesystem may choose to bump up nr_to_write.
3648			 * We have to make sure to honor the new nr_to_write
3649			 * at any time
3650			 */
3651			nr_to_write_done = wbc->nr_to_write <= 0;
3652		}
3653		pagevec_release(&pvec);
3654		cond_resched();
3655	}
3656	if (!scanned && !done) {
3657		/*
3658		 * We hit the last page and there is more work to be done: wrap
3659		 * back to the start of the file
3660		 */
3661		scanned = 1;
3662		index = 0;
3663		goto retry;
3664	}
3665	flush_write_bio(&epd);
3666	return ret;
3667}
3668
3669/**
3670 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3671 * @mapping: address space structure to write
3672 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3673 * @writepage: function called for each page
3674 * @data: data passed to writepage function
 * @flush_fn: function used to flush any queued bio before blocking on a page
3675 *
3676 * If a page is already under I/O, write_cache_pages() skips it, even
3677 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3678 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3679 * and msync() need to guarantee that all the data which was dirty at the time
3680 * the call was made get new I/O started against them.  If wbc->sync_mode is
3681 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3682 * existing IO to complete.
3683 */
3684static int extent_write_cache_pages(struct extent_io_tree *tree,
3685			     struct address_space *mapping,
3686			     struct writeback_control *wbc,
3687			     writepage_t writepage, void *data,
3688			     void (*flush_fn)(void *))
3689{
3690	struct inode *inode = mapping->host;
3691	int ret = 0;
3692	int done = 0;
3693	int nr_to_write_done = 0;
3694	struct pagevec pvec;
3695	int nr_pages;
3696	pgoff_t index;
3697	pgoff_t end;		/* Inclusive */
3698	int scanned = 0;
3699	int tag;
3700
3701	/*
3702	 * We have to hold onto the inode so that ordered extents can do their
3703	 * work when the IO finishes.  The alternative to this is failing to add
3704	 * an ordered extent if the igrab() fails there and that is a huge pain
3705	 * to deal with, so instead just hold onto the inode throughout the
3706	 * writepages operation.  If it fails here we are freeing up the inode
3707	 * anyway and we'd rather not waste our time writing out stuff that is
3708	 * going to be truncated anyway.
3709	 */
3710	if (!igrab(inode))
3711		return 0;
3712
3713	pagevec_init(&pvec, 0);
3714	if (wbc->range_cyclic) {
3715		index = mapping->writeback_index; /* Start from prev offset */
3716		end = -1;
3717	} else {
3718		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3719		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3720		scanned = 1;
3721	}
3722	if (wbc->sync_mode == WB_SYNC_ALL)
3723		tag = PAGECACHE_TAG_TOWRITE;
3724	else
3725		tag = PAGECACHE_TAG_DIRTY;
3726retry:
3727	if (wbc->sync_mode == WB_SYNC_ALL)
3728		tag_pages_for_writeback(mapping, index, end);
3729	while (!done && !nr_to_write_done && (index <= end) &&
3730	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3731			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3732		unsigned i;
3733
3734		scanned = 1;
3735		for (i = 0; i < nr_pages; i++) {
3736			struct page *page = pvec.pages[i];
3737
3738			/*
3739			 * At this point we hold neither mapping->tree_lock nor
3740			 * lock on the page itself: the page may be truncated or
3741			 * invalidated (changing page->mapping to NULL), or even
3742			 * swizzled back from swapper_space to tmpfs file
3743			 * mapping
3744			 */
3745			if (!trylock_page(page)) {
3746				flush_fn(data);
3747				lock_page(page);
3748			}
3749
3750			if (unlikely(page->mapping != mapping)) {
3751				unlock_page(page);
3752				continue;
3753			}
3754
3755			if (!wbc->range_cyclic && page->index > end) {
3756				done = 1;
3757				unlock_page(page);
3758				continue;
3759			}
3760
3761			if (wbc->sync_mode != WB_SYNC_NONE) {
3762				if (PageWriteback(page))
3763					flush_fn(data);
3764				wait_on_page_writeback(page);
3765			}
3766
3767			if (PageWriteback(page) ||
3768			    !clear_page_dirty_for_io(page)) {
3769				unlock_page(page);
3770				continue;
3771			}
3772
3773			ret = (*writepage)(page, wbc, data);
3774
3775			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3776				unlock_page(page);
3777				ret = 0;
3778			}
3779			if (ret)
3780				done = 1;
3781
3782			/*
3783			 * the filesystem may choose to bump up nr_to_write.
3784			 * We have to make sure to honor the new nr_to_write
3785			 * at any time
3786			 */
3787			nr_to_write_done = wbc->nr_to_write <= 0;
3788		}
3789		pagevec_release(&pvec);
3790		cond_resched();
3791	}
3792	if (!scanned && !done) {
3793		/*
3794		 * We hit the last page and there is more work to be done: wrap
3795		 * back to the start of the file
3796		 */
3797		scanned = 1;
3798		index = 0;
3799		goto retry;
3800	}
3801	btrfs_add_delayed_iput(inode);
3802	return ret;
3803}
3804
3805static void flush_epd_write_bio(struct extent_page_data *epd)
3806{
3807	if (epd->bio) {
3808		int rw = WRITE;
3809		int ret;
3810
3811		if (epd->sync_io)
3812			rw = WRITE_SYNC;
3813
3814		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3815		BUG_ON(ret < 0); /* -ENOMEM */
3816		epd->bio = NULL;
3817	}
3818}
3819
3820static noinline void flush_write_bio(void *data)
3821{
3822	struct extent_page_data *epd = data;
3823	flush_epd_write_bio(epd);
3824}
3825
3826int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3827			  get_extent_t *get_extent,
3828			  struct writeback_control *wbc)
3829{
3830	int ret;
3831	struct extent_page_data epd = {
3832		.bio = NULL,
3833		.tree = tree,
3834		.get_extent = get_extent,
3835		.extent_locked = 0,
3836		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3837		.bio_flags = 0,
3838	};
3839
3840	ret = __extent_writepage(page, wbc, &epd);
3841
3842	flush_epd_write_bio(&epd);
3843	return ret;
3844}
3845
3846int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3847			      u64 start, u64 end, get_extent_t *get_extent,
3848			      int mode)
3849{
3850	int ret = 0;
3851	struct address_space *mapping = inode->i_mapping;
3852	struct page *page;
3853	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3854		PAGE_CACHE_SHIFT;
3855
3856	struct extent_page_data epd = {
3857		.bio = NULL,
3858		.tree = tree,
3859		.get_extent = get_extent,
3860		.extent_locked = 1,
3861		.sync_io = mode == WB_SYNC_ALL,
3862		.bio_flags = 0,
3863	};
3864	struct writeback_control wbc_writepages = {
3865		.sync_mode	= mode,
3866		.nr_to_write	= nr_pages * 2,
3867		.range_start	= start,
3868		.range_end	= end + 1,
3869	};
3870
3871	while (start <= end) {
3872		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3873		if (clear_page_dirty_for_io(page))
3874			ret = __extent_writepage(page, &wbc_writepages, &epd);
3875		else {
3876			if (tree->ops && tree->ops->writepage_end_io_hook)
3877				tree->ops->writepage_end_io_hook(page, start,
3878						 start + PAGE_CACHE_SIZE - 1,
3879						 NULL, 1);
3880			unlock_page(page);
3881		}
3882		page_cache_release(page);
3883		start += PAGE_CACHE_SIZE;
3884	}
3885
3886	flush_epd_write_bio(&epd);
3887	return ret;
3888}
3889
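/*
 * Write back the dirty pages of the mapping with __extent_writepage(),
 * batching bios in the extent_page_data and submitting whatever is left
 * over once the page walk finishes.
 */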
3890int extent_writepages(struct extent_io_tree *tree,
3891		      struct address_space *mapping,
3892		      get_extent_t *get_extent,
3893		      struct writeback_control *wbc)
3894{
3895	int ret = 0;
3896	struct extent_page_data epd = {
3897		.bio = NULL,
3898		.tree = tree,
3899		.get_extent = get_extent,
3900		.extent_locked = 0,
3901		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3902		.bio_flags = 0,
3903	};
3904
3905	ret = extent_write_cache_pages(tree, mapping, wbc,
3906				       __extent_writepage, &epd,
3907				       flush_write_bio);
3908	flush_epd_write_bio(&epd);
3909	return ret;
3910}
3911
3912int extent_readpages(struct extent_io_tree *tree,
3913		     struct address_space *mapping,
3914		     struct list_head *pages, unsigned nr_pages,
3915		     get_extent_t get_extent)
3916{
3917	struct bio *bio = NULL;
3918	unsigned page_idx;
3919	unsigned long bio_flags = 0;
3920	struct page *pagepool[16];
3921	struct page *page;
3922	struct extent_map *em_cached = NULL;
3923	int nr = 0;
3924
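	/*
	 * Batch the pages in pagepool and hand them to __extent_readpages()
	 * in groups of up to 16.  em_cached carries the most recent extent
	 * map from one batch to the next so contiguous pages can avoid
	 * repeating the extent lookup.
	 */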
3925	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3926		page = list_entry(pages->prev, struct page, lru);
3927
3928		prefetchw(&page->flags);
3929		list_del(&page->lru);
3930		if (add_to_page_cache_lru(page, mapping,
3931					page->index, GFP_NOFS)) {
3932			page_cache_release(page);
3933			continue;
3934		}
3935
3936		pagepool[nr++] = page;
3937		if (nr < ARRAY_SIZE(pagepool))
3938			continue;
3939		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3940				   &bio, 0, &bio_flags, READ);
3941		nr = 0;
3942	}
3943	if (nr)
3944		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3945				   &bio, 0, &bio_flags, READ);
3946
3947	if (em_cached)
3948		free_extent_map(em_cached);
3949
3950	BUG_ON(!list_empty(pages));
3951	if (bio)
3952		return submit_one_bio(READ, bio, 0, bio_flags);
3953	return 0;
3954}
3955
3956/*
3957 * basic invalidatepage code; this waits on any locked or writeback
3958 * ranges corresponding to the page, and then deletes any extent state
3959 * records from the tree
3960 */
3961int extent_invalidatepage(struct extent_io_tree *tree,
3962			  struct page *page, unsigned long offset)
3963{
3964	struct extent_state *cached_state = NULL;
3965	u64 start = page_offset(page);
3966	u64 end = start + PAGE_CACHE_SIZE - 1;
3967	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3968
3969	start += ALIGN(offset, blocksize);
3970	if (start > end)
3971		return 0;
3972
3973	lock_extent_bits(tree, start, end, 0, &cached_state);
3974	wait_on_page_writeback(page);
3975	clear_extent_bit(tree, start, end,
3976			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3977			 EXTENT_DO_ACCOUNTING,
3978			 1, 1, &cached_state, GFP_NOFS);
3979	return 0;
3980}
3981
3982/*
3983 * a helper for releasepage; this tests for areas of the page that
3984 * are locked or under IO and drops the related state bits if it is safe
3985 * to drop the page.
3986 */
3987static int try_release_extent_state(struct extent_map_tree *map,
3988				    struct extent_io_tree *tree,
3989				    struct page *page, gfp_t mask)
3990{
3991	u64 start = page_offset(page);
3992	u64 end = start + PAGE_CACHE_SIZE - 1;
3993	int ret = 1;
3994
3995	if (test_range_bit(tree, start, end,
3996			   EXTENT_IOBITS, 0, NULL))
3997		ret = 0;
3998	else {
3999		if ((mask & GFP_NOFS) == GFP_NOFS)
4000			mask = GFP_NOFS;
4001		/*
4002		 * at this point we can safely clear everything except the
4003		 * locked bit and the nodatasum bit
4004		 */
4005		ret = clear_extent_bit(tree, start, end,
4006				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4007				 0, 0, NULL, mask);
4008
4009		/* if clear_extent_bit failed for enomem reasons,
4010		 * we can't allow the release to continue.
4011		 */
4012		if (ret < 0)
4013			ret = 0;
4014		else
4015			ret = 1;
4016	}
4017	return ret;
4018}
4019
4020/*
4021 * a helper for releasepage.  As long as there are no locked extents
4022 * in the range corresponding to the page, both state records and extent
4023 * map records are removed
4024 */
4025int try_release_extent_mapping(struct extent_map_tree *map,
4026			       struct extent_io_tree *tree, struct page *page,
4027			       gfp_t mask)
4028{
4029	struct extent_map *em;
4030	u64 start = page_offset(page);
4031	u64 end = start + PAGE_CACHE_SIZE - 1;
4032
4033	if ((mask & __GFP_WAIT) &&
4034	    page->mapping->host->i_size > 16 * 1024 * 1024) {
4035		u64 len;
4036		while (start <= end) {
4037			len = end - start + 1;
4038			write_lock(&map->lock);
4039			em = lookup_extent_mapping(map, start, len);
4040			if (!em) {
4041				write_unlock(&map->lock);
4042				break;
4043			}
4044			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4045			    em->start != start) {
4046				write_unlock(&map->lock);
4047				free_extent_map(em);
4048				break;
4049			}
4050			if (!test_range_bit(tree, em->start,
4051					    extent_map_end(em) - 1,
4052					    EXTENT_LOCKED | EXTENT_WRITEBACK,
4053					    0, NULL)) {
4054				remove_extent_mapping(map, em);
4055				/* once for the rb tree */
4056				free_extent_map(em);
4057			}
4058			start = extent_map_end(em);
4059			write_unlock(&map->lock);
4060
4061			/* once for us */
4062			free_extent_map(em);
4063		}
4064	}
4065	return try_release_extent_state(map, tree, page, mask);
4066}
4067
4068/*
4069 * helper function for fiemap, which doesn't want to see any holes.
4070 * This maps until we find something past 'last'
4071 */
4072static struct extent_map *get_extent_skip_holes(struct inode *inode,
4073						u64 offset,
4074						u64 last,
4075						get_extent_t *get_extent)
4076{
4077	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4078	struct extent_map *em;
4079	u64 len;
4080
4081	if (offset >= last)
4082		return NULL;
4083
4084	while (1) {
4085		len = last - offset;
4086		if (len == 0)
4087			break;
4088		len = ALIGN(len, sectorsize);
4089		em = get_extent(inode, NULL, 0, offset, len, 0);
4090		if (IS_ERR_OR_NULL(em))
4091			return em;
4092
4093		/* if this isn't a hole, return it */
4094		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4095		    em->block_start != EXTENT_MAP_HOLE) {
4096			return em;
4097		}
4098
4099		/* this is a hole, advance to the next extent */
4100		offset = extent_map_end(em);
4101		free_extent_map(em);
4102		if (offset >= last)
4103			break;
4104	}
4105	return NULL;
4106}
4107
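/*
 * Callback for iterate_inodes_from_logical(), used by fiemap below to decide
 * whether an extent is shared.  The context is a running reference count, and
 * returning nonzero once it passes 1 stops the (potentially expensive)
 * iteration early.
 */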
4108static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
4109{
4110	unsigned long cnt = *((unsigned long *)ctx);
4111
4112	cnt++;
4113	*((unsigned long *)ctx) = cnt;
4114
4115	/* Now we're sure that the extent is shared. */
4116	if (cnt > 1)
4117		return 1;
4118	return 0;
4119}
4120
4121int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4122		__u64 start, __u64 len, get_extent_t *get_extent)
4123{
4124	int ret = 0;
4125	u64 off = start;
4126	u64 max = start + len;
4127	u32 flags = 0;
4128	u32 found_type;
4129	u64 last;
4130	u64 last_for_get_extent = 0;
4131	u64 disko = 0;
4132	u64 isize = i_size_read(inode);
4133	struct btrfs_key found_key;
4134	struct extent_map *em = NULL;
4135	struct extent_state *cached_state = NULL;
4136	struct btrfs_path *path;
4137	int end = 0;
4138	u64 em_start = 0;
4139	u64 em_len = 0;
4140	u64 em_end = 0;
4141
4142	if (len == 0)
4143		return -EINVAL;
4144
4145	path = btrfs_alloc_path();
4146	if (!path)
4147		return -ENOMEM;
4148	path->leave_spinning = 1;
4149
4150	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
4151	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
4152
4153	/*
4154	 * lookup the last file extent.  We're not using i_size here
4155	 * because there might be preallocation past i_size
4156	 */
4157	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
4158				       path, btrfs_ino(inode), -1, 0);
4159	if (ret < 0) {
4160		btrfs_free_path(path);
4161		return ret;
4162	}
4163	WARN_ON(!ret);
4164	path->slots[0]--;
4165	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4166	found_type = btrfs_key_type(&found_key);
4167
4168	/* No extents, but there might be delalloc bits */
4169	if (found_key.objectid != btrfs_ino(inode) ||
4170	    found_type != BTRFS_EXTENT_DATA_KEY) {
4171		/* have to trust i_size as the end */
4172		last = (u64)-1;
4173		last_for_get_extent = isize;
4174	} else {
4175		/*
4176		 * remember the start of the last extent.  There are a
4177		 * bunch of different factors that go into the length of the
4178		 * extent, so it's much less complex to remember where it started
4179		 */
4180		last = found_key.offset;
4181		last_for_get_extent = last + 1;
4182	}
4183	btrfs_release_path(path);
4184
4185	/*
4186	 * we might have some extents allocated but more delalloc past those
4187	 * extents.  so, we trust isize unless the start of the last extent is
4188	 * beyond isize
4189	 */
4190	if (last < isize) {
4191		last = (u64)-1;
4192		last_for_get_extent = isize;
4193	}
4194
4195	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4196			 &cached_state);
4197
4198	em = get_extent_skip_holes(inode, start, last_for_get_extent,
4199				   get_extent);
4200	if (!em)
4201		goto out;
4202	if (IS_ERR(em)) {
4203		ret = PTR_ERR(em);
4204		goto out;
4205	}
4206
4207	while (!end) {
4208		u64 offset_in_extent = 0;
4209
4210		/* break if the extent we found is outside the range */
4211		if (em->start >= max || extent_map_end(em) < off)
4212			break;
4213
4214		/*
4215		 * get_extent may return an extent that starts before our
4216		 * requested range.  We have to make sure the ranges
4217		 * we return to fiemap always move forward and don't
4218		 * overlap, so adjust the offsets here
4219		 */
4220		em_start = max(em->start, off);
4221
4222		/*
4223		 * record the offset from the start of the extent
4224		 * for adjusting the disk offset below.  Only do this if the
4225		 * extent isn't compressed since our in ram offset may be past
4226		 * what we have actually allocated on disk.
4227		 */
4228		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4229			offset_in_extent = em_start - em->start;
4230		em_end = extent_map_end(em);
4231		em_len = em_end - em_start;
4232		disko = 0;
4233		flags = 0;
4234
4235		/*
4236		 * bump off for our next call to get_extent
4237		 */
4238		off = extent_map_end(em);
4239		if (off >= max)
4240			end = 1;
4241
4242		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4243			end = 1;
4244			flags |= FIEMAP_EXTENT_LAST;
4245		} else if (em->block_start == EXTENT_MAP_INLINE) {
4246			flags |= (FIEMAP_EXTENT_DATA_INLINE |
4247				  FIEMAP_EXTENT_NOT_ALIGNED);
4248		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
4249			flags |= (FIEMAP_EXTENT_DELALLOC |
4250				  FIEMAP_EXTENT_UNKNOWN);
4251		} else {
4252			unsigned long ref_cnt = 0;
4253
4254			disko = em->block_start + offset_in_extent;
4255
4256			/*
4257			 * As btrfs supports shared space, this information
4258			 * can be exported to userspace tools via
4259			 * flag FIEMAP_EXTENT_SHARED.
4260			 */
4261			ret = iterate_inodes_from_logical(
4262					em->block_start,
4263					BTRFS_I(inode)->root->fs_info,
4264					path, count_ext_ref, &ref_cnt);
4265			if (ret < 0 && ret != -ENOENT)
4266				goto out_free;
4267
4268			if (ref_cnt > 1)
4269				flags |= FIEMAP_EXTENT_SHARED;
4270		}
4271		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4272			flags |= FIEMAP_EXTENT_ENCODED;
4273
4274		free_extent_map(em);
4275		em = NULL;
4276		if ((em_start >= last) || em_len == (u64)-1 ||
4277		   (last == (u64)-1 && isize <= em_end)) {
4278			flags |= FIEMAP_EXTENT_LAST;
4279			end = 1;
4280		}
4281
4282		/* now scan forward to see if this is really the last extent. */
4283		em = get_extent_skip_holes(inode, off, last_for_get_extent,
4284					   get_extent);
4285		if (IS_ERR(em)) {
4286			ret = PTR_ERR(em);
4287			goto out;
4288		}
4289		if (!em) {
4290			flags |= FIEMAP_EXTENT_LAST;
4291			end = 1;
4292		}
4293		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4294					      em_len, flags);
4295		if (ret)
4296			goto out_free;
4297	}
4298out_free:
4299	free_extent_map(em);
4300out:
4301	btrfs_free_path(path);
4302	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4303			     &cached_state, GFP_NOFS);
4304	return ret;
4305}
4306
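/*
 * A minimal sketch of how extent_fiemap() gets called, assuming the usual
 * VFS wiring (wrapper body paraphrased here, not the exact upstream code):
 *
 *	static int btrfs_fiemap(struct inode *inode,
 *				struct fiemap_extent_info *fieinfo,
 *				__u64 start, __u64 len)
 *	{
 *		return extent_fiemap(inode, fieinfo, start, len,
 *				     btrfs_get_extent_fiemap);
 *	}
 *
 * with inode->i_op->fiemap pointing at btrfs_fiemap for regular files.
 */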
4307static void __free_extent_buffer(struct extent_buffer *eb)
4308{
4309	btrfs_leak_debug_del(&eb->leak_list);
4310	kmem_cache_free(extent_buffer_cache, eb);
4311}
4312
4313int extent_buffer_under_io(struct extent_buffer *eb)
4314{
4315	return (atomic_read(&eb->io_pages) ||
4316		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4317		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4318}
4319
4320/*
4321 * Helper for releasing extent buffer page.
4322 */
4323static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4324						unsigned long start_idx)
4325{
4326	unsigned long index;
4327	unsigned long num_pages;
4328	struct page *page;
4329	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4330
4331	BUG_ON(extent_buffer_under_io(eb));
4332
4333	num_pages = num_extent_pages(eb->start, eb->len);
4334	index = start_idx + num_pages;
4335	if (start_idx >= index)
4336		return;
4337
4338	do {
4339		index--;
4340		page = extent_buffer_page(eb, index);
4341		if (page && mapped) {
4342			spin_lock(&page->mapping->private_lock);
4343			/*
4344			 * We do this since we'll remove the pages after we've
4345			 * removed the eb from the radix tree, so we could race
4346			 * and have this page now attached to the new eb.  So
4347			 * only clear page_private if it's still connected to
4348			 * this eb.
4349			 */
4350			if (PagePrivate(page) &&
4351			    page->private == (unsigned long)eb) {
4352				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4353				BUG_ON(PageDirty(page));
4354				BUG_ON(PageWriteback(page));
4355				/*
4356				 * We need to make sure we haven't been attached
4357				 * to a new eb.
4358				 */
4359				ClearPagePrivate(page);
4360				set_page_private(page, 0);
4361				/* One for the page private */
4362				page_cache_release(page);
4363			}
4364			spin_unlock(&page->mapping->private_lock);
4365
4366		}
4367		if (page) {
4368			/* One for when we allocated the page */
4369			page_cache_release(page);
4370		}
4371	} while (index != start_idx);
4372}
4373
4374/*
4375 * Helper for releasing the extent buffer.
4376 */
4377static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4378{
4379	btrfs_release_extent_buffer_page(eb, 0);
4380	__free_extent_buffer(eb);
4381}
4382
4383static struct extent_buffer *
4384__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4385		      unsigned long len, gfp_t mask)
4386{
4387	struct extent_buffer *eb = NULL;
4388
4389	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4390	if (eb == NULL)
4391		return NULL;
4392	eb->start = start;
4393	eb->len = len;
4394	eb->fs_info = fs_info;
4395	eb->bflags = 0;
4396	rwlock_init(&eb->lock);
4397	atomic_set(&eb->write_locks, 0);
4398	atomic_set(&eb->read_locks, 0);
4399	atomic_set(&eb->blocking_readers, 0);
4400	atomic_set(&eb->blocking_writers, 0);
4401	atomic_set(&eb->spinning_readers, 0);
4402	atomic_set(&eb->spinning_writers, 0);
4403	eb->lock_nested = 0;
4404	init_waitqueue_head(&eb->write_lock_wq);
4405	init_waitqueue_head(&eb->read_lock_wq);
4406
4407	btrfs_leak_debug_add(&eb->leak_list, &buffers);
4408
4409	spin_lock_init(&eb->refs_lock);
4410	atomic_set(&eb->refs, 1);
4411	atomic_set(&eb->io_pages, 0);
4412
4413	/*
4414	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4415	 */
4416	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4417		> MAX_INLINE_EXTENT_BUFFER_SIZE);
4418	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4419
4420	return eb;
4421}
4422
4423struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4424{
4425	unsigned long i;
4426	struct page *p;
4427	struct extent_buffer *new;
4428	unsigned long num_pages = num_extent_pages(src->start, src->len);
4429
4430	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4431	if (new == NULL)
4432		return NULL;
4433
4434	for (i = 0; i < num_pages; i++) {
4435		p = alloc_page(GFP_NOFS);
4436		if (!p) {
4437			btrfs_release_extent_buffer(new);
4438			return NULL;
4439		}
4440		attach_extent_buffer_page(new, p);
4441		WARN_ON(PageDirty(p));
4442		SetPageUptodate(p);
4443		new->pages[i] = p;
4444	}
4445
4446	copy_extent_buffer(new, src, 0, 0, src->len);
4447	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4448	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4449
4450	return new;
4451}
4452
4453struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4454{
4455	struct extent_buffer *eb;
4456	unsigned long num_pages = num_extent_pages(0, len);
4457	unsigned long i;
4458
4459	eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4460	if (!eb)
4461		return NULL;
4462
4463	for (i = 0; i < num_pages; i++) {
4464		eb->pages[i] = alloc_page(GFP_NOFS);
4465		if (!eb->pages[i])
4466			goto err;
4467	}
4468	set_extent_buffer_uptodate(eb);
4469	btrfs_set_header_nritems(eb, 0);
4470	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4471
4472	return eb;
4473err:
4474	for (; i > 0; i--)
4475		__free_page(eb->pages[i - 1]);
4476	__free_extent_buffer(eb);
4477	return NULL;
4478}
4479
4480static void check_buffer_tree_ref(struct extent_buffer *eb)
4481{
4482	int refs;
4483	/* the ref bit is tricky.  We have to make sure it is set
4484	 * if we have the buffer dirty.   Otherwise the
4485	 * code to free a buffer can end up dropping a dirty
4486	 * page
4487	 *
4488	 * Once the ref bit is set, it won't go away while the
4489	 * buffer is dirty or in writeback, and it also won't
4490	 * go away while we have the reference count on the
4491	 * eb bumped.
4492	 *
4493	 * We can't just set the ref bit without bumping the
4494	 * ref on the eb because free_extent_buffer might
4495	 * see the ref bit and try to clear it.  If this happens
4496	 * free_extent_buffer might end up dropping our original
4497	 * ref by mistake and freeing the page before we are able
4498	 * to add one more ref.
4499	 *
4500	 * So bump the ref count first, then set the bit.  If someone
4501	 * beat us to it, drop the ref we added.
4502	 */
4503	refs = atomic_read(&eb->refs);
4504	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4505		return;
4506
4507	spin_lock(&eb->refs_lock);
4508	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4509		atomic_inc(&eb->refs);
4510	spin_unlock(&eb->refs_lock);
4511}
4512
4513static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4514{
4515	unsigned long num_pages, i;
4516
4517	check_buffer_tree_ref(eb);
4518
4519	num_pages = num_extent_pages(eb->start, eb->len);
4520	for (i = 0; i < num_pages; i++) {
4521		struct page *p = extent_buffer_page(eb, i);
4522		mark_page_accessed(p);
4523	}
4524}
4525
4526struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4527					 u64 start)
4528{
4529	struct extent_buffer *eb;
4530
4531	rcu_read_lock();
4532	eb = radix_tree_lookup(&fs_info->buffer_radix,
4533			       start >> PAGE_CACHE_SHIFT);
4534	if (eb && atomic_inc_not_zero(&eb->refs)) {
4535		rcu_read_unlock();
4536		mark_extent_buffer_accessed(eb);
4537		return eb;
4538	}
4539	rcu_read_unlock();
4540
4541	return NULL;
4542}
4543
4544struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4545					  u64 start, unsigned long len)
4546{
4547	unsigned long num_pages = num_extent_pages(start, len);
4548	unsigned long i;
4549	unsigned long index = start >> PAGE_CACHE_SHIFT;
4550	struct extent_buffer *eb;
4551	struct extent_buffer *exists = NULL;
4552	struct page *p;
4553	struct address_space *mapping = fs_info->btree_inode->i_mapping;
4554	int uptodate = 1;
4555	int ret;
4556
4557	eb = find_extent_buffer(fs_info, start);
4558	if (eb)
4559		return eb;
4560
4561	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
4562	if (!eb)
4563		return NULL;
4564
4565	for (i = 0; i < num_pages; i++, index++) {
4566		p = find_or_create_page(mapping, index, GFP_NOFS);
4567		if (!p)
4568			goto free_eb;
4569
4570		spin_lock(&mapping->private_lock);
4571		if (PagePrivate(p)) {
4572			/*
4573			 * We could have already allocated an eb for this page
4574			 * and attached one, so let's see if we can get a ref on
4575			 * the existing eb.  If we can, we know it's good and we
4576			 * can just return that one; else we know we can just
4577			 * overwrite page->private.
4578			 */
4579			exists = (struct extent_buffer *)p->private;
4580			if (atomic_inc_not_zero(&exists->refs)) {
4581				spin_unlock(&mapping->private_lock);
4582				unlock_page(p);
4583				page_cache_release(p);
4584				mark_extent_buffer_accessed(exists);
4585				goto free_eb;
4586			}
4587
4588			/*
4589			 * Do this so attach doesn't complain, and drop the
4590			 * page ref that the old private holder had.
4591			 */
4592			ClearPagePrivate(p);
4593			WARN_ON(PageDirty(p));
4594			page_cache_release(p);
4595		}
4596		attach_extent_buffer_page(eb, p);
4597		spin_unlock(&mapping->private_lock);
4598		WARN_ON(PageDirty(p));
4599		mark_page_accessed(p);
4600		eb->pages[i] = p;
4601		if (!PageUptodate(p))
4602			uptodate = 0;
4603
4604		/*
4605		 * see below about how we avoid a nasty race with release page
4606		 * and why we unlock later
4607		 */
4608	}
4609	if (uptodate)
4610		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4611again:
4612	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4613	if (ret)
4614		goto free_eb;
4615
4616	spin_lock(&fs_info->buffer_lock);
4617	ret = radix_tree_insert(&fs_info->buffer_radix,
4618				start >> PAGE_CACHE_SHIFT, eb);
4619	spin_unlock(&fs_info->buffer_lock);
4620	radix_tree_preload_end();
4621	if (ret == -EEXIST) {
4622		exists = find_extent_buffer(fs_info, start);
4623		if (exists)
4624			goto free_eb;
4625		else
4626			goto again;
4627	}
4628	/* add one reference for the tree */
4629	check_buffer_tree_ref(eb);
4630	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4631
4632	/*
4633	 * there is a race where release page may have
4634	 * tried to find this extent buffer in the radix
4635	 * but failed.  It will tell the VM it is safe to
4636	 * reclaim the page, and it will clear the page private bit.
4637	 * We must make sure to set the page private bit properly
4638	 * after the extent buffer is in the radix tree so
4639	 * it doesn't get lost
4640	 */
4641	SetPageChecked(eb->pages[0]);
4642	for (i = 1; i < num_pages; i++) {
4643		p = extent_buffer_page(eb, i);
4644		ClearPageChecked(p);
4645		unlock_page(p);
4646	}
4647	unlock_page(eb->pages[0]);
4648	return eb;
4649
4650free_eb:
4651	for (i = 0; i < num_pages; i++) {
4652		if (eb->pages[i])
4653			unlock_page(eb->pages[i]);
4654	}
4655
4656	WARN_ON(!atomic_dec_and_test(&eb->refs));
4657	btrfs_release_extent_buffer(eb);
4658	return exists;
4659}
4660
4661static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4662{
4663	struct extent_buffer *eb =
4664			container_of(head, struct extent_buffer, rcu_head);
4665
4666	__free_extent_buffer(eb);
4667}
4668
4669/* Expects eb->refs_lock to be held already; drops it before returning */
4670static int release_extent_buffer(struct extent_buffer *eb)
4671{
4672	WARN_ON(atomic_read(&eb->refs) == 0);
4673	if (atomic_dec_and_test(&eb->refs)) {
4674		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4675			struct btrfs_fs_info *fs_info = eb->fs_info;
4676
4677			spin_unlock(&eb->refs_lock);
4678
4679			spin_lock(&fs_info->buffer_lock);
4680			radix_tree_delete(&fs_info->buffer_radix,
4681					  eb->start >> PAGE_CACHE_SHIFT);
4682			spin_unlock(&fs_info->buffer_lock);
4683		} else {
4684			spin_unlock(&eb->refs_lock);
4685		}
4686
4687		/* Should be safe to release our pages at this point */
4688		btrfs_release_extent_buffer_page(eb, 0);
4689		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4690		return 1;
4691	}
4692	spin_unlock(&eb->refs_lock);
4693
4694	return 0;
4695}
4696
4697void free_extent_buffer(struct extent_buffer *eb)
4698{
4699	int refs;
4700	int old;
4701	if (!eb)
4702		return;
4703
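	/*
	 * Fast path: while the count is clearly above the tree ref, any
	 * stale/dummy ref and our own ref (refs > 3), drop our reference
	 * with a bare cmpxchg and avoid refs_lock entirely.  The locked
	 * path below handles the puts that may make the buffer freeable.
	 */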
4704	while (1) {
4705		refs = atomic_read(&eb->refs);
4706		if (refs <= 3)
4707			break;
4708		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4709		if (old == refs)
4710			return;
4711	}
4712
4713	spin_lock(&eb->refs_lock);
4714	if (atomic_read(&eb->refs) == 2 &&
4715	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4716		atomic_dec(&eb->refs);
4717
4718	if (atomic_read(&eb->refs) == 2 &&
4719	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4720	    !extent_buffer_under_io(eb) &&
4721	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4722		atomic_dec(&eb->refs);
4723
4724	/*
4725	 * I know this is terrible, but it's temporary until we stop tracking
4726	 * the uptodate bits and such for the extent buffers.
4727	 */
4728	release_extent_buffer(eb);
4729}
4730
4731void free_extent_buffer_stale(struct extent_buffer *eb)
4732{
4733	if (!eb)
4734		return;
4735
4736	spin_lock(&eb->refs_lock);
4737	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4738
4739	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4740	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4741		atomic_dec(&eb->refs);
4742	release_extent_buffer(eb);
4743}
4744
4745void clear_extent_buffer_dirty(struct extent_buffer *eb)
4746{
4747	unsigned long i;
4748	unsigned long num_pages;
4749	struct page *page;
4750
4751	num_pages = num_extent_pages(eb->start, eb->len);
4752
4753	for (i = 0; i < num_pages; i++) {
4754		page = extent_buffer_page(eb, i);
4755		if (!PageDirty(page))
4756			continue;
4757
4758		lock_page(page);
4759		WARN_ON(!PagePrivate(page));
4760
4761		clear_page_dirty_for_io(page);
4762		spin_lock_irq(&page->mapping->tree_lock);
4763		if (!PageDirty(page)) {
4764			radix_tree_tag_clear(&page->mapping->page_tree,
4765						page_index(page),
4766						PAGECACHE_TAG_DIRTY);
4767		}
4768		spin_unlock_irq(&page->mapping->tree_lock);
4769		ClearPageError(page);
4770		unlock_page(page);
4771	}
4772	WARN_ON(atomic_read(&eb->refs) == 0);
4773}
4774
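/*
 * Dirty every page backing the buffer.  Returns the previous state of
 * EXTENT_BUFFER_DIRTY so the caller can tell whether this call is what
 * newly dirtied the buffer.
 */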
4775int set_extent_buffer_dirty(struct extent_buffer *eb)
4776{
4777	unsigned long i;
4778	unsigned long num_pages;
4779	int was_dirty = 0;
4780
4781	check_buffer_tree_ref(eb);
4782
4783	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4784
4785	num_pages = num_extent_pages(eb->start, eb->len);
4786	WARN_ON(atomic_read(&eb->refs) == 0);
4787	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4788
4789	for (i = 0; i < num_pages; i++)
4790		set_page_dirty(extent_buffer_page(eb, i));
4791	return was_dirty;
4792}
4793
4794int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4795{
4796	unsigned long i;
4797	struct page *page;
4798	unsigned long num_pages;
4799
4800	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4801	num_pages = num_extent_pages(eb->start, eb->len);
4802	for (i = 0; i < num_pages; i++) {
4803		page = extent_buffer_page(eb, i);
4804		if (page)
4805			ClearPageUptodate(page);
4806	}
4807	return 0;
4808}
4809
4810int set_extent_buffer_uptodate(struct extent_buffer *eb)
4811{
4812	unsigned long i;
4813	struct page *page;
4814	unsigned long num_pages;
4815
4816	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4817	num_pages = num_extent_pages(eb->start, eb->len);
4818	for (i = 0; i < num_pages; i++) {
4819		page = extent_buffer_page(eb, i);
4820		SetPageUptodate(page);
4821	}
4822	return 0;
4823}
4824
4825int extent_buffer_uptodate(struct extent_buffer *eb)
4826{
4827	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4828}
4829
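/*
 * Read the pages backing an extent buffer, starting at offset 'start' into
 * the buffer (0 reads the whole thing).  With WAIT_NONE the pages are only
 * trylocked and reads are fired off without waiting; with WAIT_COMPLETE we
 * wait for every page and return -EIO if any of them failed to become
 * uptodate.
 */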
4830int read_extent_buffer_pages(struct extent_io_tree *tree,
4831			     struct extent_buffer *eb, u64 start, int wait,
4832			     get_extent_t *get_extent, int mirror_num)
4833{
4834	unsigned long i;
4835	unsigned long start_i;
4836	struct page *page;
4837	int err;
4838	int ret = 0;
4839	int locked_pages = 0;
4840	int all_uptodate = 1;
4841	unsigned long num_pages;
4842	unsigned long num_reads = 0;
4843	struct bio *bio = NULL;
4844	unsigned long bio_flags = 0;
4845
4846	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4847		return 0;
4848
4849	if (start) {
4850		WARN_ON(start < eb->start);
4851		start_i = (start >> PAGE_CACHE_SHIFT) -
4852			(eb->start >> PAGE_CACHE_SHIFT);
4853	} else {
4854		start_i = 0;
4855	}
4856
4857	num_pages = num_extent_pages(eb->start, eb->len);
4858	for (i = start_i; i < num_pages; i++) {
4859		page = extent_buffer_page(eb, i);
4860		if (wait == WAIT_NONE) {
4861			if (!trylock_page(page))
4862				goto unlock_exit;
4863		} else {
4864			lock_page(page);
4865		}
4866		locked_pages++;
4867		if (!PageUptodate(page)) {
4868			num_reads++;
4869			all_uptodate = 0;
4870		}
4871	}
4872	if (all_uptodate) {
4873		if (start_i == 0)
4874			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4875		goto unlock_exit;
4876	}
4877
4878	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4879	eb->read_mirror = 0;
4880	atomic_set(&eb->io_pages, num_reads);
4881	for (i = start_i; i < num_pages; i++) {
4882		page = extent_buffer_page(eb, i);
4883		if (!PageUptodate(page)) {
4884			ClearPageError(page);
4885			err = __extent_read_full_page(tree, page,
4886						      get_extent, &bio,
4887						      mirror_num, &bio_flags,
4888						      READ | REQ_META);
4889			if (err)
4890				ret = err;
4891		} else {
4892			unlock_page(page);
4893		}
4894	}
4895
4896	if (bio) {
4897		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4898				     bio_flags);
4899		if (err)
4900			return err;
4901	}
4902
4903	if (ret || wait != WAIT_COMPLETE)
4904		return ret;
4905
4906	for (i = start_i; i < num_pages; i++) {
4907		page = extent_buffer_page(eb, i);
4908		wait_on_page_locked(page);
4909		if (!PageUptodate(page))
4910			ret = -EIO;
4911	}
4912
4913	return ret;
4914
4915unlock_exit:
4916	i = start_i;
4917	while (locked_pages > 0) {
4918		page = extent_buffer_page(eb, i);
4919		i++;
4920		unlock_page(page);
4921		locked_pages--;
4922	}
4923	return ret;
4924}
4925
4926void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4927			unsigned long start,
4928			unsigned long len)
4929{
4930	size_t cur;
4931	size_t offset;
4932	struct page *page;
4933	char *kaddr;
4934	char *dst = (char *)dstv;
4935	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4936	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4937
4938	WARN_ON(start > eb->len);
4939	WARN_ON(start + len > eb->start + eb->len);
4940
4941	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4942
4943	while (len > 0) {
4944		page = extent_buffer_page(eb, i);
4945
4946		cur = min(len, (PAGE_CACHE_SIZE - offset));
4947		kaddr = page_address(page);
4948		memcpy(dst, kaddr + offset, cur);
4949
4950		dst += cur;
4951		len -= cur;
4952		offset = 0;
4953		i++;
4954	}
4955}
4956
4957int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4958			       unsigned long min_len, char **map,
4959			       unsigned long *map_start,
4960			       unsigned long *map_len)
4961{
4962	size_t offset = start & (PAGE_CACHE_SIZE - 1);
4963	char *kaddr;
4964	struct page *p;
4965	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4966	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4967	unsigned long end_i = (start_offset + start + min_len - 1) >>
4968		PAGE_CACHE_SHIFT;
4969
4970	if (i != end_i)
4971		return -EINVAL;
4972
4973	if (i == 0) {
4974		offset = start_offset;
4975		*map_start = 0;
4976	} else {
4977		offset = 0;
4978		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4979	}
4980
4981	if (start + min_len > eb->len) {
4982		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4983		       "wanted %lu %lu\n",
4984		       eb->start, eb->len, start, min_len);
4985		return -EINVAL;
4986	}
4987
4988	p = extent_buffer_page(eb, i);
4989	kaddr = page_address(p);
4990	*map = kaddr + offset;
4991	*map_len = PAGE_CACHE_SIZE - offset;
4992	return 0;
4993}
4994
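/*
 * A small usage sketch (the caller below is hypothetical): because
 * map_private_extent_buffer() refuses ranges that cross a page boundary,
 * callers typically fall back to the copying read_extent_buffer():
 *
 *	u64 val;
 *	char *kaddr;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_private_extent_buffer(eb, off, sizeof(val), &kaddr,
 *				       &map_start, &map_len))
 *		memcpy(&val, kaddr + off - map_start, sizeof(val));
 *	else
 *		read_extent_buffer(eb, &val, off, sizeof(val));
 */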
4995int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4996			  unsigned long start,
4997			  unsigned long len)
4998{
4999	size_t cur;
5000	size_t offset;
5001	struct page *page;
5002	char *kaddr;
5003	char *ptr = (char *)ptrv;
5004	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5005	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5006	int ret = 0;
5007
5008	WARN_ON(start > eb->len);
5009	WARN_ON(start + len > eb->start + eb->len);
5010
5011	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5012
5013	while (len > 0) {
5014		page = extent_buffer_page(eb, i);
5015
5016		cur = min(len, (PAGE_CACHE_SIZE - offset));
5017
5018		kaddr = page_address(page);
5019		ret = memcmp(ptr, kaddr + offset, cur);
5020		if (ret)
5021			break;
5022
5023		ptr += cur;
5024		len -= cur;
5025		offset = 0;
5026		i++;
5027	}
5028	return ret;
5029}
5030
5031void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5032			 unsigned long start, unsigned long len)
5033{
5034	size_t cur;
5035	size_t offset;
5036	struct page *page;
5037	char *kaddr;
5038	char *src = (char *)srcv;
5039	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5040	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5041
5042	WARN_ON(start > eb->len);
5043	WARN_ON(start + len > eb->start + eb->len);
5044
5045	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5046
5047	while (len > 0) {
5048		page = extent_buffer_page(eb, i);
5049		WARN_ON(!PageUptodate(page));
5050
5051		cur = min(len, PAGE_CACHE_SIZE - offset);
5052		kaddr = page_address(page);
5053		memcpy(kaddr + offset, src, cur);
5054
5055		src += cur;
5056		len -= cur;
5057		offset = 0;
5058		i++;
5059	}
5060}
5061
5062void memset_extent_buffer(struct extent_buffer *eb, char c,
5063			  unsigned long start, unsigned long len)
5064{
5065	size_t cur;
5066	size_t offset;
5067	struct page *page;
5068	char *kaddr;
5069	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5070	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5071
5072	WARN_ON(start > eb->len);
5073	WARN_ON(start + len > eb->start + eb->len);
5074
5075	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5076
5077	while (len > 0) {
5078		page = extent_buffer_page(eb, i);
5079		WARN_ON(!PageUptodate(page));
5080
5081		cur = min(len, PAGE_CACHE_SIZE - offset);
5082		kaddr = page_address(page);
5083		memset(kaddr + offset, c, cur);
5084
5085		len -= cur;
5086		offset = 0;
5087		i++;
5088	}
5089}
5090
5091void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5092			unsigned long dst_offset, unsigned long src_offset,
5093			unsigned long len)
5094{
5095	u64 dst_len = dst->len;
5096	size_t cur;
5097	size_t offset;
5098	struct page *page;
5099	char *kaddr;
5100	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5101	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5102
5103	WARN_ON(src->len != dst_len);
5104
5105	offset = (start_offset + dst_offset) &
5106		(PAGE_CACHE_SIZE - 1);
5107
5108	while (len > 0) {
5109		page = extent_buffer_page(dst, i);
5110		WARN_ON(!PageUptodate(page));
5111
5112		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5113
5114		kaddr = page_address(page);
5115		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5116
5117		src_offset += cur;
5118		len -= cur;
5119		offset = 0;
5120		i++;
5121	}
5122}
5123
5124static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5125{
5126	unsigned long distance = (src > dst) ? src - dst : dst - src;
5127	return distance < len;
5128}
5129
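/*
 * Copy a chunk between two (possibly identical) extent buffer pages.  Only
 * when source and destination are the same page and the byte ranges overlap
 * do we need the more expensive memmove(); every other case is a plain
 * memcpy().
 */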
5130static void copy_pages(struct page *dst_page, struct page *src_page,
5131		       unsigned long dst_off, unsigned long src_off,
5132		       unsigned long len)
5133{
5134	char *dst_kaddr = page_address(dst_page);
5135	char *src_kaddr;
5136	int must_memmove = 0;
5137
5138	if (dst_page != src_page) {
5139		src_kaddr = page_address(src_page);
5140	} else {
5141		src_kaddr = dst_kaddr;
5142		if (areas_overlap(src_off, dst_off, len))
5143			must_memmove = 1;
5144	}
5145
5146	if (must_memmove)
5147		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5148	else
5149		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5150}
5151
5152void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5153			   unsigned long src_offset, unsigned long len)
5154{
5155	size_t cur;
5156	size_t dst_off_in_page;
5157	size_t src_off_in_page;
5158	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5159	unsigned long dst_i;
5160	unsigned long src_i;
5161
5162	if (src_offset + len > dst->len) {
5163		printk(KERN_ERR "BTRFS: memcpy bogus src_offset %lu move "
5164		       "len %lu dst len %lu\n", src_offset, len, dst->len);
5165		BUG_ON(1);
5166	}
5167	if (dst_offset + len > dst->len) {
5168		printk(KERN_ERR "BTRFS: memcpy bogus dst_offset %lu move "
5169		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
5170		BUG_ON(1);
5171	}
5172
5173	while (len > 0) {
5174		dst_off_in_page = (start_offset + dst_offset) &
5175			(PAGE_CACHE_SIZE - 1);
5176		src_off_in_page = (start_offset + src_offset) &
5177			(PAGE_CACHE_SIZE - 1);
5178
5179		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5180		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5181
5182		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5183					       src_off_in_page));
5184		cur = min_t(unsigned long, cur,
5185			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5186
5187		copy_pages(extent_buffer_page(dst, dst_i),
5188			   extent_buffer_page(dst, src_i),
5189			   dst_off_in_page, src_off_in_page, cur);
5190
5191		src_offset += cur;
5192		dst_offset += cur;
5193		len -= cur;
5194	}
5195}
5196
5197void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5198			   unsigned long src_offset, unsigned long len)
5199{
5200	size_t cur;
5201	size_t dst_off_in_page;
5202	size_t src_off_in_page;
5203	unsigned long dst_end = dst_offset + len - 1;
5204	unsigned long src_end = src_offset + len - 1;
5205	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5206	unsigned long dst_i;
5207	unsigned long src_i;
5208
5209	if (src_offset + len > dst->len) {
5210		printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
5211		       "len %lu dst len %lu\n", src_offset, len, dst->len);
5212		BUG_ON(1);
5213	}
5214	if (dst_offset + len > dst->len) {
5215		printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
5216		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
5217		BUG_ON(1);
5218	}
5219	if (dst_offset < src_offset) {
5220		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5221		return;
5222	}
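	/*
	 * dst is at or above src and the ranges may overlap, so copy each
	 * chunk back to front, working from the last byte of both ranges.
	 */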
5223	while (len > 0) {
5224		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5225		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5226
5227		dst_off_in_page = (start_offset + dst_end) &
5228			(PAGE_CACHE_SIZE - 1);
5229		src_off_in_page = (start_offset + src_end) &
5230			(PAGE_CACHE_SIZE - 1);
5231
5232		cur = min_t(unsigned long, len, src_off_in_page + 1);
5233		cur = min(cur, dst_off_in_page + 1);
5234		copy_pages(extent_buffer_page(dst, dst_i),
5235			   extent_buffer_page(dst, src_i),
5236			   dst_off_in_page - cur + 1,
5237			   src_off_in_page - cur + 1, cur);
5238
5239		dst_end -= cur;
5240		src_end -= cur;
5241		len -= cur;
5242	}
5243}
5244
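/*
 * Called in the releasepage path for btree pages: drop the tree's reference
 * on the page's extent buffer if nobody else is using it, so the page itself
 * can be freed.
 */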
5245int try_release_extent_buffer(struct page *page)
5246{
5247	struct extent_buffer *eb;
5248
5249	/*
5250	 * We need to make sure nobody is attaching this page to an eb right
5251	 * now.
5252	 */
5253	spin_lock(&page->mapping->private_lock);
5254	if (!PagePrivate(page)) {
5255		spin_unlock(&page->mapping->private_lock);
5256		return 1;
5257	}
5258
5259	eb = (struct extent_buffer *)page->private;
5260	BUG_ON(!eb);
5261
5262	/*
5263	 * This is a little awful but should be ok, we need to make sure that
5264	 * the eb doesn't disappear out from under us while we're looking at
5265	 * this page.
5266	 */
5267	spin_lock(&eb->refs_lock);
5268	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5269		spin_unlock(&eb->refs_lock);
5270		spin_unlock(&page->mapping->private_lock);
5271		return 0;
5272	}
5273	spin_unlock(&page->mapping->private_lock);
5274
5275	/*
5276	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5277	 * so just return; this page will likely be freed soon anyway.
5278	 */
5279	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5280		spin_unlock(&eb->refs_lock);
5281		return 0;
5282	}
5283
5284	return release_extent_buffer(eb);
5285}