   1#include <linux/bitops.h>
   2#include <linux/slab.h>
   3#include <linux/bio.h>
   4#include <linux/mm.h>
   5#include <linux/pagemap.h>
   6#include <linux/page-flags.h>
   7#include <linux/module.h>
   8#include <linux/spinlock.h>
   9#include <linux/blkdev.h>
  10#include <linux/swap.h>
  11#include <linux/writeback.h>
  12#include <linux/pagevec.h>
  13#include <linux/prefetch.h>
  14#include <linux/cleancache.h>
  15#include "extent_io.h"
  16#include "extent_map.h"
  17#include "compat.h"
  18#include "ctree.h"
  19#include "btrfs_inode.h"
  20
  21static struct kmem_cache *extent_state_cache;
  22static struct kmem_cache *extent_buffer_cache;
  23
  24static LIST_HEAD(buffers);
  25static LIST_HEAD(states);
  26
  27#define LEAK_DEBUG 0
  28#if LEAK_DEBUG
  29static DEFINE_SPINLOCK(leak_lock);
  30#endif
  31
  32#define BUFFER_LRU_MAX 64
  33
  34struct tree_entry {
  35	u64 start;
  36	u64 end;
  37	struct rb_node rb_node;
  38};
  39
  40struct extent_page_data {
  41	struct bio *bio;
  42	struct extent_io_tree *tree;
  43	get_extent_t *get_extent;
  44
   45	/* tells writepage not to lock the state bits for this range;
  46	 * it still does the unlocking
  47	 */
  48	unsigned int extent_locked:1;
  49
  50	/* tells the submit_bio code to use a WRITE_SYNC */
  51	unsigned int sync_io:1;
  52};
  53
  54int __init extent_io_init(void)
  55{
  56	extent_state_cache = kmem_cache_create("extent_state",
  57			sizeof(struct extent_state), 0,
  58			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
  59	if (!extent_state_cache)
  60		return -ENOMEM;
  61
  62	extent_buffer_cache = kmem_cache_create("extent_buffers",
  63			sizeof(struct extent_buffer), 0,
  64			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
  65	if (!extent_buffer_cache)
  66		goto free_state_cache;
  67	return 0;
  68
  69free_state_cache:
  70	kmem_cache_destroy(extent_state_cache);
  71	return -ENOMEM;
  72}
  73
  74void extent_io_exit(void)
  75{
  76	struct extent_state *state;
  77	struct extent_buffer *eb;
  78
  79	while (!list_empty(&states)) {
  80		state = list_entry(states.next, struct extent_state, leak_list);
  81		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
  82		       "state %lu in tree %p refs %d\n",
  83		       (unsigned long long)state->start,
  84		       (unsigned long long)state->end,
  85		       state->state, state->tree, atomic_read(&state->refs));
  86		list_del(&state->leak_list);
  87		kmem_cache_free(extent_state_cache, state);
  88
  89	}
  90
  91	while (!list_empty(&buffers)) {
  92		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
  93		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
  94		       "refs %d\n", (unsigned long long)eb->start,
  95		       eb->len, atomic_read(&eb->refs));
  96		list_del(&eb->leak_list);
  97		kmem_cache_free(extent_buffer_cache, eb);
  98	}
  99	if (extent_state_cache)
 100		kmem_cache_destroy(extent_state_cache);
 101	if (extent_buffer_cache)
 102		kmem_cache_destroy(extent_buffer_cache);
 103}
 104
 105void extent_io_tree_init(struct extent_io_tree *tree,
 106			 struct address_space *mapping)
 107{
 108	tree->state = RB_ROOT;
 109	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 110	tree->ops = NULL;
 111	tree->dirty_bytes = 0;
 112	spin_lock_init(&tree->lock);
 113	spin_lock_init(&tree->buffer_lock);
 114	tree->mapping = mapping;
 115}
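
/*
 * Illustrative usage sketch (not taken verbatim from this file): an io tree
 * is normally paired with the address_space it will track, the way the
 * btrfs inode setup code does it:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 */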
 116
 117static struct extent_state *alloc_extent_state(gfp_t mask)
 118{
 119	struct extent_state *state;
 120#if LEAK_DEBUG
 121	unsigned long flags;
 122#endif
 123
 124	state = kmem_cache_alloc(extent_state_cache, mask);
 125	if (!state)
 126		return state;
 127	state->state = 0;
 128	state->private = 0;
 129	state->tree = NULL;
 130#if LEAK_DEBUG
 131	spin_lock_irqsave(&leak_lock, flags);
 132	list_add(&state->leak_list, &states);
 133	spin_unlock_irqrestore(&leak_lock, flags);
 134#endif
 135	atomic_set(&state->refs, 1);
 136	init_waitqueue_head(&state->wq);
 137	return state;
 138}
 139
 140void free_extent_state(struct extent_state *state)
 141{
 142	if (!state)
 143		return;
 144	if (atomic_dec_and_test(&state->refs)) {
 145#if LEAK_DEBUG
 146		unsigned long flags;
 147#endif
 148		WARN_ON(state->tree);
 149#if LEAK_DEBUG
 150		spin_lock_irqsave(&leak_lock, flags);
 151		list_del(&state->leak_list);
 152		spin_unlock_irqrestore(&leak_lock, flags);
 153#endif
 154		kmem_cache_free(extent_state_cache, state);
 155	}
 156}
 157
 158static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
 159				   struct rb_node *node)
 160{
 161	struct rb_node **p = &root->rb_node;
 162	struct rb_node *parent = NULL;
 163	struct tree_entry *entry;
 164
 165	while (*p) {
 166		parent = *p;
 167		entry = rb_entry(parent, struct tree_entry, rb_node);
 168
 169		if (offset < entry->start)
 170			p = &(*p)->rb_left;
 171		else if (offset > entry->end)
 172			p = &(*p)->rb_right;
 173		else
 174			return parent;
 175	}
 176
 177	entry = rb_entry(node, struct tree_entry, rb_node);
 178	rb_link_node(node, parent, p);
 179	rb_insert_color(node, root);
 180	return NULL;
 181}
 182
 183static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 184				     struct rb_node **prev_ret,
 185				     struct rb_node **next_ret)
 186{
 187	struct rb_root *root = &tree->state;
 188	struct rb_node *n = root->rb_node;
 189	struct rb_node *prev = NULL;
 190	struct rb_node *orig_prev = NULL;
 191	struct tree_entry *entry;
 192	struct tree_entry *prev_entry = NULL;
 193
 194	while (n) {
 195		entry = rb_entry(n, struct tree_entry, rb_node);
 196		prev = n;
 197		prev_entry = entry;
 198
 199		if (offset < entry->start)
 200			n = n->rb_left;
 201		else if (offset > entry->end)
 202			n = n->rb_right;
 203		else
 204			return n;
 205	}
 206
 207	if (prev_ret) {
 208		orig_prev = prev;
 209		while (prev && offset > prev_entry->end) {
 210			prev = rb_next(prev);
 211			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 212		}
 213		*prev_ret = prev;
 214		prev = orig_prev;
 215	}
 216
 217	if (next_ret) {
 218		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 219		while (prev && offset < prev_entry->start) {
 220			prev = rb_prev(prev);
 221			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 222		}
 223		*next_ret = prev;
 224	}
 225	return NULL;
 226}
 227
 228static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 229					  u64 offset)
 230{
 231	struct rb_node *prev = NULL;
 232	struct rb_node *ret;
 233
 234	ret = __etree_search(tree, offset, &prev, NULL);
 235	if (!ret)
 236		return prev;
 237	return ret;
 238}
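
/*
 * Worked example (illustrative): with entries [0, 4095] and [8192, 12287]
 * in the tree, tree_search(tree, 5000) finds no entry containing offset
 * 5000, so __etree_search's prev scan walks forward to the first entry
 * whose end is at or past the offset and the [8192, 12287] node is
 * returned.  Callers therefore get the first extent that ends after the
 * start of their range, which is what the search comments below rely on.
 */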
 239
 240static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 241		     struct extent_state *other)
 242{
 243	if (tree->ops && tree->ops->merge_extent_hook)
 244		tree->ops->merge_extent_hook(tree->mapping->host, new,
 245					     other);
 246}
 247
 248/*
 249 * utility function to look for merge candidates inside a given range.
 250 * Any extents with matching state are merged together into a single
  251 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 252 * are not merged because the end_io handlers need to be able to do
 253 * operations on them without sleeping (or doing allocations/splits).
 254 *
 255 * This should be called with the tree lock held.
 256 */
 257static void merge_state(struct extent_io_tree *tree,
 258		        struct extent_state *state)
 259{
 260	struct extent_state *other;
 261	struct rb_node *other_node;
 262
 263	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 264		return;
 265
 266	other_node = rb_prev(&state->rb_node);
 267	if (other_node) {
 268		other = rb_entry(other_node, struct extent_state, rb_node);
 269		if (other->end == state->start - 1 &&
 270		    other->state == state->state) {
 271			merge_cb(tree, state, other);
 272			state->start = other->start;
 273			other->tree = NULL;
 274			rb_erase(&other->rb_node, &tree->state);
 275			free_extent_state(other);
 276		}
 277	}
 278	other_node = rb_next(&state->rb_node);
 279	if (other_node) {
 280		other = rb_entry(other_node, struct extent_state, rb_node);
 281		if (other->start == state->end + 1 &&
 282		    other->state == state->state) {
 283			merge_cb(tree, state, other);
 284			state->end = other->end;
 285			other->tree = NULL;
 286			rb_erase(&other->rb_node, &tree->state);
 287			free_extent_state(other);
 288		}
 289	}
 290}
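
/*
 * Worked example (illustrative): if the tree holds [0, 4095] and
 * [4096, 8191] and both records carry exactly EXTENT_DIRTY, calling
 * merge_state() on the second record absorbs its neighbour and leaves a
 * single [0, 8191] record.  If either record also carried EXTENT_LOCKED
 * (part of EXTENT_IOBITS), the two would be left separate.
 */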
 291
 292static void set_state_cb(struct extent_io_tree *tree,
 293			 struct extent_state *state, int *bits)
 294{
 295	if (tree->ops && tree->ops->set_bit_hook)
 296		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 297}
 298
 299static void clear_state_cb(struct extent_io_tree *tree,
 300			   struct extent_state *state, int *bits)
 301{
 302	if (tree->ops && tree->ops->clear_bit_hook)
 303		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 304}
 305
 306static void set_state_bits(struct extent_io_tree *tree,
 307			   struct extent_state *state, int *bits);
 308
 309/*
 310 * insert an extent_state struct into the tree.  'bits' are set on the
 311 * struct before it is inserted.
 312 *
 313 * This may return -EEXIST if the extent is already there, in which case the
 314 * state struct is freed.
 315 *
 316 * The tree lock is not taken internally.  This is a utility function and
 317 * probably isn't what you want to call (see set/clear_extent_bit).
 318 */
 319static int insert_state(struct extent_io_tree *tree,
 320			struct extent_state *state, u64 start, u64 end,
 321			int *bits)
 322{
 323	struct rb_node *node;
 324
 325	if (end < start) {
 326		printk(KERN_ERR "btrfs end < start %llu %llu\n",
 327		       (unsigned long long)end,
 328		       (unsigned long long)start);
 329		WARN_ON(1);
 330	}
 331	state->start = start;
 332	state->end = end;
 333
 334	set_state_bits(tree, state, bits);
 335
 336	node = tree_insert(&tree->state, end, &state->rb_node);
 337	if (node) {
 338		struct extent_state *found;
 339		found = rb_entry(node, struct extent_state, rb_node);
 340		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
 341		       "%llu %llu\n", (unsigned long long)found->start,
 342		       (unsigned long long)found->end,
 343		       (unsigned long long)start, (unsigned long long)end);
 344		return -EEXIST;
 345	}
 346	state->tree = tree;
 347	merge_state(tree, state);
 348	return 0;
 349}
 350
 351static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 352		     u64 split)
 353{
 354	if (tree->ops && tree->ops->split_extent_hook)
 355		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
 356}
 357
 358/*
 359 * split a given extent state struct in two, inserting the preallocated
 360 * struct 'prealloc' as the newly created second half.  'split' indicates an
 361 * offset inside 'orig' where it should be split.
 362 *
 363 * Before calling,
 364 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 365 * are two extent state structs in the tree:
 366 * prealloc: [orig->start, split - 1]
 367 * orig: [ split, orig->end ]
 368 *
 369 * The tree locks are not taken by this function. They need to be held
 370 * by the caller.
 371 */
 372static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 373		       struct extent_state *prealloc, u64 split)
 374{
 375	struct rb_node *node;
 376
 377	split_cb(tree, orig, split);
 378
 379	prealloc->start = orig->start;
 380	prealloc->end = split - 1;
 381	prealloc->state = orig->state;
 382	orig->start = split;
 383
 384	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
 385	if (node) {
 386		free_extent_state(prealloc);
 387		return -EEXIST;
 388	}
 389	prealloc->tree = tree;
 390	return 0;
 391}
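
/*
 * Worked example (illustrative): with orig covering [0, 8191],
 * split_state(tree, orig, prealloc, 4096) leaves prealloc at [0, 4095]
 * and orig at [4096, 8191]; both keep the bits orig had before the call,
 * and prealloc is linked into the tree.
 */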
 392
 393/*
 394 * utility function to clear some bits in an extent state struct.
  395 * it will optionally wake up anyone waiting on this state
  396 * (wake == 1).
 397 *
 398 * If no bits are set on the state struct after clearing things, the
 399 * struct is freed and removed from the tree
 400 */
 401static int clear_state_bit(struct extent_io_tree *tree,
 402			    struct extent_state *state,
 403			    int *bits, int wake)
 404{
 405	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
 406	int ret = state->state & bits_to_clear;
 407
 408	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 409		u64 range = state->end - state->start + 1;
 410		WARN_ON(range > tree->dirty_bytes);
 411		tree->dirty_bytes -= range;
 412	}
 413	clear_state_cb(tree, state, bits);
 414	state->state &= ~bits_to_clear;
 415	if (wake)
 416		wake_up(&state->wq);
 417	if (state->state == 0) {
 418		if (state->tree) {
 419			rb_erase(&state->rb_node, &tree->state);
 420			state->tree = NULL;
 421			free_extent_state(state);
 422		} else {
 423			WARN_ON(1);
 424		}
 425	} else {
 426		merge_state(tree, state);
 427	}
 428	return ret;
 429}
 430
 431static struct extent_state *
 432alloc_extent_state_atomic(struct extent_state *prealloc)
 433{
 434	if (!prealloc)
 435		prealloc = alloc_extent_state(GFP_ATOMIC);
 436
 437	return prealloc;
 438}
 439
 440/*
 441 * clear some bits on a range in the tree.  This may require splitting
 442 * or inserting elements in the tree, so the gfp mask is used to
 443 * indicate which allocations or sleeping are allowed.
 444 *
 445 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
  446 * the given range from the tree regardless of state (i.e. for truncate).
 447 *
 448 * the range [start, end] is inclusive.
 449 *
 450 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 451 * bits were already set, or zero if none of the bits were already set.
 452 */
 453int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 454		     int bits, int wake, int delete,
 455		     struct extent_state **cached_state,
 456		     gfp_t mask)
 457{
 458	struct extent_state *state;
 459	struct extent_state *cached;
 460	struct extent_state *prealloc = NULL;
 461	struct rb_node *next_node;
 462	struct rb_node *node;
 463	u64 last_end;
 464	int err;
 465	int set = 0;
 466	int clear = 0;
 467
 468	if (delete)
 469		bits |= ~EXTENT_CTLBITS;
 470	bits |= EXTENT_FIRST_DELALLOC;
 471
 472	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 473		clear = 1;
 474again:
 475	if (!prealloc && (mask & __GFP_WAIT)) {
 476		prealloc = alloc_extent_state(mask);
 477		if (!prealloc)
 478			return -ENOMEM;
 479	}
 480
 481	spin_lock(&tree->lock);
 482	if (cached_state) {
 483		cached = *cached_state;
 484
 485		if (clear) {
 486			*cached_state = NULL;
 487			cached_state = NULL;
 488		}
 489
 490		if (cached && cached->tree && cached->start <= start &&
 491		    cached->end > start) {
 492			if (clear)
 493				atomic_dec(&cached->refs);
 494			state = cached;
 495			goto hit_next;
 496		}
 497		if (clear)
 498			free_extent_state(cached);
 499	}
 500	/*
 501	 * this search will find the extents that end after
 502	 * our range starts
 503	 */
 504	node = tree_search(tree, start);
 505	if (!node)
 506		goto out;
 507	state = rb_entry(node, struct extent_state, rb_node);
 508hit_next:
 509	if (state->start > end)
 510		goto out;
 511	WARN_ON(state->end < start);
 512	last_end = state->end;
 513
 514	/*
 515	 *     | ---- desired range ---- |
 516	 *  | state | or
 517	 *  | ------------- state -------------- |
 518	 *
 519	 * We need to split the extent we found, and may flip
 520	 * bits on second half.
 521	 *
 522	 * If the extent we found extends past our range, we
 523	 * just split and search again.  It'll get split again
 524	 * the next time though.
 525	 *
 526	 * If the extent we found is inside our range, we clear
 527	 * the desired bit on it.
 528	 */
 529
 530	if (state->start < start) {
 531		prealloc = alloc_extent_state_atomic(prealloc);
 532		BUG_ON(!prealloc);
 533		err = split_state(tree, state, prealloc, start);
 534		BUG_ON(err == -EEXIST);
 535		prealloc = NULL;
 536		if (err)
 537			goto out;
 538		if (state->end <= end) {
 539			set |= clear_state_bit(tree, state, &bits, wake);
 540			if (last_end == (u64)-1)
 541				goto out;
 542			start = last_end + 1;
 543		}
 544		goto search_again;
 545	}
 546	/*
 547	 * | ---- desired range ---- |
 548	 *                        | state |
 549	 * We need to split the extent, and clear the bit
 550	 * on the first half
 551	 */
 552	if (state->start <= end && state->end > end) {
 553		prealloc = alloc_extent_state_atomic(prealloc);
 554		BUG_ON(!prealloc);
 555		err = split_state(tree, state, prealloc, end + 1);
 556		BUG_ON(err == -EEXIST);
 557		if (wake)
 558			wake_up(&state->wq);
 559
 560		set |= clear_state_bit(tree, prealloc, &bits, wake);
 561
 562		prealloc = NULL;
 563		goto out;
 564	}
 565
 566	if (state->end < end && prealloc && !need_resched())
 567		next_node = rb_next(&state->rb_node);
 568	else
 569		next_node = NULL;
 570
 571	set |= clear_state_bit(tree, state, &bits, wake);
 572	if (last_end == (u64)-1)
 573		goto out;
 574	start = last_end + 1;
 575	if (start <= end && next_node) {
 576		state = rb_entry(next_node, struct extent_state,
 577				 rb_node);
 578		if (state->start == start)
 579			goto hit_next;
 580	}
 581	goto search_again;
 582
 583out:
 584	spin_unlock(&tree->lock);
 585	if (prealloc)
 586		free_extent_state(prealloc);
 587
 588	return set;
 589
 590search_again:
 591	if (start > end)
 592		goto out;
 593	spin_unlock(&tree->lock);
 594	if (mask & __GFP_WAIT)
 595		cond_resched();
 596	goto again;
 597}
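
/*
 * Usage sketch (illustrative, mirroring the wrappers further down in this
 * file): dropping the dirty and delalloc bits on an inclusive byte range,
 * with no cached state and no sleepers to wake, looks like
 *
 *	clear_extent_bit(tree, start, start + len - 1,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
 *			 GFP_NOFS);
 *
 * where 'start' and 'len' are hypothetical variables naming the range.
 */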
 598
 599static int wait_on_state(struct extent_io_tree *tree,
 600			 struct extent_state *state)
 601		__releases(tree->lock)
 602		__acquires(tree->lock)
 603{
 604	DEFINE_WAIT(wait);
 605	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 606	spin_unlock(&tree->lock);
 607	schedule();
 608	spin_lock(&tree->lock);
 609	finish_wait(&state->wq, &wait);
 610	return 0;
 611}
 612
 613/*
 614 * waits for one or more bits to clear on a range in the state tree.
 615 * The range [start, end] is inclusive.
 616 * The tree lock is taken by this function
 617 */
 618int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
 619{
 620	struct extent_state *state;
 621	struct rb_node *node;
 622
 623	spin_lock(&tree->lock);
 624again:
 625	while (1) {
 626		/*
 627		 * this search will find all the extents that end after
 628		 * our range starts
 629		 */
 630		node = tree_search(tree, start);
 631		if (!node)
 632			break;
 633
 634		state = rb_entry(node, struct extent_state, rb_node);
 635
 636		if (state->start > end)
 637			goto out;
 638
 639		if (state->state & bits) {
 640			start = state->start;
 641			atomic_inc(&state->refs);
 642			wait_on_state(tree, state);
 643			free_extent_state(state);
 644			goto again;
 645		}
 646		start = state->end + 1;
 647
 648		if (start > end)
 649			break;
 650
 651		cond_resched_lock(&tree->lock);
 652	}
 653out:
 654	spin_unlock(&tree->lock);
 655	return 0;
 656}
 657
 658static void set_state_bits(struct extent_io_tree *tree,
 659			   struct extent_state *state,
 660			   int *bits)
 661{
 662	int bits_to_set = *bits & ~EXTENT_CTLBITS;
 663
 664	set_state_cb(tree, state, bits);
 665	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 666		u64 range = state->end - state->start + 1;
 667		tree->dirty_bytes += range;
 668	}
 669	state->state |= bits_to_set;
 670}
 671
 672static void cache_state(struct extent_state *state,
 673			struct extent_state **cached_ptr)
 674{
 675	if (cached_ptr && !(*cached_ptr)) {
 676		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
 677			*cached_ptr = state;
 678			atomic_inc(&state->refs);
 679		}
 680	}
 681}
 682
 683static void uncache_state(struct extent_state **cached_ptr)
 684{
 685	if (cached_ptr && (*cached_ptr)) {
 686		struct extent_state *state = *cached_ptr;
 687		*cached_ptr = NULL;
 688		free_extent_state(state);
 689	}
 690}
 691
 692/*
 693 * set some bits on a range in the tree.  This may require allocations or
 694 * sleeping, so the gfp mask is used to indicate what is allowed.
 695 *
 696 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 697 * part of the range already has the desired bits set.  The start of the
 698 * existing range is returned in failed_start in this case.
 699 *
  700 * [start, end] is inclusive.  This takes the tree lock.
 701 */
 702
 703int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 704		   int bits, int exclusive_bits, u64 *failed_start,
 705		   struct extent_state **cached_state, gfp_t mask)
 706{
 707	struct extent_state *state;
 708	struct extent_state *prealloc = NULL;
 709	struct rb_node *node;
 710	int err = 0;
 711	u64 last_start;
 712	u64 last_end;
 713
 714	bits |= EXTENT_FIRST_DELALLOC;
 715again:
 716	if (!prealloc && (mask & __GFP_WAIT)) {
 717		prealloc = alloc_extent_state(mask);
 718		BUG_ON(!prealloc);
 719	}
 720
 721	spin_lock(&tree->lock);
 722	if (cached_state && *cached_state) {
 723		state = *cached_state;
 724		if (state->start <= start && state->end > start &&
 725		    state->tree) {
 726			node = &state->rb_node;
 727			goto hit_next;
 728		}
 729	}
 730	/*
 731	 * this search will find all the extents that end after
 732	 * our range starts.
 733	 */
 734	node = tree_search(tree, start);
 735	if (!node) {
 736		prealloc = alloc_extent_state_atomic(prealloc);
 737		BUG_ON(!prealloc);
 738		err = insert_state(tree, prealloc, start, end, &bits);
 739		prealloc = NULL;
 740		BUG_ON(err == -EEXIST);
 741		goto out;
 742	}
 743	state = rb_entry(node, struct extent_state, rb_node);
 744hit_next:
 745	last_start = state->start;
 746	last_end = state->end;
 747
 748	/*
 749	 * | ---- desired range ---- |
 750	 * | state |
 751	 *
 752	 * Just lock what we found and keep going
 753	 */
 754	if (state->start == start && state->end <= end) {
 755		struct rb_node *next_node;
 756		if (state->state & exclusive_bits) {
 757			*failed_start = state->start;
 758			err = -EEXIST;
 759			goto out;
 760		}
 761
 762		set_state_bits(tree, state, &bits);
 763
 764		cache_state(state, cached_state);
 765		merge_state(tree, state);
 766		if (last_end == (u64)-1)
 767			goto out;
 768
 769		start = last_end + 1;
 770		next_node = rb_next(&state->rb_node);
 771		if (next_node && start < end && prealloc && !need_resched()) {
 772			state = rb_entry(next_node, struct extent_state,
 773					 rb_node);
 774			if (state->start == start)
 775				goto hit_next;
 776		}
 777		goto search_again;
 778	}
 779
 780	/*
 781	 *     | ---- desired range ---- |
 782	 * | state |
 783	 *   or
 784	 * | ------------- state -------------- |
 785	 *
 786	 * We need to split the extent we found, and may flip bits on
 787	 * second half.
 788	 *
 789	 * If the extent we found extends past our
 790	 * range, we just split and search again.  It'll get split
 791	 * again the next time though.
 792	 *
 793	 * If the extent we found is inside our range, we set the
 794	 * desired bit on it.
 795	 */
 796	if (state->start < start) {
 797		if (state->state & exclusive_bits) {
 798			*failed_start = start;
 799			err = -EEXIST;
 800			goto out;
 801		}
 802
 803		prealloc = alloc_extent_state_atomic(prealloc);
 804		BUG_ON(!prealloc);
 805		err = split_state(tree, state, prealloc, start);
 806		BUG_ON(err == -EEXIST);
 807		prealloc = NULL;
 808		if (err)
 809			goto out;
 810		if (state->end <= end) {
 811			set_state_bits(tree, state, &bits);
 812			cache_state(state, cached_state);
 813			merge_state(tree, state);
 814			if (last_end == (u64)-1)
 815				goto out;
 816			start = last_end + 1;
 817		}
 818		goto search_again;
 819	}
 820	/*
 821	 * | ---- desired range ---- |
 822	 *     | state | or               | state |
 823	 *
 824	 * There's a hole, we need to insert something in it and
 825	 * ignore the extent we found.
 826	 */
 827	if (state->start > start) {
 828		u64 this_end;
 829		if (end < last_start)
 830			this_end = end;
 831		else
 832			this_end = last_start - 1;
 833
 834		prealloc = alloc_extent_state_atomic(prealloc);
 835		BUG_ON(!prealloc);
 836
 837		/*
 838		 * Avoid to free 'prealloc' if it can be merged with
 839		 * the later extent.
 840		 */
 841		err = insert_state(tree, prealloc, start, this_end,
 842				   &bits);
 843		BUG_ON(err == -EEXIST);
 844		if (err) {
 845			free_extent_state(prealloc);
 846			prealloc = NULL;
 847			goto out;
 848		}
 849		cache_state(prealloc, cached_state);
 850		prealloc = NULL;
 851		start = this_end + 1;
 852		goto search_again;
 853	}
 854	/*
 855	 * | ---- desired range ---- |
 856	 *                        | state |
 857	 * We need to split the extent, and set the bit
 858	 * on the first half
 859	 */
 860	if (state->start <= end && state->end > end) {
 861		if (state->state & exclusive_bits) {
 862			*failed_start = start;
 863			err = -EEXIST;
 864			goto out;
 865		}
 866
 867		prealloc = alloc_extent_state_atomic(prealloc);
 868		BUG_ON(!prealloc);
 869		err = split_state(tree, state, prealloc, end + 1);
 870		BUG_ON(err == -EEXIST);
 871
 872		set_state_bits(tree, prealloc, &bits);
 873		cache_state(prealloc, cached_state);
 874		merge_state(tree, prealloc);
 875		prealloc = NULL;
 876		goto out;
 877	}
 878
 879	goto search_again;
 880
 881out:
 882	spin_unlock(&tree->lock);
 883	if (prealloc)
 884		free_extent_state(prealloc);
 885
 886	return err;
 887
 888search_again:
 889	if (start > end)
 890		goto out;
 891	spin_unlock(&tree->lock);
 892	if (mask & __GFP_WAIT)
 893		cond_resched();
 894	goto again;
 895}
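
/*
 * Usage sketch (illustrative): the wrappers below fill in most arguments;
 * calling the core routine directly to mark a range delalloc while keeping
 * a cached pointer to the resulting state record might look like
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_bit(tree, start, end,
 *		       EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
 *		       0, NULL, &cached, GFP_NOFS);
 *	...
 *	free_extent_state(cached);
 *
 * 'start' and 'end' are hypothetical; the bit combination is the one
 * set_extent_delalloc() passes.
 */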
 896
 897/* wrappers around set/clear extent bit */
 898int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 899		     gfp_t mask)
 900{
 901	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
 902			      NULL, mask);
 903}
 904
 905int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 906		    int bits, gfp_t mask)
 907{
 908	return set_extent_bit(tree, start, end, bits, 0, NULL,
 909			      NULL, mask);
 910}
 911
 912int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 913		      int bits, gfp_t mask)
 914{
 915	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 916}
 917
 918int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 919			struct extent_state **cached_state, gfp_t mask)
 920{
 921	return set_extent_bit(tree, start, end,
 922			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
 923			      0, NULL, cached_state, mask);
 924}
 925
 926int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 927		       gfp_t mask)
 928{
 929	return clear_extent_bit(tree, start, end,
 930				EXTENT_DIRTY | EXTENT_DELALLOC |
 931				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
 932}
 933
 934int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 935		     gfp_t mask)
 936{
 937	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
 938			      NULL, mask);
 939}
 940
 941int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 942			struct extent_state **cached_state, gfp_t mask)
 943{
 944	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
 945			      NULL, cached_state, mask);
 946}
 947
 948static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 949				 u64 end, struct extent_state **cached_state,
 950				 gfp_t mask)
 951{
 952	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
 953				cached_state, mask);
 954}
 955
 956/*
  957 * either insert or lock state struct between start and end; use mask to tell
 958 * us if waiting is desired.
 959 */
 960int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 961		     int bits, struct extent_state **cached_state, gfp_t mask)
 962{
 963	int err;
 964	u64 failed_start;
 965	while (1) {
 966		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
 967				     EXTENT_LOCKED, &failed_start,
 968				     cached_state, mask);
 969		if (err == -EEXIST && (mask & __GFP_WAIT)) {
 970			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 971			start = failed_start;
 972		} else {
 973			break;
 974		}
 975		WARN_ON(start > end);
 976	}
 977	return err;
 978}
 979
 980int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 981{
 982	return lock_extent_bits(tree, start, end, 0, NULL, mask);
 983}
 984
 985int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 986		    gfp_t mask)
 987{
 988	int err;
 989	u64 failed_start;
 990
 991	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
 992			     &failed_start, NULL, mask);
 993	if (err == -EEXIST) {
 994		if (failed_start > start)
 995			clear_extent_bit(tree, start, failed_start - 1,
 996					 EXTENT_LOCKED, 1, 0, NULL, mask);
 997		return 0;
 998	}
 999	return 1;
1000}
1001
1002int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1003			 struct extent_state **cached, gfp_t mask)
1004{
1005	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1006				mask);
1007}
1008
1009int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1010{
1011	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1012				mask);
1013}
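
/*
 * Usage sketch (illustrative): lock_extent()/unlock_extent() bracket work
 * on an inclusive byte range, much as __extent_read_full_page() does
 * further down:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... look up mappings, issue the IO, wait for ordered extents ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */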
1014
1015/*
1016 * helper function to set both pages and extents in the tree writeback
1017 */
1018static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1019{
1020	unsigned long index = start >> PAGE_CACHE_SHIFT;
1021	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1022	struct page *page;
1023
1024	while (index <= end_index) {
1025		page = find_get_page(tree->mapping, index);
1026		BUG_ON(!page);
1027		set_page_writeback(page);
1028		page_cache_release(page);
1029		index++;
1030	}
1031	return 0;
1032}
1033
1034/* find the first state struct with 'bits' set after 'start', and
 1035 * return it.  tree->lock must be held.  NULL will be returned if
1036 * nothing was found after 'start'
1037 */
1038struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1039						 u64 start, int bits)
1040{
1041	struct rb_node *node;
1042	struct extent_state *state;
1043
1044	/*
1045	 * this search will find all the extents that end after
1046	 * our range starts.
1047	 */
1048	node = tree_search(tree, start);
1049	if (!node)
1050		goto out;
1051
1052	while (1) {
1053		state = rb_entry(node, struct extent_state, rb_node);
1054		if (state->end >= start && (state->state & bits))
1055			return state;
1056
1057		node = rb_next(node);
1058		if (!node)
1059			break;
1060	}
1061out:
1062	return NULL;
1063}
1064
1065/*
1066 * find the first offset in the io tree with 'bits' set. zero is
1067 * returned if we find something, and *start_ret and *end_ret are
1068 * set to reflect the state struct that was found.
1069 *
1070 * If nothing was found, 1 is returned, < 0 on error
1071 */
1072int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1073			  u64 *start_ret, u64 *end_ret, int bits)
1074{
1075	struct extent_state *state;
1076	int ret = 1;
1077
1078	spin_lock(&tree->lock);
1079	state = find_first_extent_bit_state(tree, start, bits);
1080	if (state) {
1081		*start_ret = state->start;
1082		*end_ret = state->end;
1083		ret = 0;
1084	}
1085	spin_unlock(&tree->lock);
1086	return ret;
1087}
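
/*
 * Usage sketch (illustrative): walking forward over every delalloc range
 * in the tree, using the 0-on-success return value:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DELALLOC)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 *
 * (variable names here are hypothetical).
 */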
1088
1089/*
1090 * find a contiguous range of bytes in the file marked as delalloc, not
 1091 * more than 'max_bytes'.  start and end are used to return the range.
1092 *
1093 * 1 is returned if we find something, 0 if nothing was in the tree
1094 */
1095static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1096					u64 *start, u64 *end, u64 max_bytes,
1097					struct extent_state **cached_state)
1098{
1099	struct rb_node *node;
1100	struct extent_state *state;
1101	u64 cur_start = *start;
1102	u64 found = 0;
1103	u64 total_bytes = 0;
1104
1105	spin_lock(&tree->lock);
1106
1107	/*
1108	 * this search will find all the extents that end after
1109	 * our range starts.
1110	 */
1111	node = tree_search(tree, cur_start);
1112	if (!node) {
1113		if (!found)
1114			*end = (u64)-1;
1115		goto out;
1116	}
1117
1118	while (1) {
1119		state = rb_entry(node, struct extent_state, rb_node);
1120		if (found && (state->start != cur_start ||
1121			      (state->state & EXTENT_BOUNDARY))) {
1122			goto out;
1123		}
1124		if (!(state->state & EXTENT_DELALLOC)) {
1125			if (!found)
1126				*end = state->end;
1127			goto out;
1128		}
1129		if (!found) {
1130			*start = state->start;
1131			*cached_state = state;
1132			atomic_inc(&state->refs);
1133		}
1134		found++;
1135		*end = state->end;
1136		cur_start = state->end + 1;
1137		node = rb_next(node);
1138		if (!node)
1139			break;
1140		total_bytes += state->end - state->start + 1;
1141		if (total_bytes >= max_bytes)
1142			break;
1143	}
1144out:
1145	spin_unlock(&tree->lock);
1146	return found;
1147}
1148
1149static noinline int __unlock_for_delalloc(struct inode *inode,
1150					  struct page *locked_page,
1151					  u64 start, u64 end)
1152{
1153	int ret;
1154	struct page *pages[16];
1155	unsigned long index = start >> PAGE_CACHE_SHIFT;
1156	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1157	unsigned long nr_pages = end_index - index + 1;
1158	int i;
1159
1160	if (index == locked_page->index && end_index == index)
1161		return 0;
1162
1163	while (nr_pages > 0) {
1164		ret = find_get_pages_contig(inode->i_mapping, index,
1165				     min_t(unsigned long, nr_pages,
1166				     ARRAY_SIZE(pages)), pages);
1167		for (i = 0; i < ret; i++) {
1168			if (pages[i] != locked_page)
1169				unlock_page(pages[i]);
1170			page_cache_release(pages[i]);
1171		}
1172		nr_pages -= ret;
1173		index += ret;
1174		cond_resched();
1175	}
1176	return 0;
1177}
1178
1179static noinline int lock_delalloc_pages(struct inode *inode,
1180					struct page *locked_page,
1181					u64 delalloc_start,
1182					u64 delalloc_end)
1183{
1184	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1185	unsigned long start_index = index;
1186	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1187	unsigned long pages_locked = 0;
1188	struct page *pages[16];
1189	unsigned long nrpages;
1190	int ret;
1191	int i;
1192
1193	/* the caller is responsible for locking the start index */
1194	if (index == locked_page->index && index == end_index)
1195		return 0;
1196
1197	/* skip the page at the start index */
1198	nrpages = end_index - index + 1;
1199	while (nrpages > 0) {
1200		ret = find_get_pages_contig(inode->i_mapping, index,
1201				     min_t(unsigned long,
1202				     nrpages, ARRAY_SIZE(pages)), pages);
1203		if (ret == 0) {
1204			ret = -EAGAIN;
1205			goto done;
1206		}
1207		/* now we have an array of pages, lock them all */
1208		for (i = 0; i < ret; i++) {
1209			/*
1210			 * the caller is taking responsibility for
1211			 * locked_page
1212			 */
1213			if (pages[i] != locked_page) {
1214				lock_page(pages[i]);
1215				if (!PageDirty(pages[i]) ||
1216				    pages[i]->mapping != inode->i_mapping) {
1217					ret = -EAGAIN;
1218					unlock_page(pages[i]);
1219					page_cache_release(pages[i]);
1220					goto done;
1221				}
1222			}
1223			page_cache_release(pages[i]);
1224			pages_locked++;
1225		}
1226		nrpages -= ret;
1227		index += ret;
1228		cond_resched();
1229	}
1230	ret = 0;
1231done:
1232	if (ret && pages_locked) {
1233		__unlock_for_delalloc(inode, locked_page,
1234			      delalloc_start,
1235			      ((u64)(start_index + pages_locked - 1)) <<
1236			      PAGE_CACHE_SHIFT);
1237	}
1238	return ret;
1239}
1240
1241/*
1242 * find a contiguous range of bytes in the file marked as delalloc, not
 1243 * more than 'max_bytes'.  start and end are used to return the range.
1244 *
1245 * 1 is returned if we find something, 0 if nothing was in the tree
1246 */
1247static noinline u64 find_lock_delalloc_range(struct inode *inode,
1248					     struct extent_io_tree *tree,
1249					     struct page *locked_page,
1250					     u64 *start, u64 *end,
1251					     u64 max_bytes)
1252{
1253	u64 delalloc_start;
1254	u64 delalloc_end;
1255	u64 found;
1256	struct extent_state *cached_state = NULL;
1257	int ret;
1258	int loops = 0;
1259
1260again:
1261	/* step one, find a bunch of delalloc bytes starting at start */
1262	delalloc_start = *start;
1263	delalloc_end = 0;
1264	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1265				    max_bytes, &cached_state);
1266	if (!found || delalloc_end <= *start) {
1267		*start = delalloc_start;
1268		*end = delalloc_end;
1269		free_extent_state(cached_state);
1270		return found;
1271	}
1272
1273	/*
1274	 * start comes from the offset of locked_page.  We have to lock
1275	 * pages in order, so we can't process delalloc bytes before
1276	 * locked_page
1277	 */
1278	if (delalloc_start < *start)
1279		delalloc_start = *start;
1280
1281	/*
1282	 * make sure to limit the number of pages we try to lock down
1283	 * if we're looping.
1284	 */
1285	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1286		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1287
1288	/* step two, lock all the pages after the page that has start */
1289	ret = lock_delalloc_pages(inode, locked_page,
1290				  delalloc_start, delalloc_end);
1291	if (ret == -EAGAIN) {
1292		/* some of the pages are gone, lets avoid looping by
1293		 * shortening the size of the delalloc range we're searching
1294		 */
1295		free_extent_state(cached_state);
1296		if (!loops) {
1297			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1298			max_bytes = PAGE_CACHE_SIZE - offset;
1299			loops = 1;
1300			goto again;
1301		} else {
1302			found = 0;
1303			goto out_failed;
1304		}
1305	}
1306	BUG_ON(ret);
1307
1308	/* step three, lock the state bits for the whole range */
1309	lock_extent_bits(tree, delalloc_start, delalloc_end,
1310			 0, &cached_state, GFP_NOFS);
1311
1312	/* then test to make sure it is all still delalloc */
1313	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1314			     EXTENT_DELALLOC, 1, cached_state);
1315	if (!ret) {
1316		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1317				     &cached_state, GFP_NOFS);
1318		__unlock_for_delalloc(inode, locked_page,
1319			      delalloc_start, delalloc_end);
1320		cond_resched();
1321		goto again;
1322	}
1323	free_extent_state(cached_state);
1324	*start = delalloc_start;
1325	*end = delalloc_end;
1326out_failed:
1327	return found;
1328}
1329
1330int extent_clear_unlock_delalloc(struct inode *inode,
1331				struct extent_io_tree *tree,
1332				u64 start, u64 end, struct page *locked_page,
1333				unsigned long op)
1334{
1335	int ret;
1336	struct page *pages[16];
1337	unsigned long index = start >> PAGE_CACHE_SHIFT;
1338	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1339	unsigned long nr_pages = end_index - index + 1;
1340	int i;
1341	int clear_bits = 0;
1342
1343	if (op & EXTENT_CLEAR_UNLOCK)
1344		clear_bits |= EXTENT_LOCKED;
1345	if (op & EXTENT_CLEAR_DIRTY)
1346		clear_bits |= EXTENT_DIRTY;
1347
1348	if (op & EXTENT_CLEAR_DELALLOC)
1349		clear_bits |= EXTENT_DELALLOC;
1350
1351	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1352	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1353		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1354		    EXTENT_SET_PRIVATE2)))
1355		return 0;
1356
1357	while (nr_pages > 0) {
1358		ret = find_get_pages_contig(inode->i_mapping, index,
1359				     min_t(unsigned long,
1360				     nr_pages, ARRAY_SIZE(pages)), pages);
1361		for (i = 0; i < ret; i++) {
1362
1363			if (op & EXTENT_SET_PRIVATE2)
1364				SetPagePrivate2(pages[i]);
1365
1366			if (pages[i] == locked_page) {
1367				page_cache_release(pages[i]);
1368				continue;
1369			}
1370			if (op & EXTENT_CLEAR_DIRTY)
1371				clear_page_dirty_for_io(pages[i]);
1372			if (op & EXTENT_SET_WRITEBACK)
1373				set_page_writeback(pages[i]);
1374			if (op & EXTENT_END_WRITEBACK)
1375				end_page_writeback(pages[i]);
1376			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1377				unlock_page(pages[i]);
1378			page_cache_release(pages[i]);
1379		}
1380		nr_pages -= ret;
1381		index += ret;
1382		cond_resched();
1383	}
1384	return 0;
1385}
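
/*
 * Usage sketch (illustrative): 'op' is a bitmask of the EXTENT_CLEAR_* and
 * EXTENT_SET_* flags tested above, so a caller tearing down a delalloc
 * range might clear the state bits and release every page in one call:
 *
 *	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK_PAGE |
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_DIRTY |
 *				     EXTENT_SET_WRITEBACK |
 *				     EXTENT_END_WRITEBACK);
 *
 * The exact flag combination is up to the caller.
 */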
1386
1387/*
1388 * count the number of bytes in the tree that have a given bit(s)
1389 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1390 * cached.  The total number found is returned.
1391 */
1392u64 count_range_bits(struct extent_io_tree *tree,
1393		     u64 *start, u64 search_end, u64 max_bytes,
1394		     unsigned long bits, int contig)
1395{
1396	struct rb_node *node;
1397	struct extent_state *state;
1398	u64 cur_start = *start;
1399	u64 total_bytes = 0;
1400	u64 last = 0;
1401	int found = 0;
1402
1403	if (search_end <= cur_start) {
1404		WARN_ON(1);
1405		return 0;
1406	}
1407
1408	spin_lock(&tree->lock);
1409	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1410		total_bytes = tree->dirty_bytes;
1411		goto out;
1412	}
1413	/*
1414	 * this search will find all the extents that end after
1415	 * our range starts.
1416	 */
1417	node = tree_search(tree, cur_start);
1418	if (!node)
1419		goto out;
1420
1421	while (1) {
1422		state = rb_entry(node, struct extent_state, rb_node);
1423		if (state->start > search_end)
1424			break;
1425		if (contig && found && state->start > last + 1)
1426			break;
1427		if (state->end >= cur_start && (state->state & bits) == bits) {
1428			total_bytes += min(search_end, state->end) + 1 -
1429				       max(cur_start, state->start);
1430			if (total_bytes >= max_bytes)
1431				break;
1432			if (!found) {
1433				*start = max(cur_start, state->start);
1434				found = 1;
1435			}
1436			last = state->end;
1437		} else if (contig && found) {
1438			break;
1439		}
1440		node = rb_next(node);
1441		if (!node)
1442			break;
1443	}
1444out:
1445	spin_unlock(&tree->lock);
1446	return total_bytes;
1447}
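
/*
 * Usage sketch (illustrative): counting how many delalloc bytes exist at
 * or after a given offset, without requiring them to be contiguous:
 *
 *	u64 cur = offset;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(tree, &cur, (u64)-1, (u64)-1,
 *				 EXTENT_DELALLOC, 0);
 *
 * On return 'cur' holds the first matching byte at or after 'offset' and
 * 'bytes' the total counted ('offset' is a hypothetical variable).
 */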
1448
1449/*
1450 * set the private field for a given byte offset in the tree.  If there isn't
 1451 * an extent_state there already, -ENOENT is returned and nothing changes.
1452 */
1453int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1454{
1455	struct rb_node *node;
1456	struct extent_state *state;
1457	int ret = 0;
1458
1459	spin_lock(&tree->lock);
1460	/*
1461	 * this search will find all the extents that end after
1462	 * our range starts.
1463	 */
1464	node = tree_search(tree, start);
1465	if (!node) {
1466		ret = -ENOENT;
1467		goto out;
1468	}
1469	state = rb_entry(node, struct extent_state, rb_node);
1470	if (state->start != start) {
1471		ret = -ENOENT;
1472		goto out;
1473	}
1474	state->private = private;
1475out:
1476	spin_unlock(&tree->lock);
1477	return ret;
1478}
1479
1480int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1481{
1482	struct rb_node *node;
1483	struct extent_state *state;
1484	int ret = 0;
1485
1486	spin_lock(&tree->lock);
1487	/*
1488	 * this search will find all the extents that end after
1489	 * our range starts.
1490	 */
1491	node = tree_search(tree, start);
1492	if (!node) {
1493		ret = -ENOENT;
1494		goto out;
1495	}
1496	state = rb_entry(node, struct extent_state, rb_node);
1497	if (state->start != start) {
1498		ret = -ENOENT;
1499		goto out;
1500	}
1501	*private = state->private;
1502out:
1503	spin_unlock(&tree->lock);
1504	return ret;
1505}
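
/*
 * Usage sketch (illustrative): btrfs, for instance, stashes the expected
 * checksum of a block in the private field so the read-completion path can
 * verify the data; the pattern is roughly
 *
 *	u64 csum;
 *
 *	set_state_private(tree, block_start, expected_csum);
 *	...
 *	if (!get_state_private(tree, block_start, &csum))
 *		... compare 'csum' against the data just read ...
 *
 * Both calls expect an extent_state that begins exactly at the given
 * offset ('block_start' and 'expected_csum' are hypothetical names).
 */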
1506
1507/*
1508 * searches a range in the state tree for a given mask.
1509 * If 'filled' == 1, this returns 1 only if every extent in the tree
1510 * has the bits set.  Otherwise, 1 is returned if any bit in the
1511 * range is found set.
1512 */
1513int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1514		   int bits, int filled, struct extent_state *cached)
1515{
1516	struct extent_state *state = NULL;
1517	struct rb_node *node;
1518	int bitset = 0;
1519
1520	spin_lock(&tree->lock);
1521	if (cached && cached->tree && cached->start <= start &&
1522	    cached->end > start)
1523		node = &cached->rb_node;
1524	else
1525		node = tree_search(tree, start);
1526	while (node && start <= end) {
1527		state = rb_entry(node, struct extent_state, rb_node);
1528
1529		if (filled && state->start > start) {
1530			bitset = 0;
1531			break;
1532		}
1533
1534		if (state->start > end)
1535			break;
1536
1537		if (state->state & bits) {
1538			bitset = 1;
1539			if (!filled)
1540				break;
1541		} else if (filled) {
1542			bitset = 0;
1543			break;
1544		}
1545
1546		if (state->end == (u64)-1)
1547			break;
1548
1549		start = state->end + 1;
1550		if (start > end)
1551			break;
1552		node = rb_next(node);
1553		if (!node) {
1554			if (filled)
1555				bitset = 0;
1556			break;
1557		}
1558	}
1559	spin_unlock(&tree->lock);
1560	return bitset;
1561}
1562
1563/*
1564 * helper function to set a given page up to date if all the
1565 * extents in the tree for that page are up to date
1566 */
1567static int check_page_uptodate(struct extent_io_tree *tree,
1568			       struct page *page)
1569{
1570	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1571	u64 end = start + PAGE_CACHE_SIZE - 1;
1572	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1573		SetPageUptodate(page);
1574	return 0;
1575}
1576
1577/*
1578 * helper function to unlock a page if all the extents in the tree
1579 * for that page are unlocked
1580 */
1581static int check_page_locked(struct extent_io_tree *tree,
1582			     struct page *page)
1583{
1584	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1585	u64 end = start + PAGE_CACHE_SIZE - 1;
1586	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1587		unlock_page(page);
1588	return 0;
1589}
1590
1591/*
1592 * helper function to end page writeback if all the extents
1593 * in the tree for that page are done with writeback
1594 */
1595static int check_page_writeback(struct extent_io_tree *tree,
1596			     struct page *page)
1597{
1598	end_page_writeback(page);
1599	return 0;
1600}
1601
1602/* lots and lots of room for performance fixes in the end_bio funcs */
1603
1604/*
1605 * after a writepage IO is done, we need to:
1606 * clear the uptodate bits on error
1607 * clear the writeback bits in the extent tree for this IO
1608 * end_page_writeback if the page has no more pending IO
1609 *
1610 * Scheduling is not allowed, so the extent state tree is expected
1611 * to have one and only one object corresponding to this IO.
1612 */
1613static void end_bio_extent_writepage(struct bio *bio, int err)
1614{
1615	int uptodate = err == 0;
1616	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1617	struct extent_io_tree *tree;
1618	u64 start;
1619	u64 end;
1620	int whole_page;
1621	int ret;
1622
1623	do {
1624		struct page *page = bvec->bv_page;
1625		tree = &BTRFS_I(page->mapping->host)->io_tree;
1626
1627		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1628			 bvec->bv_offset;
1629		end = start + bvec->bv_len - 1;
1630
1631		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1632			whole_page = 1;
1633		else
1634			whole_page = 0;
1635
1636		if (--bvec >= bio->bi_io_vec)
1637			prefetchw(&bvec->bv_page->flags);
1638		if (tree->ops && tree->ops->writepage_end_io_hook) {
1639			ret = tree->ops->writepage_end_io_hook(page, start,
1640						       end, NULL, uptodate);
1641			if (ret)
1642				uptodate = 0;
1643		}
1644
1645		if (!uptodate && tree->ops &&
1646		    tree->ops->writepage_io_failed_hook) {
1647			ret = tree->ops->writepage_io_failed_hook(bio, page,
1648							 start, end, NULL);
1649			if (ret == 0) {
1650				uptodate = (err == 0);
1651				continue;
1652			}
1653		}
1654
1655		if (!uptodate) {
1656			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1657			ClearPageUptodate(page);
1658			SetPageError(page);
1659		}
1660
1661		if (whole_page)
1662			end_page_writeback(page);
1663		else
1664			check_page_writeback(tree, page);
1665	} while (bvec >= bio->bi_io_vec);
1666
1667	bio_put(bio);
1668}
1669
1670/*
1671 * after a readpage IO is done, we need to:
1672 * clear the uptodate bits on error
1673 * set the uptodate bits if things worked
1674 * set the page up to date if all extents in the tree are uptodate
1675 * clear the lock bit in the extent tree
1676 * unlock the page if there are no other extents locked for it
1677 *
1678 * Scheduling is not allowed, so the extent state tree is expected
1679 * to have one and only one object corresponding to this IO.
1680 */
1681static void end_bio_extent_readpage(struct bio *bio, int err)
1682{
1683	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1684	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1685	struct bio_vec *bvec = bio->bi_io_vec;
1686	struct extent_io_tree *tree;
1687	u64 start;
1688	u64 end;
1689	int whole_page;
1690	int ret;
1691
1692	if (err)
1693		uptodate = 0;
1694
1695	do {
1696		struct page *page = bvec->bv_page;
1697		struct extent_state *cached = NULL;
1698		struct extent_state *state;
1699
1700		tree = &BTRFS_I(page->mapping->host)->io_tree;
1701
1702		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1703			bvec->bv_offset;
1704		end = start + bvec->bv_len - 1;
1705
1706		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1707			whole_page = 1;
1708		else
1709			whole_page = 0;
1710
1711		if (++bvec <= bvec_end)
1712			prefetchw(&bvec->bv_page->flags);
1713
1714		spin_lock(&tree->lock);
1715		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1716		if (state && state->start == start) {
1717			/*
1718			 * take a reference on the state, unlock will drop
1719			 * the ref
1720			 */
1721			cache_state(state, &cached);
1722		}
1723		spin_unlock(&tree->lock);
1724
1725		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1726			ret = tree->ops->readpage_end_io_hook(page, start, end,
1727							      state);
1728			if (ret)
1729				uptodate = 0;
1730		}
1731		if (!uptodate && tree->ops &&
1732		    tree->ops->readpage_io_failed_hook) {
1733			ret = tree->ops->readpage_io_failed_hook(bio, page,
1734							 start, end, NULL);
1735			if (ret == 0) {
1736				uptodate =
1737					test_bit(BIO_UPTODATE, &bio->bi_flags);
1738				if (err)
1739					uptodate = 0;
1740				uncache_state(&cached);
1741				continue;
1742			}
1743		}
1744
1745		if (uptodate) {
1746			set_extent_uptodate(tree, start, end, &cached,
1747					    GFP_ATOMIC);
1748		}
1749		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1750
1751		if (whole_page) {
1752			if (uptodate) {
1753				SetPageUptodate(page);
1754			} else {
1755				ClearPageUptodate(page);
1756				SetPageError(page);
1757			}
1758			unlock_page(page);
1759		} else {
1760			if (uptodate) {
1761				check_page_uptodate(tree, page);
1762			} else {
1763				ClearPageUptodate(page);
1764				SetPageError(page);
1765			}
1766			check_page_locked(tree, page);
1767		}
1768	} while (bvec <= bvec_end);
1769
1770	bio_put(bio);
1771}
1772
1773struct bio *
1774btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1775		gfp_t gfp_flags)
1776{
1777	struct bio *bio;
1778
1779	bio = bio_alloc(gfp_flags, nr_vecs);
1780
1781	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1782		while (!bio && (nr_vecs /= 2))
1783			bio = bio_alloc(gfp_flags, nr_vecs);
1784	}
1785
1786	if (bio) {
1787		bio->bi_size = 0;
1788		bio->bi_bdev = bdev;
1789		bio->bi_sector = first_sector;
1790	}
1791	return bio;
1792}
1793
1794static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1795			  unsigned long bio_flags)
1796{
1797	int ret = 0;
1798	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1799	struct page *page = bvec->bv_page;
1800	struct extent_io_tree *tree = bio->bi_private;
1801	u64 start;
1802
1803	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1804
1805	bio->bi_private = NULL;
1806
1807	bio_get(bio);
1808
1809	if (tree->ops && tree->ops->submit_bio_hook)
1810		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1811					   mirror_num, bio_flags, start);
1812	else
1813		submit_bio(rw, bio);
1814	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1815		ret = -EOPNOTSUPP;
1816	bio_put(bio);
1817	return ret;
1818}
1819
1820static int submit_extent_page(int rw, struct extent_io_tree *tree,
1821			      struct page *page, sector_t sector,
1822			      size_t size, unsigned long offset,
1823			      struct block_device *bdev,
1824			      struct bio **bio_ret,
1825			      unsigned long max_pages,
1826			      bio_end_io_t end_io_func,
1827			      int mirror_num,
1828			      unsigned long prev_bio_flags,
1829			      unsigned long bio_flags)
1830{
1831	int ret = 0;
1832	struct bio *bio;
1833	int nr;
1834	int contig = 0;
1835	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1836	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1837	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1838
1839	if (bio_ret && *bio_ret) {
1840		bio = *bio_ret;
1841		if (old_compressed)
1842			contig = bio->bi_sector == sector;
1843		else
1844			contig = bio->bi_sector + (bio->bi_size >> 9) ==
1845				sector;
1846
1847		if (prev_bio_flags != bio_flags || !contig ||
1848		    (tree->ops && tree->ops->merge_bio_hook &&
1849		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
1850					       bio_flags)) ||
1851		    bio_add_page(bio, page, page_size, offset) < page_size) {
1852			ret = submit_one_bio(rw, bio, mirror_num,
1853					     prev_bio_flags);
1854			bio = NULL;
1855		} else {
1856			return 0;
1857		}
1858	}
1859	if (this_compressed)
1860		nr = BIO_MAX_PAGES;
1861	else
1862		nr = bio_get_nr_vecs(bdev);
1863
1864	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1865	if (!bio)
1866		return -ENOMEM;
1867
1868	bio_add_page(bio, page, page_size, offset);
1869	bio->bi_end_io = end_io_func;
1870	bio->bi_private = tree;
1871
1872	if (bio_ret)
1873		*bio_ret = bio;
1874	else
1875		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1876
1877	return ret;
1878}
1879
1880void set_page_extent_mapped(struct page *page)
1881{
1882	if (!PagePrivate(page)) {
1883		SetPagePrivate(page);
1884		page_cache_get(page);
1885		set_page_private(page, EXTENT_PAGE_PRIVATE);
1886	}
1887}
1888
1889static void set_page_extent_head(struct page *page, unsigned long len)
1890{
1891	WARN_ON(!PagePrivate(page));
1892	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1893}
1894
1895/*
1896 * basic readpage implementation.  Locked extent state structs are inserted
 1897 * into the tree and removed when the IO is done (by the end_io
1898 * handlers)
1899 */
1900static int __extent_read_full_page(struct extent_io_tree *tree,
1901				   struct page *page,
1902				   get_extent_t *get_extent,
1903				   struct bio **bio, int mirror_num,
1904				   unsigned long *bio_flags)
1905{
1906	struct inode *inode = page->mapping->host;
1907	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1908	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1909	u64 end;
1910	u64 cur = start;
1911	u64 extent_offset;
1912	u64 last_byte = i_size_read(inode);
1913	u64 block_start;
1914	u64 cur_end;
1915	sector_t sector;
1916	struct extent_map *em;
1917	struct block_device *bdev;
1918	struct btrfs_ordered_extent *ordered;
1919	int ret;
1920	int nr = 0;
1921	size_t pg_offset = 0;
1922	size_t iosize;
1923	size_t disk_io_size;
1924	size_t blocksize = inode->i_sb->s_blocksize;
1925	unsigned long this_bio_flag = 0;
1926
1927	set_page_extent_mapped(page);
1928
1929	if (!PageUptodate(page)) {
1930		if (cleancache_get_page(page) == 0) {
1931			BUG_ON(blocksize != PAGE_SIZE);
1932			goto out;
1933		}
1934	}
1935
1936	end = page_end;
1937	while (1) {
1938		lock_extent(tree, start, end, GFP_NOFS);
1939		ordered = btrfs_lookup_ordered_extent(inode, start);
1940		if (!ordered)
1941			break;
1942		unlock_extent(tree, start, end, GFP_NOFS);
1943		btrfs_start_ordered_extent(inode, ordered, 1);
1944		btrfs_put_ordered_extent(ordered);
1945	}
1946
1947	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1948		char *userpage;
1949		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1950
1951		if (zero_offset) {
1952			iosize = PAGE_CACHE_SIZE - zero_offset;
1953			userpage = kmap_atomic(page, KM_USER0);
1954			memset(userpage + zero_offset, 0, iosize);
1955			flush_dcache_page(page);
1956			kunmap_atomic(userpage, KM_USER0);
1957		}
1958	}
1959	while (cur <= end) {
1960		if (cur >= last_byte) {
1961			char *userpage;
1962			struct extent_state *cached = NULL;
1963
1964			iosize = PAGE_CACHE_SIZE - pg_offset;
1965			userpage = kmap_atomic(page, KM_USER0);
1966			memset(userpage + pg_offset, 0, iosize);
1967			flush_dcache_page(page);
1968			kunmap_atomic(userpage, KM_USER0);
1969			set_extent_uptodate(tree, cur, cur + iosize - 1,
1970					    &cached, GFP_NOFS);
1971			unlock_extent_cached(tree, cur, cur + iosize - 1,
1972					     &cached, GFP_NOFS);
1973			break;
1974		}
1975		em = get_extent(inode, page, pg_offset, cur,
1976				end - cur + 1, 0);
1977		if (IS_ERR_OR_NULL(em)) {
1978			SetPageError(page);
1979			unlock_extent(tree, cur, end, GFP_NOFS);
1980			break;
1981		}
1982		extent_offset = cur - em->start;
1983		BUG_ON(extent_map_end(em) <= cur);
1984		BUG_ON(end < cur);
1985
1986		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1987			this_bio_flag = EXTENT_BIO_COMPRESSED;
1988			extent_set_compress_type(&this_bio_flag,
1989						 em->compress_type);
1990		}
1991
1992		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1993		cur_end = min(extent_map_end(em) - 1, end);
1994		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1995		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
1996			disk_io_size = em->block_len;
1997			sector = em->block_start >> 9;
1998		} else {
1999			sector = (em->block_start + extent_offset) >> 9;
2000			disk_io_size = iosize;
2001		}
2002		bdev = em->bdev;
2003		block_start = em->block_start;
2004		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2005			block_start = EXTENT_MAP_HOLE;
2006		free_extent_map(em);
2007		em = NULL;
2008
2009		/* we've found a hole, just zero and go on */
2010		if (block_start == EXTENT_MAP_HOLE) {
2011			char *userpage;
2012			struct extent_state *cached = NULL;
2013
2014			userpage = kmap_atomic(page, KM_USER0);
2015			memset(userpage + pg_offset, 0, iosize);
2016			flush_dcache_page(page);
2017			kunmap_atomic(userpage, KM_USER0);
2018
2019			set_extent_uptodate(tree, cur, cur + iosize - 1,
2020					    &cached, GFP_NOFS);
2021			unlock_extent_cached(tree, cur, cur + iosize - 1,
2022			                     &cached, GFP_NOFS);
2023			cur = cur + iosize;
2024			pg_offset += iosize;
2025			continue;
2026		}
2027		/* the get_extent function already copied into the page */
2028		if (test_range_bit(tree, cur, cur_end,
2029				   EXTENT_UPTODATE, 1, NULL)) {
2030			check_page_uptodate(tree, page);
2031			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032			cur = cur + iosize;
2033			pg_offset += iosize;
2034			continue;
2035		}
2036		/* we have an inline extent but it didn't get marked up
2037		 * to date.  Error out
2038		 */
2039		if (block_start == EXTENT_MAP_INLINE) {
2040			SetPageError(page);
2041			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2042			cur = cur + iosize;
2043			pg_offset += iosize;
2044			continue;
2045		}
2046
2047		ret = 0;
2048		if (tree->ops && tree->ops->readpage_io_hook) {
2049			ret = tree->ops->readpage_io_hook(page, cur,
2050							  cur + iosize - 1);
2051		}
2052		if (!ret) {
2053			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2054			pnr -= page->index;
2055			ret = submit_extent_page(READ, tree, page,
2056					 sector, disk_io_size, pg_offset,
2057					 bdev, bio, pnr,
2058					 end_bio_extent_readpage, mirror_num,
2059					 *bio_flags,
2060					 this_bio_flag);
2061			nr++;
2062			*bio_flags = this_bio_flag;
2063		}
2064		if (ret)
2065			SetPageError(page);
2066		cur = cur + iosize;
2067		pg_offset += iosize;
2068	}
2069out:
2070	if (!nr) {
2071		if (!PageError(page))
2072			SetPageUptodate(page);
2073		unlock_page(page);
2074	}
2075	return 0;
2076}
2077
2078int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2079			    get_extent_t *get_extent)
2080{
2081	struct bio *bio = NULL;
2082	unsigned long bio_flags = 0;
2083	int ret;
2084
2085	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2086				      &bio_flags);
2087	if (bio)
2088		ret = submit_one_bio(READ, bio, 0, bio_flags);
2089	return ret;
2090}
2091
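/*
 * account pages submitted for IO against wbc->nr_to_write and, for cyclic
 * or whole-file writeback, remember where to resume on the next pass
 */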
2092static noinline void update_nr_written(struct page *page,
2093				      struct writeback_control *wbc,
2094				      unsigned long nr_written)
2095{
2096	wbc->nr_to_write -= nr_written;
2097	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2098	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2099		page->mapping->writeback_index = page->index + nr_written;
2100}
2101
2102/*
2103 * the writepage semantics are similar to regular writepage.  extent
2104 * records are inserted to lock ranges in the tree, and as dirty areas
2105 * are found, they are marked writeback.  Then the lock bits are removed
2106 * and the end_io handler clears the writeback ranges
2107 */
2108static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2109			      void *data)
2110{
 
2111	struct inode *inode = page->mapping->host;
2112	struct extent_page_data *epd = data;
2113	struct extent_io_tree *tree = epd->tree;
2114	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2115	u64 delalloc_start;
2116	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2117	u64 end;
2118	u64 cur = start;
2119	u64 extent_offset;
2120	u64 last_byte = i_size_read(inode);
2121	u64 block_start;
2122	u64 iosize;
2123	sector_t sector;
2124	struct extent_state *cached_state = NULL;
2125	struct extent_map *em;
2126	struct block_device *bdev;
2127	int ret;
2128	int nr = 0;
2129	size_t pg_offset = 0;
2130	size_t blocksize;
2131	loff_t i_size = i_size_read(inode);
2132	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2133	u64 nr_delalloc;
2134	u64 delalloc_end;
2135	int page_started;
2136	int compressed;
2137	int write_flags;
2138	unsigned long nr_written = 0;
2139
2140	if (wbc->sync_mode == WB_SYNC_ALL)
2141		write_flags = WRITE_SYNC;
2142	else
2143		write_flags = WRITE;
2144
2145	trace___extent_writepage(page, inode, wbc);
2146
2147	WARN_ON(!PageLocked(page));
2148	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 
2149	if (page->index > end_index ||
2150	   (page->index == end_index && !pg_offset)) {
2151		page->mapping->a_ops->invalidatepage(page, 0);
2152		unlock_page(page);
2153		return 0;
2154	}
2155
2156	if (page->index == end_index) {
2157		char *userpage;
2158
2159		userpage = kmap_atomic(page, KM_USER0);
2160		memset(userpage + pg_offset, 0,
2161		       PAGE_CACHE_SIZE - pg_offset);
2162		kunmap_atomic(userpage, KM_USER0);
2163		flush_dcache_page(page);
2164	}
2165	pg_offset = 0;
2166
2167	set_page_extent_mapped(page);
2168
2169	delalloc_start = start;
2170	delalloc_end = 0;
2171	page_started = 0;
2172	if (!epd->extent_locked) {
2173		u64 delalloc_to_write = 0;
2174		/*
2175		 * make sure the wbc mapping index is at least updated
2176		 * to this page.
2177		 */
2178		update_nr_written(page, wbc, 0);
2179
2180		while (delalloc_end < page_end) {
2181			nr_delalloc = find_lock_delalloc_range(inode, tree,
2182						       page,
2183						       &delalloc_start,
2184						       &delalloc_end,
2185						       128 * 1024 * 1024);
2186			if (nr_delalloc == 0) {
2187				delalloc_start = delalloc_end + 1;
2188				continue;
2189			}
2190			tree->ops->fill_delalloc(inode, page, delalloc_start,
2191						 delalloc_end, &page_started,
2192						 &nr_written);
2193			/*
2194			 * delalloc_end is already one less than the total
2195			 * length, so we don't subtract one from
2196			 * PAGE_CACHE_SIZE
2197			 */
2198			delalloc_to_write += (delalloc_end - delalloc_start +
2199					      PAGE_CACHE_SIZE) >>
2200					      PAGE_CACHE_SHIFT;
2201			delalloc_start = delalloc_end + 1;
2202		}
2203		if (wbc->nr_to_write < delalloc_to_write) {
2204			int thresh = 8192;
2205
2206			if (delalloc_to_write < thresh * 2)
2207				thresh = delalloc_to_write;
2208			wbc->nr_to_write = min_t(u64, delalloc_to_write,
2209						 thresh);
2210		}
2211
2212		/* did the fill delalloc function already unlock and start
2213		 * the IO?
2214		 */
2215		if (page_started) {
2216			ret = 0;
2217			/*
2218			 * we've unlocked the page, so we can't update
2219			 * the mapping's writeback index, just update
2220			 * nr_to_write.
2221			 */
2222			wbc->nr_to_write -= nr_written;
2223			goto done_unlocked;
2224		}
2225	}
2226	if (tree->ops && tree->ops->writepage_start_hook) {
2227		ret = tree->ops->writepage_start_hook(page, start,
2228						      page_end);
2229		if (ret == -EAGAIN) {
2230			redirty_page_for_writepage(wbc, page);
2231			update_nr_written(page, wbc, nr_written);
2232			unlock_page(page);
2233			ret = 0;
2234			goto done_unlocked;
2235		}
2236	}
2237
2238	/*
2239	 * we don't want to touch the inode after unlocking the page,
2240	 * so we update the mapping writeback index now
 
2241	 */
2242	update_nr_written(page, wbc, nr_written + 1);
2243
2244	end = page_end;
2245	if (last_byte <= start) {
2246		if (tree->ops && tree->ops->writepage_end_io_hook)
2247			tree->ops->writepage_end_io_hook(page, start,
2248							 page_end, NULL, 1);
2249		goto done;
2250	}
 
2251
2252	blocksize = inode->i_sb->s_blocksize;
2253
2254	while (cur <= end) {
2255		if (cur >= last_byte) {
2256			if (tree->ops && tree->ops->writepage_end_io_hook)
2257				tree->ops->writepage_end_io_hook(page, cur,
2258							 page_end, NULL, 1);
2259			break;
2260		}
2261		em = epd->get_extent(inode, page, pg_offset, cur,
2262				     end - cur + 1, 1);
2263		if (IS_ERR_OR_NULL(em)) {
2264			SetPageError(page);
2265			break;
2266		}
2267
2268		extent_offset = cur - em->start;
2269		BUG_ON(extent_map_end(em) <= cur);
2270		BUG_ON(end < cur);
2271		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2272		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2273		sector = (em->block_start + extent_offset) >> 9;
2274		bdev = em->bdev;
2275		block_start = em->block_start;
2276		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2277		free_extent_map(em);
2278		em = NULL;
2279
2280		/*
2281		 * compressed and inline extents are written through other
2282		 * paths in the FS
2283		 */
2284		if (compressed || block_start == EXTENT_MAP_HOLE ||
2285		    block_start == EXTENT_MAP_INLINE) {
2286			/*
2287			 * end_io notification does not happen here for
2288			 * compressed extents
2289			 */
2290			if (!compressed && tree->ops &&
2291			    tree->ops->writepage_end_io_hook)
2292				tree->ops->writepage_end_io_hook(page, cur,
2293							 cur + iosize - 1,
2294							 NULL, 1);
2295			else if (compressed) {
2296				/* we don't want to end_page_writeback on
2297				 * a compressed extent.  this happens
2298				 * elsewhere
2299				 */
2300				nr++;
2301			}
2302
2303			cur += iosize;
2304			pg_offset += iosize;
2305			continue;
2306		}
2307		/* leave this out until we have a page_mkwrite call */
2308		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2309				   EXTENT_DIRTY, 0, NULL)) {
2310			cur = cur + iosize;
2311			pg_offset += iosize;
2312			continue;
2313		}
2314
2315		if (tree->ops && tree->ops->writepage_io_hook) {
2316			ret = tree->ops->writepage_io_hook(page, cur,
2317						cur + iosize - 1);
2318		} else {
2319			ret = 0;
2320		}
2321		if (ret) {
2322			SetPageError(page);
2323		} else {
2324			unsigned long max_nr = end_index + 1;
2325
2326			set_range_writeback(tree, cur, cur + iosize - 1);
2327			if (!PageWriteback(page)) {
2328				printk(KERN_ERR "btrfs warning page %lu not "
2329				       "writeback, cur %llu end %llu\n",
2330				       page->index, (unsigned long long)cur,
2331				       (unsigned long long)end);
2332			}
2333
2334			ret = submit_extent_page(write_flags, tree, page,
2335						 sector, iosize, pg_offset,
2336						 bdev, &epd->bio, max_nr,
2337						 end_bio_extent_writepage,
2338						 0, 0, 0);
2339			if (ret)
2340				SetPageError(page);
2341		}
2342		cur = cur + iosize;
2343		pg_offset += iosize;
2344		nr++;
2345	}
2346done:
2347	if (nr == 0) {
2348		/* make sure the mapping tag for page dirty gets cleared */
2349		set_page_writeback(page);
2350		end_page_writeback(page);
2351	}
2352	unlock_page(page);
2353
2354done_unlocked:
2355
2356	/* drop our reference on any cached states */
2357	free_extent_state(cached_state);
2358	return 0;
 
2359}
2360
2361/**
2362 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2363 * @mapping: address space structure to write
2364 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2365 * @writepage: function called for each page
2366 * @data: data passed to writepage function
2367 *
2368 * If a page is already under I/O, write_cache_pages() skips it, even
2369 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2370 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2371 * and msync() need to guarantee that all the data which was dirty at the time
2372 * the call was made get new I/O started against them.  If wbc->sync_mode is
2373 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2374 * existing IO to complete.
2375 */
2376static int extent_write_cache_pages(struct extent_io_tree *tree,
2377			     struct address_space *mapping,
2378			     struct writeback_control *wbc,
2379			     writepage_t writepage, void *data,
2380			     void (*flush_fn)(void *))
2381{
2382	int ret = 0;
2383	int done = 0;
2384	int nr_to_write_done = 0;
2385	struct pagevec pvec;
2386	int nr_pages;
2387	pgoff_t index;
2388	pgoff_t end;		/* Inclusive */
2389	int scanned = 0;
2390	int tag;
2391
2392	pagevec_init(&pvec, 0);
2393	if (wbc->range_cyclic) {
2394		index = mapping->writeback_index; /* Start from prev offset */
2395		end = -1;
2396	} else {
2397		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2398		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2399		scanned = 1;
2400	}
2401	if (wbc->sync_mode == WB_SYNC_ALL)
2402		tag = PAGECACHE_TAG_TOWRITE;
2403	else
2404		tag = PAGECACHE_TAG_DIRTY;
2405retry:
2406	if (wbc->sync_mode == WB_SYNC_ALL)
2407		tag_pages_for_writeback(mapping, index, end);
 
2408	while (!done && !nr_to_write_done && (index <= end) &&
2409	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2410			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2411		unsigned i;
2412
2413		scanned = 1;
2414		for (i = 0; i < nr_pages; i++) {
2415			struct page *page = pvec.pages[i];
2416
 
2417			/*
2418			 * At this point we hold neither mapping->tree_lock nor
2419			 * lock on the page itself: the page may be truncated or
2420			 * invalidated (changing page->mapping to NULL), or even
2421			 * swizzled back from swapper_space to tmpfs file
2422			 * mapping
2423			 */
2424			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2425				tree->ops->write_cache_pages_lock_hook(page);
2426			else
2427				lock_page(page);
2428
2429			if (unlikely(page->mapping != mapping)) {
2430				unlock_page(page);
2431				continue;
2432			}
2433
2434			if (!wbc->range_cyclic && page->index > end) {
2435				done = 1;
2436				unlock_page(page);
2437				continue;
2438			}
2439
2440			if (wbc->sync_mode != WB_SYNC_NONE) {
2441				if (PageWriteback(page))
2442					flush_fn(data);
2443				wait_on_page_writeback(page);
2444			}
2445
2446			if (PageWriteback(page) ||
2447			    !clear_page_dirty_for_io(page)) {
2448				unlock_page(page);
2449				continue;
2450			}
2451
2452			ret = (*writepage)(page, wbc, data);
2453
2454			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2455				unlock_page(page);
2456				ret = 0;
2457			}
2458			if (ret)
2459				done = 1;
2460
2461			/*
2462			 * the filesystem may choose to bump up nr_to_write.
2463			 * We have to make sure to honor the new nr_to_write
2464			 * at any time
2465			 */
2466			nr_to_write_done = wbc->nr_to_write <= 0;
 
2467		}
2468		pagevec_release(&pvec);
2469		cond_resched();
2470	}
2471	if (!scanned && !done) {
2472		/*
2473		 * We hit the last page and there is more work to be done: wrap
2474		 * back to the start of the file
2475		 */
2476		scanned = 1;
2477		index = 0;
2478		goto retry;
2479	}
2480	return ret;
2481}
2482
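/* submit any bio built up in the extent_page_data, honoring sync_io */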
2483static void flush_epd_write_bio(struct extent_page_data *epd)
2484{
2485	if (epd->bio) {
2486		if (epd->sync_io)
2487			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2488		else
2489			submit_one_bio(WRITE, epd->bio, 0, 0);
2490		epd->bio = NULL;
2491	}
2492}
2493
2494static noinline void flush_write_bio(void *data)
2495{
2496	struct extent_page_data *epd = data;
2497	flush_epd_write_bio(epd);
2498}
2499
2500int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2501			  get_extent_t *get_extent,
2502			  struct writeback_control *wbc)
2503{
2504	int ret;
2505	struct extent_page_data epd = {
2506		.bio = NULL,
2507		.tree = tree,
2508		.get_extent = get_extent,
2509		.extent_locked = 0,
2510		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2511	};
2512
2513	ret = __extent_writepage(page, wbc, &epd);
2514
2515	flush_epd_write_bio(&epd);
2516	return ret;
2517}
2518
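/*
 * write out the pages covering [start, end].  The caller already holds the
 * extent range locked, so epd.extent_locked tells __extent_writepage not to
 * lock the state bits itself
 */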
2519int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2520			      u64 start, u64 end, get_extent_t *get_extent,
2521			      int mode)
2522{
 
2523	int ret = 0;
2524	struct address_space *mapping = inode->i_mapping;
2525	struct page *page;
2526	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2527		PAGE_CACHE_SHIFT;
2528
2529	struct extent_page_data epd = {
2530		.bio = NULL,
2531		.tree = tree,
2532		.get_extent = get_extent,
2533		.extent_locked = 1,
2534		.sync_io = mode == WB_SYNC_ALL,
2535	};
2536	struct writeback_control wbc_writepages = {
2537		.sync_mode	= mode,
2538		.nr_to_write	= nr_pages * 2,
2539		.range_start	= start,
2540		.range_end	= end + 1,
2541	};
2542
2543	while (start <= end) {
2544		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2545		if (clear_page_dirty_for_io(page))
2546			ret = __extent_writepage(page, &wbc_writepages, &epd);
2547		else {
2548			if (tree->ops && tree->ops->writepage_end_io_hook)
2549				tree->ops->writepage_end_io_hook(page, start,
2550						 start + PAGE_CACHE_SIZE - 1,
2551						 NULL, 1);
2552			unlock_page(page);
2553		}
2554		page_cache_release(page);
2555		start += PAGE_CACHE_SIZE;
2556	}
2557
2558	flush_epd_write_bio(&epd);
2559	return ret;
2560}
2561
2562int extent_writepages(struct extent_io_tree *tree,
2563		      struct address_space *mapping,
2564		      get_extent_t *get_extent,
2565		      struct writeback_control *wbc)
2566{
 
2567	int ret = 0;
2568	struct extent_page_data epd = {
2569		.bio = NULL,
2570		.tree = tree,
2571		.get_extent = get_extent,
2572		.extent_locked = 0,
2573		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2574	};
2575
2576	ret = extent_write_cache_pages(tree, mapping, wbc,
2577				       __extent_writepage, &epd,
2578				       flush_write_bio);
2579	flush_epd_write_bio(&epd);
2580	return ret;
2581}
2582
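/*
 * readahead entry point: add each page to the page cache and read it with
 * __extent_read_full_page, batching the requests into one bio when possible
 */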
2583int extent_readpages(struct extent_io_tree *tree,
2584		     struct address_space *mapping,
2585		     struct list_head *pages, unsigned nr_pages,
2586		     get_extent_t get_extent)
2587{
2588	struct bio *bio = NULL;
2589	unsigned page_idx;
2590	unsigned long bio_flags = 0;
2591
2592	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2593		struct page *page = list_entry(pages->prev, struct page, lru);
2594
2595		prefetchw(&page->flags);
2596		list_del(&page->lru);
2597		if (!add_to_page_cache_lru(page, mapping,
2598					page->index, GFP_NOFS)) {
2599			__extent_read_full_page(tree, page, get_extent,
2600						&bio, 0, &bio_flags);
2601		}
2602		page_cache_release(page);
2603	}
2604	BUG_ON(!list_empty(pages));
2605	if (bio)
2606		submit_one_bio(READ, bio, 0, bio_flags);
2607	return 0;
2608}
2609
2610/*
2611 * basic invalidatepage code, this waits on any locked or writeback
2612 * ranges corresponding to the page, and then deletes any extent state
2613 * records from the tree
2614 */
2615int extent_invalidatepage(struct extent_io_tree *tree,
2616			  struct page *page, unsigned long offset)
2617{
2618	struct extent_state *cached_state = NULL;
2619	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2620	u64 end = start + PAGE_CACHE_SIZE - 1;
2621	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2622
2623	start += (offset + blocksize - 1) & ~(blocksize - 1);
2624	if (start > end)
2625		return 0;
2626
2627	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2628	wait_on_page_writeback(page);
2629	clear_extent_bit(tree, start, end,
2630			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2631			 EXTENT_DO_ACCOUNTING,
2632			 1, 1, &cached_state, GFP_NOFS);
2633	return 0;
2634}
2635
2636/*
2637 * a helper for releasepage, this tests for areas of the page that
2638 * are locked or under IO and drops the related state bits if it is safe
2639 * to drop the page.
2640 */
2641int try_release_extent_state(struct extent_map_tree *map,
2642			     struct extent_io_tree *tree, struct page *page,
2643			     gfp_t mask)
2644{
2645	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2646	u64 end = start + PAGE_CACHE_SIZE - 1;
2647	int ret = 1;
2648
2649	if (test_range_bit(tree, start, end,
2650			   EXTENT_IOBITS, 0, NULL))
2651		ret = 0;
2652	else {
2653		if ((mask & GFP_NOFS) == GFP_NOFS)
2654			mask = GFP_NOFS;
2655		/*
2656		 * at this point we can safely clear everything except the
2657		 * locked bit and the nodatasum bit
2658		 */
2659		ret = clear_extent_bit(tree, start, end,
2660				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2661				 0, 0, NULL, mask);
2662
2663		/* if clear_extent_bit failed for enomem reasons,
2664		 * we can't allow the release to continue.
2665		 */
2666		if (ret < 0)
2667			ret = 0;
2668		else
2669			ret = 1;
2670	}
2671	return ret;
2672}
2673
2674/*
2675 * a helper for releasepage.  As long as there are no locked extents
2676 * in the range corresponding to the page, both state records and extent
2677 * map records are removed
2678 */
2679int try_release_extent_mapping(struct extent_map_tree *map,
2680			       struct extent_io_tree *tree, struct page *page,
2681			       gfp_t mask)
2682{
2683	struct extent_map *em;
2684	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2685	u64 end = start + PAGE_CACHE_SIZE - 1;
2686
2687	if ((mask & __GFP_WAIT) &&
2688	    page->mapping->host->i_size > 16 * 1024 * 1024) {
2689		u64 len;
2690		while (start <= end) {
2691			len = end - start + 1;
2692			write_lock(&map->lock);
2693			em = lookup_extent_mapping(map, start, len);
2694			if (IS_ERR_OR_NULL(em)) {
2695				write_unlock(&map->lock);
2696				break;
2697			}
2698			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2699			    em->start != start) {
2700				write_unlock(&map->lock);
2701				free_extent_map(em);
2702				break;
2703			}
2704			if (!test_range_bit(tree, em->start,
2705					    extent_map_end(em) - 1,
2706					    EXTENT_LOCKED | EXTENT_WRITEBACK,
2707					    0, NULL)) {
2708				remove_extent_mapping(map, em);
2709				/* once for the rb tree */
2710				free_extent_map(em);
2711			}
2712			start = extent_map_end(em);
2713			write_unlock(&map->lock);
2714
2715			/* once for us */
2716			free_extent_map(em);
2717		}
2718	}
2719	return try_release_extent_state(map, tree, page, mask);
2720}
2721
2722/*
2723 * helper function for fiemap, which doesn't want to see any holes.
2724 * This maps until we find something past 'last'
2725 */
2726static struct extent_map *get_extent_skip_holes(struct inode *inode,
2727						u64 offset,
2728						u64 last,
2729						get_extent_t *get_extent)
2730{
2731	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2732	struct extent_map *em;
2733	u64 len;
2734
2735	if (offset >= last)
2736		return NULL;
2737
2738	while(1) {
2739		len = last - offset;
2740		if (len == 0)
2741			break;
2742		len = (len + sectorsize - 1) & ~(sectorsize - 1);
2743		em = get_extent(inode, NULL, 0, offset, len, 0);
2744		if (IS_ERR_OR_NULL(em))
2745			return em;
2746
2747		/* if this isn't a hole return it */
2748		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2749		    em->block_start != EXTENT_MAP_HOLE) {
2750			return em;
2751		}
 
2752
2753		/* this is a hole, advance to the next extent */
2754		offset = extent_map_end(em);
2755		free_extent_map(em);
2756		if (offset >= last)
2757			break;
2758	}
2759	return NULL;
2760}
2761
2762int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2763		__u64 start, __u64 len, get_extent_t *get_extent)
2764{
2765	int ret = 0;
2766	u64 off = start;
2767	u64 max = start + len;
2768	u32 flags = 0;
2769	u32 found_type;
2770	u64 last;
2771	u64 last_for_get_extent = 0;
2772	u64 disko = 0;
2773	u64 isize = i_size_read(inode);
2774	struct btrfs_key found_key;
2775	struct extent_map *em = NULL;
2776	struct extent_state *cached_state = NULL;
2777	struct btrfs_path *path;
2778	struct btrfs_file_extent_item *item;
2779	int end = 0;
2780	u64 em_start = 0;
2781	u64 em_len = 0;
2782	u64 em_end = 0;
2783	unsigned long emflags;
2784
2785	if (len == 0)
2786		return -EINVAL;
2787
2788	path = btrfs_alloc_path();
2789	if (!path)
2790		return -ENOMEM;
2791	path->leave_spinning = 1;
2792
2793	/*
2794	 * lookup the last file extent.  We're not using i_size here
2795	 * because there might be preallocation past i_size
 
2796	 */
2797	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2798				       path, btrfs_ino(inode), -1, 0);
2799	if (ret < 0) {
2800		btrfs_free_path(path);
2801		return ret;
2802	}
2803	WARN_ON(!ret);
2804	path->slots[0]--;
2805	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2806			      struct btrfs_file_extent_item);
2807	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2808	found_type = btrfs_key_type(&found_key);
2809
2810	/* No extents, but there might be delalloc bits */
2811	if (found_key.objectid != btrfs_ino(inode) ||
2812	    found_type != BTRFS_EXTENT_DATA_KEY) {
2813		/* have to trust i_size as the end */
2814		last = (u64)-1;
2815		last_for_get_extent = isize;
2816	} else {
2817		/*
2818		 * remember the start of the last extent.  There are a
2819		 * bunch of different factors that go into the length of the
2820		 * extent, so it's much less complex to remember where it started
2821		 */
2822		last = found_key.offset;
2823		last_for_get_extent = last + 1;
2824	}
2825	btrfs_free_path(path);
2826
2827	/*
2828	 * we might have some extents allocated but more delalloc past those
2829	 * extents.  so, we trust isize unless the start of the last extent is
2830	 * beyond isize
2831	 */
2832	if (last < isize) {
2833		last = (u64)-1;
2834		last_for_get_extent = isize;
2835	}
2836
2837	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2838			 &cached_state, GFP_NOFS);
2839
2840	em = get_extent_skip_holes(inode, off, last_for_get_extent,
2841				   get_extent);
2842	if (!em)
2843		goto out;
2844	if (IS_ERR(em)) {
2845		ret = PTR_ERR(em);
2846		goto out;
2847	}
2848
2849	while (!end) {
2850		u64 offset_in_extent;
 
2851
2852		/* break if the extent we found is outside the range */
2853		if (em->start >= max || extent_map_end(em) < off)
2854			break;
2855
2856		/*
2857		 * get_extent may return an extent that starts before our
2858		 * requested range.  We have to make sure the ranges
2859		 * we return to fiemap always move forward and don't
2860		 * overlap, so adjust the offsets here
2861		 */
2862		em_start = max(em->start, off);
2863
2864		/*
2865		 * record the offset from the start of the extent
2866		 * for adjusting the disk offset below
2867		 */
2868		offset_in_extent = em_start - em->start;
2869		em_end = extent_map_end(em);
2870		em_len = em_end - em_start;
2871		emflags = em->flags;
2872		disko = 0;
2873		flags = 0;
2874
2875		/*
2876		 * bump off for our next call to get_extent
2877		 */
2878		off = extent_map_end(em);
2879		if (off >= max)
2880			end = 1;
2881
2882		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2883			end = 1;
2884			flags |= FIEMAP_EXTENT_LAST;
2885		} else if (em->block_start == EXTENT_MAP_INLINE) {
2886			flags |= (FIEMAP_EXTENT_DATA_INLINE |
2887				  FIEMAP_EXTENT_NOT_ALIGNED);
2888		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
2889			flags |= (FIEMAP_EXTENT_DELALLOC |
2890				  FIEMAP_EXTENT_UNKNOWN);
2891		} else {
2892			disko = em->block_start + offset_in_extent;
 
2893		}
2894		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2895			flags |= FIEMAP_EXTENT_ENCODED;
2896
2897		free_extent_map(em);
2898		em = NULL;
2899		if ((em_start >= last) || em_len == (u64)-1 ||
2900		   (last == (u64)-1 && isize <= em_end)) {
2901			flags |= FIEMAP_EXTENT_LAST;
2902			end = 1;
2903		}
2904
2905		/* now scan forward to see if this is really the last extent. */
2906		em = get_extent_skip_holes(inode, off, last_for_get_extent,
2907					   get_extent);
2908		if (IS_ERR(em)) {
2909			ret = PTR_ERR(em);
2910			goto out;
2911		}
2912		if (!em) {
2913			flags |= FIEMAP_EXTENT_LAST;
2914			end = 1;
2915		}
2916		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2917					      em_len, flags);
2918		if (ret)
2919			goto out_free;
2920	}
2921out_free:
2922	free_extent_map(em);
2923out:
2924	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
2925			     &cached_state, GFP_NOFS);
2926	return ret;
2927}
2928
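/*
 * return the i'th page backing an extent buffer; page 0 is cached in
 * eb->first_page, the others are looked up in the mapping's radix tree
 */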
2929static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2930					      unsigned long i)
2931{
2932	struct page *p;
2933	struct address_space *mapping;
2934
2935	if (i == 0)
2936		return eb->first_page;
2937	i += eb->start >> PAGE_CACHE_SHIFT;
2938	mapping = eb->first_page->mapping;
2939	if (!mapping)
2940		return NULL;
2941
2942	/*
2943	 * extent_buffer_page is only called after pinning the page
2944	 * by increasing the reference count.  So we know the page must
2945	 * be in the radix tree.
2946	 */
2947	rcu_read_lock();
2948	p = radix_tree_lookup(&mapping->page_tree, i);
2949	rcu_read_unlock();
2950
2951	return p;
2952}
2953
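/*
 * number of pages spanned by [start, start + len); start need not be page
 * aligned, so round the end up and subtract the index of the first page
 */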
2954static inline unsigned long num_extent_pages(u64 start, u64 len)
2955{
2956	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2957		(start >> PAGE_CACHE_SHIFT);
 
2958}
2959
2960static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2961						   u64 start,
2962						   unsigned long len,
2963						   gfp_t mask)
2964{
2965	struct extent_buffer *eb = NULL;
2966#if LEAK_DEBUG
2967	unsigned long flags;
2968#endif
2969
2970	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2971	if (eb == NULL)
2972		return NULL;
2973	eb->start = start;
2974	eb->len = len;
2975	rwlock_init(&eb->lock);
2976	atomic_set(&eb->write_locks, 0);
2977	atomic_set(&eb->read_locks, 0);
2978	atomic_set(&eb->blocking_readers, 0);
2979	atomic_set(&eb->blocking_writers, 0);
2980	atomic_set(&eb->spinning_readers, 0);
2981	atomic_set(&eb->spinning_writers, 0);
2982	init_waitqueue_head(&eb->write_lock_wq);
2983	init_waitqueue_head(&eb->read_lock_wq);
2984
2985#if LEAK_DEBUG
2986	spin_lock_irqsave(&leak_lock, flags);
2987	list_add(&eb->leak_list, &buffers);
2988	spin_unlock_irqrestore(&leak_lock, flags);
2989#endif
2990	atomic_set(&eb->refs, 1);
2991
2992	return eb;
2993}
2994
2995static void __free_extent_buffer(struct extent_buffer *eb)
2996{
2997#if LEAK_DEBUG
2998	unsigned long flags;
2999	spin_lock_irqsave(&leak_lock, flags);
3000	list_del(&eb->leak_list);
3001	spin_unlock_irqrestore(&leak_lock, flags);
3002#endif
3003	kmem_cache_free(extent_buffer_cache, eb);
3004}
3005
3006/*
3007 * Helper for releasing extent buffer page.
3008 */
3009static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3010						unsigned long start_idx)
3011{
3012	unsigned long index;
3013	struct page *page;
 
3014
3015	if (!eb->first_page)
3016		return;
3017
3018	index = num_extent_pages(eb->start, eb->len);
3019	if (start_idx >= index)
3020		return;
3021
3022	do {
3023		index--;
3024		page = extent_buffer_page(eb, index);
3025		if (page)
3026			page_cache_release(page);
3027	} while (index != start_idx);
3028}
3029
3030/*
3031 * Helper for releasing the extent buffer.
3032 */
3033static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
3034{
3035	btrfs_release_extent_buffer_page(eb, 0);
3036	__free_extent_buffer(eb);
3037}
3038
3039struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3040					  u64 start, unsigned long len,
3041					  struct page *page0)
3042{
3043	unsigned long num_pages = num_extent_pages(start, len);
3044	unsigned long i;
3045	unsigned long index = start >> PAGE_CACHE_SHIFT;
3046	struct extent_buffer *eb;
3047	struct extent_buffer *exists = NULL;
3048	struct page *p;
3049	struct address_space *mapping = tree->mapping;
3050	int uptodate = 1;
3051	int ret;
3052
3053	rcu_read_lock();
3054	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3055	if (eb && atomic_inc_not_zero(&eb->refs)) {
3056		rcu_read_unlock();
3057		mark_page_accessed(eb->first_page);
3058		return eb;
3059	}
3060	rcu_read_unlock();
3061
3062	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3063	if (!eb)
3064		return NULL;
3065
3066	if (page0) {
3067		eb->first_page = page0;
3068		i = 1;
3069		index++;
3070		page_cache_get(page0);
3071		mark_page_accessed(page0);
3072		set_page_extent_mapped(page0);
3073		set_page_extent_head(page0, len);
3074		uptodate = PageUptodate(page0);
3075	} else {
3076		i = 0;
3077	}
3078	for (; i < num_pages; i++, index++) {
3079		p = find_or_create_page(mapping, index, GFP_NOFS);
3080		if (!p) {
3081			WARN_ON(1);
3082			goto free_eb;
3083		}
3084		set_page_extent_mapped(p);
3085		mark_page_accessed(p);
3086		if (i == 0) {
3087			eb->first_page = p;
3088			set_page_extent_head(p, len);
3089		} else {
3090			set_page_private(p, EXTENT_PAGE_PRIVATE);
3091		}
3092		if (!PageUptodate(p))
3093			uptodate = 0;
3094
3095		/*
3096		 * see below about how we avoid a nasty race with release page
3097		 * and why we unlock later
3098		 */
3099		if (i != 0)
3100			unlock_page(p);
3101	}
3102	if (uptodate)
3103		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3104
3105	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3106	if (ret)
3107		goto free_eb;
3108
3109	spin_lock(&tree->buffer_lock);
3110	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3111	if (ret == -EEXIST) {
3112		exists = radix_tree_lookup(&tree->buffer,
3113						start >> PAGE_CACHE_SHIFT);
3114		/* add one reference for the caller */
3115		atomic_inc(&exists->refs);
3116		spin_unlock(&tree->buffer_lock);
3117		radix_tree_preload_end();
3118		goto free_eb;
3119	}
3120	/* add one reference for the tree */
3121	atomic_inc(&eb->refs);
3122	spin_unlock(&tree->buffer_lock);
3123	radix_tree_preload_end();
3124
3125	/*
3126	 * there is a race where release page may have
3127	 * tried to find this extent buffer in the radix
3128	 * but failed.  It will tell the VM it is safe to
3129	 * reclaim the page, and it will clear the page private bit.
3130	 * We must make sure to set the page private bit properly
3131	 * after the extent buffer is in the radix tree so
3132	 * it doesn't get lost
3133	 */
3134	set_page_extent_mapped(eb->first_page);
3135	set_page_extent_head(eb->first_page, eb->len);
3136	if (!page0)
3137		unlock_page(eb->first_page);
3138	return eb;
3139
3140free_eb:
3141	if (eb->first_page && !page0)
3142		unlock_page(eb->first_page);
3143
3144	if (!atomic_dec_and_test(&eb->refs))
3145		return exists;
3146	btrfs_release_extent_buffer(eb);
3147	return exists;
3148}
3149
3150struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3151					 u64 start, unsigned long len)
3152{
3153	struct extent_buffer *eb;
 
3154
3155	rcu_read_lock();
3156	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3157	if (eb && atomic_inc_not_zero(&eb->refs)) {
3158		rcu_read_unlock();
3159		mark_page_accessed(eb->first_page);
3160		return eb;
3161	}
3162	rcu_read_unlock();
3163
3164	return NULL;
3165}
3166
3167void free_extent_buffer(struct extent_buffer *eb)
3168{
 
3169	if (!eb)
3170		return;
3171
3172	if (!atomic_dec_and_test(&eb->refs))
3173		return;
3174
3175	WARN_ON(1);
3176}
3177
3178int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3179			      struct extent_buffer *eb)
3180{
3181	unsigned long i;
3182	unsigned long num_pages;
3183	struct page *page;
3184
3185	num_pages = num_extent_pages(eb->start, eb->len);
3186
3187	for (i = 0; i < num_pages; i++) {
3188		page = extent_buffer_page(eb, i);
3189		if (!PageDirty(page))
3190			continue;
3191
3192		lock_page(page);
3193		WARN_ON(!PagePrivate(page));
3194
3195		set_page_extent_mapped(page);
3196		if (i == 0)
3197			set_page_extent_head(page, eb->len);
3198
3199		clear_page_dirty_for_io(page);
3200		spin_lock_irq(&page->mapping->tree_lock);
3201		if (!PageDirty(page)) {
3202			radix_tree_tag_clear(&page->mapping->page_tree,
3203						page_index(page),
3204						PAGECACHE_TAG_DIRTY);
3205		}
3206		spin_unlock_irq(&page->mapping->tree_lock);
3207		unlock_page(page);
3208	}
3209	return 0;
3210}
3211
3212int set_extent_buffer_dirty(struct extent_io_tree *tree,
3213			     struct extent_buffer *eb)
3214{
3215	unsigned long i;
3216	unsigned long num_pages;
3217	int was_dirty = 0;
 
3218
3219	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3220	num_pages = num_extent_pages(eb->start, eb->len);
3221	for (i = 0; i < num_pages; i++)
3222		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3223	return was_dirty;
3224}
3225
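/*
 * true when the buffer does not line up exactly with whole pages (smaller
 * than a page or not page aligned); in that case the uptodate state of the
 * sub-page ranges is tracked in the io tree as well
 */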
3226static int __eb_straddles_pages(u64 start, u64 len)
3227{
3228	if (len < PAGE_CACHE_SIZE)
3229		return 1;
3230	if (start & (PAGE_CACHE_SIZE - 1))
3231		return 1;
3232	if ((start + len) & (PAGE_CACHE_SIZE - 1))
3233		return 1;
3234	return 0;
3235}
3236
3237static int eb_straddles_pages(struct extent_buffer *eb)
3238{
3239	return __eb_straddles_pages(eb->start, eb->len);
3240}
3241
3242int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3243				struct extent_buffer *eb,
3244				struct extent_state **cached_state)
3245{
3246	unsigned long i;
3247	struct page *page;
3248	unsigned long num_pages;
3249
3250	num_pages = num_extent_pages(eb->start, eb->len);
3251	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3252
3253	if (eb_straddles_pages(eb)) {
3254		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3255				      cached_state, GFP_NOFS);
3256	}
3257	for (i = 0; i < num_pages; i++) {
3258		page = extent_buffer_page(eb, i);
3259		if (page)
3260			ClearPageUptodate(page);
 
3261	}
3262	return 0;
3263}
3264
3265int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3266				struct extent_buffer *eb)
3267{
3268	unsigned long i;
3269	struct page *page;
3270	unsigned long num_pages;
3271
3272	num_pages = num_extent_pages(eb->start, eb->len);
3273
3274	if (eb_straddles_pages(eb)) {
3275		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3276				    NULL, GFP_NOFS);
3277	}
3278	for (i = 0; i < num_pages; i++) {
3279		page = extent_buffer_page(eb, i);
3280		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3281		    ((i == num_pages - 1) &&
3282		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3283			check_page_uptodate(tree, page);
3284			continue;
3285		}
3286		SetPageUptodate(page);
3287	}
3288	return 0;
3289}
3290
3291int extent_range_uptodate(struct extent_io_tree *tree,
3292			  u64 start, u64 end)
3293{
3294	struct page *page;
3295	int ret;
3296	int pg_uptodate = 1;
3297	int uptodate;
3298	unsigned long index;
3299
3300	if (__eb_straddles_pages(start, end - start + 1)) {
3301		ret = test_range_bit(tree, start, end,
3302				     EXTENT_UPTODATE, 1, NULL);
3303		if (ret)
3304			return 1;
3305	}
3306	while (start <= end) {
3307		index = start >> PAGE_CACHE_SHIFT;
3308		page = find_get_page(tree->mapping, index);
3309		uptodate = PageUptodate(page);
3310		page_cache_release(page);
3311		if (!uptodate) {
3312			pg_uptodate = 0;
3313			break;
3314		}
3315		start += PAGE_CACHE_SIZE;
3316	}
3317	return pg_uptodate;
3318}
3319
3320int extent_buffer_uptodate(struct extent_io_tree *tree,
3321			   struct extent_buffer *eb,
3322			   struct extent_state *cached_state)
3323{
3324	int ret = 0;
3325	unsigned long num_pages;
3326	unsigned long i;
3327	struct page *page;
3328	int pg_uptodate = 1;
3329
3330	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3331		return 1;
 
3332
3333	if (eb_straddles_pages(eb)) {
3334		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3335				   EXTENT_UPTODATE, 1, cached_state);
3336		if (ret)
3337			return ret;
3338	}
3339
3340	num_pages = num_extent_pages(eb->start, eb->len);
3341	for (i = 0; i < num_pages; i++) {
3342		page = extent_buffer_page(eb, i);
3343		if (!PageUptodate(page)) {
3344			pg_uptodate = 0;
3345			break;
3346		}
3347	}
3348	return pg_uptodate;
3349}
3350
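/*
 * read any pages of the extent buffer that are not yet uptodate.  With
 * wait == 0 the reads are only started, and we bail out if a page cannot
 * be trylocked; with wait != 0 we block until every read completes
 */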
3351int read_extent_buffer_pages(struct extent_io_tree *tree,
3352			     struct extent_buffer *eb,
3353			     u64 start, int wait,
3354			     get_extent_t *get_extent, int mirror_num)
3355{
3356	unsigned long i;
3357	unsigned long start_i;
3358	struct page *page;
3359	int err;
3360	int ret = 0;
3361	int locked_pages = 0;
3362	int all_uptodate = 1;
3363	int inc_all_pages = 0;
3364	unsigned long num_pages;
3365	struct bio *bio = NULL;
3366	unsigned long bio_flags = 0;
3367
3368	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3369		return 0;
3370
3371	if (eb_straddles_pages(eb)) {
3372		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3373				   EXTENT_UPTODATE, 1, NULL)) {
3374			return 0;
3375		}
3376	}
3377
3378	if (start) {
3379		WARN_ON(start < eb->start);
3380		start_i = (start >> PAGE_CACHE_SHIFT) -
3381			(eb->start >> PAGE_CACHE_SHIFT);
3382	} else {
3383		start_i = 0;
3384	}
3385
3386	num_pages = num_extent_pages(eb->start, eb->len);
3387	for (i = start_i; i < num_pages; i++) {
3388		page = extent_buffer_page(eb, i);
3389		if (!wait) {
3390			if (!trylock_page(page))
3391				goto unlock_exit;
3392		} else {
3393			lock_page(page);
3394		}
3395		locked_pages++;
3396		if (!PageUptodate(page))
3397			all_uptodate = 0;
3398	}
3399	if (all_uptodate) {
3400		if (start_i == 0)
3401			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3402		goto unlock_exit;
3403	}
3404
3405	for (i = start_i; i < num_pages; i++) {
3406		page = extent_buffer_page(eb, i);
3407
3408		WARN_ON(!PagePrivate(page));
3409
3410		set_page_extent_mapped(page);
3411		if (i == 0)
3412			set_page_extent_head(page, eb->len);
3413
3414		if (inc_all_pages)
3415			page_cache_get(page);
3416		if (!PageUptodate(page)) {
3417			if (start_i == 0)
3418				inc_all_pages = 1;
3419			ClearPageError(page);
3420			err = __extent_read_full_page(tree, page,
3421						      get_extent, &bio,
3422						      mirror_num, &bio_flags);
3423			if (err)
3424				ret = err;
3425		} else {
3426			unlock_page(page);
3427		}
3428	}
 
3429
3430	if (bio)
3431		submit_one_bio(READ, bio, mirror_num, bio_flags);
3432
3433	if (ret || !wait)
3434		return ret;
3435
3436	for (i = start_i; i < num_pages; i++) {
3437		page = extent_buffer_page(eb, i);
3438		wait_on_page_locked(page);
3439		if (!PageUptodate(page))
3440			ret = -EIO;
3441	}
 
3442
3443	if (!ret)
3444		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3445	return ret;
3446
3447unlock_exit:
3448	i = start_i;
3449	while (locked_pages > 0) {
3450		page = extent_buffer_page(eb, i);
3451		i++;
3452		unlock_page(page);
3453		locked_pages--;
3454	}
3455	return ret;
3456}
3457
3458void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3459			unsigned long start,
3460			unsigned long len)
3461{
 
3462	size_t cur;
3463	size_t offset;
3464	struct page *page;
3465	char *kaddr;
3466	char *dst = (char *)dstv;
3467	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3468	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3469
3470	WARN_ON(start > eb->len);
3471	WARN_ON(start + len > eb->start + eb->len);
3472
3473	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3474
3475	while (len > 0) {
3476		page = extent_buffer_page(eb, i);
3477
3478		cur = min(len, (PAGE_CACHE_SIZE - offset));
3479		kaddr = page_address(page);
3480		memcpy(dst, kaddr + offset, cur);
3481
3482		dst += cur;
3483		len -= cur;
3484		offset = 0;
3485		i++;
3486	}
3487}
3488
3489int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3490			       unsigned long min_len, char **map,
3491			       unsigned long *map_start,
3492			       unsigned long *map_len)
3493{
3494	size_t offset = start & (PAGE_CACHE_SIZE - 1);
3495	char *kaddr;
3496	struct page *p;
3497	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3498	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3499	unsigned long end_i = (start_offset + start + min_len - 1) >>
3500		PAGE_CACHE_SHIFT;
3501
3502	if (i != end_i)
3503		return -EINVAL;
3504
3505	if (i == 0) {
3506		offset = start_offset;
3507		*map_start = 0;
3508	} else {
3509		offset = 0;
3510		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3511	}
3512
3513	if (start + min_len > eb->len) {
3514		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3515		       "wanted %lu %lu\n", (unsigned long long)eb->start,
3516		       eb->len, start, min_len);
3517		WARN_ON(1);
3518		return -EINVAL;
3519	}
3520
3521	p = extent_buffer_page(eb, i);
3522	kaddr = page_address(p);
3523	*map = kaddr + offset;
3524	*map_len = PAGE_CACHE_SIZE - offset;
3525	return 0;
3526}
3527
3528int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3529			  unsigned long start,
3530			  unsigned long len)
3531{
 
3532	size_t cur;
3533	size_t offset;
3534	struct page *page;
3535	char *kaddr;
3536	char *ptr = (char *)ptrv;
3537	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3538	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3539	int ret = 0;
3540
3541	WARN_ON(start > eb->len);
3542	WARN_ON(start + len > eb->start + eb->len);
3543
3544	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3545
3546	while (len > 0) {
3547		page = extent_buffer_page(eb, i);
3548
3549		cur = min(len, (PAGE_CACHE_SIZE - offset));
3550
3551		kaddr = page_address(page);
3552		ret = memcmp(ptr, kaddr + offset, cur);
3553		if (ret)
3554			break;
3555
3556		ptr += cur;
3557		len -= cur;
3558		offset = 0;
3559		i++;
3560	}
3561	return ret;
3562}
3563
3564void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3565			 unsigned long start, unsigned long len)
3566{
3567	size_t cur;
3568	size_t offset;
3569	struct page *page;
3570	char *kaddr;
3571	char *src = (char *)srcv;
3572	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3573	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
 
3574
3575	WARN_ON(start > eb->len);
3576	WARN_ON(start + len > eb->start + eb->len);
3577
3578	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3579
3580	while (len > 0) {
3581		page = extent_buffer_page(eb, i);
3582		WARN_ON(!PageUptodate(page));
3583
3584		cur = min(len, PAGE_CACHE_SIZE - offset);
3585		kaddr = page_address(page);
3586		memcpy(kaddr + offset, src, cur);
3587
3588		src += cur;
3589		len -= cur;
3590		offset = 0;
3591		i++;
3592	}
3593}
3594
3595void memset_extent_buffer(struct extent_buffer *eb, char c,
3596			  unsigned long start, unsigned long len)
3597{
3598	size_t cur;
3599	size_t offset;
3600	struct page *page;
3601	char *kaddr;
3602	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3603	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3604
3605	WARN_ON(start > eb->len);
3606	WARN_ON(start + len > eb->start + eb->len);
3607
3608	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610	while (len > 0) {
3611		page = extent_buffer_page(eb, i);
3612		WARN_ON(!PageUptodate(page));
 
3613
3614		cur = min(len, PAGE_CACHE_SIZE - offset);
3615		kaddr = page_address(page);
3616		memset(kaddr + offset, c, cur);
3617
3618		len -= cur;
3619		offset = 0;
3620		i++;
3621	}
3622}
3623
3624void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3625			unsigned long dst_offset, unsigned long src_offset,
3626			unsigned long len)
3627{
 
3628	u64 dst_len = dst->len;
3629	size_t cur;
3630	size_t offset;
3631	struct page *page;
3632	char *kaddr;
3633	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3634	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3635
3636	WARN_ON(src->len != dst_len);
3637
3638	offset = (start_offset + dst_offset) &
3639		((unsigned long)PAGE_CACHE_SIZE - 1);
3640
3641	while (len > 0) {
3642		page = extent_buffer_page(dst, i);
3643		WARN_ON(!PageUptodate(page));
3644
3645		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3646
3647		kaddr = page_address(page);
3648		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3649
3650		src_offset += cur;
3651		len -= cur;
3652		offset = 0;
3653		i++;
3654	}
3655}
3656
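/*
 * memmove-style page helper: within a single page this is a plain memmove,
 * across two pages the bytes are copied backwards to match the tail-to-head
 * walk done by memmove_extent_buffer for overlapping ranges
 */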
3657static void move_pages(struct page *dst_page, struct page *src_page,
3658		       unsigned long dst_off, unsigned long src_off,
3659		       unsigned long len)
3660{
3661	char *dst_kaddr = page_address(dst_page);
3662	if (dst_page == src_page) {
3663		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3664	} else {
3665		char *src_kaddr = page_address(src_page);
3666		char *p = dst_kaddr + dst_off + len;
3667		char *s = src_kaddr + src_off + len;
3668
3669		while (len--)
3670			*--p = *--s;
3671	}
3672}
3673
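/* do the byte ranges [src, src + len) and [dst, dst + len) overlap? */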
3674static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3675{
3676	unsigned long distance = (src > dst) ? src - dst : dst - src;
3677	return distance < len;
3678}
3679
3680static void copy_pages(struct page *dst_page, struct page *src_page,
3681		       unsigned long dst_off, unsigned long src_off,
3682		       unsigned long len)
3683{
3684	char *dst_kaddr = page_address(dst_page);
3685	char *src_kaddr;
3686
3687	if (dst_page != src_page) {
3688		src_kaddr = page_address(src_page);
3689	} else {
3690		src_kaddr = dst_kaddr;
3691		BUG_ON(areas_overlap(src_off, dst_off, len));
3692	}
3693
3694	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3695}
3696
3697void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3698			   unsigned long src_offset, unsigned long len)
3699{
3700	size_t cur;
3701	size_t dst_off_in_page;
3702	size_t src_off_in_page;
3703	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3704	unsigned long dst_i;
3705	unsigned long src_i;
3706
3707	if (src_offset + len > dst->len) {
3708		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3709		       "len %lu dst len %lu\n", src_offset, len, dst->len);
3710		BUG_ON(1);
3711	}
3712	if (dst_offset + len > dst->len) {
3713		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3714		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
3715		BUG_ON(1);
3716	}
3717
3718	while (len > 0) {
3719		dst_off_in_page = (start_offset + dst_offset) &
3720			((unsigned long)PAGE_CACHE_SIZE - 1);
3721		src_off_in_page = (start_offset + src_offset) &
3722			((unsigned long)PAGE_CACHE_SIZE - 1);
3723
3724		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3725		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3726
3727		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3728					       src_off_in_page));
3729		cur = min_t(unsigned long, cur,
3730			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3731
3732		copy_pages(extent_buffer_page(dst, dst_i),
3733			   extent_buffer_page(dst, src_i),
3734			   dst_off_in_page, src_off_in_page, cur);
3735
3736		src_offset += cur;
3737		dst_offset += cur;
3738		len -= cur;
3739	}
3740}
3741
3742void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3743			   unsigned long src_offset, unsigned long len)
 
3744{
3745	size_t cur;
3746	size_t dst_off_in_page;
3747	size_t src_off_in_page;
3748	unsigned long dst_end = dst_offset + len - 1;
3749	unsigned long src_end = src_offset + len - 1;
3750	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3751	unsigned long dst_i;
3752	unsigned long src_i;
3753
3754	if (src_offset + len > dst->len) {
3755		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3756		       "len %lu len %lu\n", src_offset, len, dst->len);
3757		BUG_ON(1);
3758	}
3759	if (dst_offset + len > dst->len) {
3760		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3761		       "len %lu len %lu\n", dst_offset, len, dst->len);
3762		BUG_ON(1);
3763	}
3764	if (!areas_overlap(src_offset, dst_offset, len)) {
3765		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3766		return;
3767	}
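	/*
	 * The ranges overlap: walk the buffer from the end toward the start,
	 * moving one page-bounded chunk per iteration.
	 */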
3768	while (len > 0) {
3769		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3770		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3771
3772		dst_off_in_page = (start_offset + dst_end) &
3773			((unsigned long)PAGE_CACHE_SIZE - 1);
3774		src_off_in_page = (start_offset + src_end) &
3775			((unsigned long)PAGE_CACHE_SIZE - 1);
3776
3777		cur = min_t(unsigned long, len, src_off_in_page + 1);
3778		cur = min(cur, dst_off_in_page + 1);
3779		move_pages(extent_buffer_page(dst, dst_i),
3780			   extent_buffer_page(dst, src_i),
3781			   dst_off_in_page - cur + 1,
3782			   src_off_in_page - cur + 1, cur);
3783
3784		dst_end -= cur;
3785		src_end -= cur;
3786		len -= cur;
3787	}
3788}
3789
3790static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3791{
3792	struct extent_buffer *eb =
3793			container_of(head, struct extent_buffer, rcu_head);
3794
3795	btrfs_release_extent_buffer(eb);
3796}
3797
3798int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3799{
3800	u64 start = page_offset(page);
3801	struct extent_buffer *eb;
3802	int ret = 1;
3803
3804	spin_lock(&tree->buffer_lock);
3805	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3806	if (!eb) {
3807		spin_unlock(&tree->buffer_lock);
3808		return ret;
3809	}
3810
3811	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3812		ret = 0;
3813		goto out;
3814	}
 
3815
3816	/*
3817	 * Set @eb->refs to 0 if it is already 1, and then release the @eb.
3818	 * Otherwise, leave the buffer in the tree and bail out.
3819	 */
3820	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
3821		ret = 0;
3822		goto out;
3823	}
3824
3825	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3826out:
3827	spin_unlock(&tree->buffer_lock);
3828
3829	/* at this point we can safely release the extent buffer */
3830	if (atomic_read(&eb->refs) == 0)
3831		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3832	return ret;
3833}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/sched/mm.h>
  10#include <linux/spinlock.h>
  11#include <linux/blkdev.h>
  12#include <linux/swap.h>
  13#include <linux/writeback.h>
  14#include <linux/pagevec.h>
  15#include <linux/prefetch.h>
  16#include <linux/fsverity.h>
  17#include "extent_io.h"
  18#include "extent-io-tree.h"
  19#include "extent_map.h"
 
  20#include "ctree.h"
  21#include "btrfs_inode.h"
  22#include "bio.h"
  23#include "locking.h"
  24#include "backref.h"
  25#include "disk-io.h"
  26#include "subpage.h"
  27#include "zoned.h"
  28#include "block-group.h"
  29#include "compression.h"
  30#include "fs.h"
  31#include "accessors.h"
  32#include "file-item.h"
  33#include "file.h"
  34#include "dev-replace.h"
  35#include "super.h"
  36#include "transaction.h"
  37
 
  38static struct kmem_cache *extent_buffer_cache;
  39
  40#ifdef CONFIG_BTRFS_DEBUG
  41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
  42{
  43	struct btrfs_fs_info *fs_info = eb->fs_info;
  44	unsigned long flags;
  45
  46	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  47	list_add(&eb->leak_list, &fs_info->allocated_ebs);
  48	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  49}
  50
  51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
  52{
  53	struct btrfs_fs_info *fs_info = eb->fs_info;
  54	unsigned long flags;
  55
  56	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  57	list_del(&eb->leak_list);
  58	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  59}
  60
  61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  62{
  63	struct extent_buffer *eb;
 
  64	unsigned long flags;
  65
  66	/*
  67	 * If we didn't get into open_ctree our allocated_ebs will not be
  68	 * initialized, so just skip this.
  69	 */
  70	if (!fs_info->allocated_ebs.next)
  71		return;
  72
  73	WARN_ON(!list_empty(&fs_info->allocated_ebs));
  74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  75	while (!list_empty(&fs_info->allocated_ebs)) {
  76		eb = list_first_entry(&fs_info->allocated_ebs,
  77				      struct extent_buffer, leak_list);
  78		pr_err(
  79	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
  80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  81		       btrfs_header_owner(eb));
  82		list_del(&eb->leak_list);
  83		WARN_ON_ONCE(1);
  84		kmem_cache_free(extent_buffer_cache, eb);
  85	}
  86	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  87}
  88#else
  89#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
  90#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
  91#endif
  92
  93/*
  94 * Structure to record info about the bio being assembled, and other info like
  95 * how many bytes are left before the stripe/ordered extent boundary.
  96 */
  97struct btrfs_bio_ctrl {
  98	struct btrfs_bio *bbio;
  99	enum btrfs_compression_type compress_type;
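	/* Bytes left before the stripe or ordered extent boundary. */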
 100	u32 len_to_oe_boundary;
 101	blk_opf_t opf;
 102	btrfs_bio_end_io_t end_io_func;
 103	struct writeback_control *wbc;
 104};
 105
 106static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 107{
 108	struct btrfs_bio *bbio = bio_ctrl->bbio;
 109
 110	if (!bbio)
 111		return;
 112
 113	/* Caller should ensure the bio has at least some range added */
 114	ASSERT(bbio->bio.bi_iter.bi_size);
 115
 116	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
 117	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
 118		btrfs_submit_compressed_read(bbio);
 119	else
 120		btrfs_submit_bio(bbio, 0);
 121
 122	/* The bbio is owned by the end_io handler now */
 123	bio_ctrl->bbio = NULL;
 124}
 125
 126/*
 127 * Submit or fail the current bio in the bio_ctrl structure.
 128 */
 129static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
 
 130{
 131	struct btrfs_bio *bbio = bio_ctrl->bbio;
 
 
 132
 133	if (!bbio)
 134		return;
 135
 136	if (ret) {
 137		ASSERT(ret < 0);
 138		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
 139		/* The bio is owned by the end_io handler now */
 140		bio_ctrl->bbio = NULL;
 141	} else {
 142		submit_one_bio(bio_ctrl);
 143	}
 
 144}
 145
 146int __init extent_buffer_init_cachep(void)
 
 147{
 148	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 149						sizeof(struct extent_buffer), 0, 0,
 150						NULL);
 151	if (!extent_buffer_cache)
 152		return -ENOMEM;
 153
 154	return 0;
 155}
 156
 157void __cold extent_buffer_free_cachep(void)
 158{
 159	/*
 160	 * Make sure all delayed rcu free are flushed before we
 161	 * destroy caches.
 
 
 162	 */
 163	rcu_barrier();
 164	kmem_cache_destroy(extent_buffer_cache);
 165}
 166
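/*
 * Clear the dirty-for-io state on every page in [start, end].  The pages are
 * expected to already be present in the page cache.
 */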
 167void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 168{
 169	unsigned long index = start >> PAGE_SHIFT;
 170	unsigned long end_index = end >> PAGE_SHIFT;
 171	struct page *page;
 172
 173	while (index <= end_index) {
 174		page = find_get_page(inode->i_mapping, index);
 175		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 176		clear_page_dirty_for_io(page);
 177		put_page(page);
 178		index++;
 179	}
 180}
 181
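/*
 * Apply the requested @page_ops to the part of @page that overlaps the range
 * [@start, @end], leaving @locked_page locked.
 */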
 182static void process_one_page(struct btrfs_fs_info *fs_info,
 183			     struct page *page, struct page *locked_page,
 184			     unsigned long page_ops, u64 start, u64 end)
 185{
 186	struct folio *folio = page_folio(page);
 187	u32 len;
 188
 189	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
 190	len = end + 1 - start;
 191
 192	if (page_ops & PAGE_SET_ORDERED)
 193		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
 194	if (page_ops & PAGE_START_WRITEBACK) {
 195		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 196		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
 197	}
 198	if (page_ops & PAGE_END_WRITEBACK)
 199		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
 200
 201	if (page != locked_page && (page_ops & PAGE_UNLOCK))
 202		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 203}
 204
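/*
 * Walk all folios in the range [@start, @end] and apply @page_ops to each of
 * them via process_one_page().
 */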
 205static void __process_pages_contig(struct address_space *mapping,
 206				   struct page *locked_page, u64 start, u64 end,
 207				   unsigned long page_ops)
 208{
 209	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
 210	pgoff_t start_index = start >> PAGE_SHIFT;
 211	pgoff_t end_index = end >> PAGE_SHIFT;
 212	pgoff_t index = start_index;
 213	struct folio_batch fbatch;
 214	int i;
 215
 216	folio_batch_init(&fbatch);
 217	while (index <= end_index) {
 218		int found_folios;
 219
 220		found_folios = filemap_get_folios_contig(mapping, &index,
 221				end_index, &fbatch);
 222		for (i = 0; i < found_folios; i++) {
 223			struct folio *folio = fbatch.folios[i];
 224
 225			process_one_page(fs_info, &folio->page, locked_page,
 226					 page_ops, start, end);
 227		}
 228		folio_batch_release(&fbatch);
 229		cond_resched();
 230	}
 231}
 232
 233static noinline void __unlock_for_delalloc(struct inode *inode,
 234					   struct page *locked_page,
 235					   u64 start, u64 end)
 236{
 237	unsigned long index = start >> PAGE_SHIFT;
 238	unsigned long end_index = end >> PAGE_SHIFT;
 239
 240	ASSERT(locked_page);
 241	if (index == locked_page->index && end_index == index)
 242		return;
 243
 244	__process_pages_contig(inode->i_mapping, locked_page, start, end,
 245			       PAGE_UNLOCK);
 246}
 247
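/*
 * Lock every delalloc page in [@start, @end] except @locked_page.  On failure,
 * unlock everything locked so far and return -EAGAIN.
 */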
 248static noinline int lock_delalloc_pages(struct inode *inode,
 249					struct page *locked_page,
 250					u64 start,
 251					u64 end)
 252{
 253	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 254	struct address_space *mapping = inode->i_mapping;
 255	pgoff_t start_index = start >> PAGE_SHIFT;
 256	pgoff_t end_index = end >> PAGE_SHIFT;
 257	pgoff_t index = start_index;
 258	u64 processed_end = start;
 259	struct folio_batch fbatch;
 260
 261	if (index == locked_page->index && index == end_index)
 262		return 0;
 263
 264	folio_batch_init(&fbatch);
 265	while (index <= end_index) {
 266		unsigned int found_folios, i;
 267
 268		found_folios = filemap_get_folios_contig(mapping, &index,
 269				end_index, &fbatch);
 270		if (found_folios == 0)
 271			goto out;
 272
 273		for (i = 0; i < found_folios; i++) {
 274			struct folio *folio = fbatch.folios[i];
 275			struct page *page = folio_page(folio, 0);
 276			u32 len = end + 1 - start;
 277
 278			if (page == locked_page)
 279				continue;
 280
 281			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
 282							  len))
 283				goto out;
 284
 285			if (!PageDirty(page) || page->mapping != mapping) {
 286				btrfs_folio_end_writer_lock(fs_info, folio, start,
 287							    len);
 288				goto out;
 289			}
 290
 291			processed_end = page_offset(page) + PAGE_SIZE - 1;
 292		}
 293		folio_batch_release(&fbatch);
 294		cond_resched();
 295	}
 
 
 296
 297	return 0;
 298out:
 299	folio_batch_release(&fbatch);
 300	if (processed_end > start)
 301		__unlock_for_delalloc(inode, locked_page, start, processed_end);
 302	return -EAGAIN;
 303}
 304
 305/*
 306 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 307 * more than @max_bytes.
 
 308 *
 309 * @start:	The original start bytenr to search.
 310 *		Will store the extent range start bytenr.
 311 * @end:	The original end bytenr of the search range
 312 *		Will store the extent range end bytenr.
 313 *
 314 * Return true if we find a delalloc range which starts inside the original
 315 * range, and @start/@end will store the delalloc range start/end.
 316 *
 317 * Return false if we can't find any delalloc range which starts inside the
 318 * original range, and @start/@end will be the non-delalloc range start/end.
 319 */
 320EXPORT_FOR_TESTS
 321noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 322				    struct page *locked_page, u64 *start,
 323				    u64 *end)
 324{
 325	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 326	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 327	const u64 orig_start = *start;
 328	const u64 orig_end = *end;
 329	/* The sanity tests may not set a valid fs_info. */
 330	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
 331	u64 delalloc_start;
 332	u64 delalloc_end;
 333	bool found;
 334	struct extent_state *cached_state = NULL;
 335	int ret;
 336	int loops = 0;
 337
 338	/* Caller should pass a valid @end to indicate the search range end */
 339	ASSERT(orig_end > orig_start);
 340
 341	/* The range should at least cover part of the page */
 342	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
 343		 orig_end <= page_offset(locked_page)));
 344again:
 345	/* step one, find a bunch of delalloc bytes starting at start */
 346	delalloc_start = *start;
 347	delalloc_end = 0;
 348	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 349					  max_bytes, &cached_state);
 350	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
 351		*start = delalloc_start;
 352
 353		/* @delalloc_end can be -1, never go beyond @orig_end */
 354		*end = min(delalloc_end, orig_end);
 355		free_extent_state(cached_state);
 356		return false;
 357	}
 358
 359	/*
 360	 * start comes from the offset of locked_page.  We have to lock
 361	 * pages in order, so we can't process delalloc bytes before
 362	 * locked_page
 363	 */
 364	if (delalloc_start < *start)
 365		delalloc_start = *start;
 366
 367	/*
 368	 * make sure to limit the number of pages we try to lock down
 
 369	 */
 370	if (delalloc_end + 1 - delalloc_start > max_bytes)
 371		delalloc_end = delalloc_start + max_bytes - 1;
 372
 373	/* step two, lock all the pages after the page that has start */
 374	ret = lock_delalloc_pages(inode, locked_page,
 375				  delalloc_start, delalloc_end);
 376	ASSERT(!ret || ret == -EAGAIN);
 377	if (ret == -EAGAIN) {
 378		/* some of the pages are gone, lets avoid looping by
 379		 * shortening the size of the delalloc range we're searching
 380		 */
 381		free_extent_state(cached_state);
 382		cached_state = NULL;
 383		if (!loops) {
 384			max_bytes = PAGE_SIZE;
 
 385			loops = 1;
 386			goto again;
 387		} else {
 388			found = false;
 389			goto out_failed;
 390		}
 391	}
 
 392
 393	/* step three, lock the state bits for the whole range */
 394	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 
 395
 396	/* then test to make sure it is all still delalloc */
 397	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 398			     EXTENT_DELALLOC, cached_state);
 399	if (!ret) {
 400		unlock_extent(tree, delalloc_start, delalloc_end,
 401			      &cached_state);
 402		__unlock_for_delalloc(inode, locked_page,
 403			      delalloc_start, delalloc_end);
 404		cond_resched();
 405		goto again;
 406	}
 407	free_extent_state(cached_state);
 408	*start = delalloc_start;
 409	*end = delalloc_end;
 410out_failed:
 411	return found;
 412}
 413
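/*
 * Clear @clear_bits in the io tree for [@start, @end] and apply @page_ops to
 * the pages covering that range.
 */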
 414void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 415				  struct page *locked_page,
 416				  u32 clear_bits, unsigned long page_ops)
 
 417{
 418	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
 419
 420	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
 421			       start, end, page_ops);
 422}
 423
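/*
 * A page needs no fs-verity check if verity is not enabled on the inode, the
 * page is already uptodate, or it lies entirely beyond i_size; otherwise
 * verify its contents.
 */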
 424static bool btrfs_verify_page(struct page *page, u64 start)
 425{
 426	if (!fsverity_active(page->mapping->host) ||
 427	    PageUptodate(page) ||
 428	    start >= i_size_read(page->mapping->host))
 429		return true;
 430	return fsverity_verify_page(page);
 431}
 432
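/*
 * Finish the read of [@start, @start + @len) inside @page: update the
 * (sub)page uptodate status and unlock the page or the subpage range.
 */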
 433static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
 434{
 435	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
 436	struct folio *folio = page_folio(page);
 437
 438	ASSERT(page_offset(page) <= start &&
 439	       start + len <= page_offset(page) + PAGE_SIZE);
 440
 441	if (uptodate && btrfs_verify_page(page, start))
 442		btrfs_folio_set_uptodate(fs_info, folio, start, len);
 443	else
 444		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
 445
 446	if (!btrfs_is_subpage(fs_info, page->mapping))
 447		unlock_page(page);
 448	else
 449		btrfs_subpage_end_reader(fs_info, folio, start, len);
 
 
 450}
 451
 452/*
 453 * After a write IO is done, we need to:
 454 *
 455 * - clear the uptodate bits on error
 456 * - clear the writeback bits in the extent tree for the range
  457 * - folio_end_writeback() if there is no more pending io for the folio
 458 *
 459 * Scheduling is not allowed, so the extent state tree is expected
 460 * to have one and only one object corresponding to this IO.
 461 */
 462static void end_bbio_data_write(struct btrfs_bio *bbio)
 
 
 463{
 464	struct btrfs_fs_info *fs_info = bbio->fs_info;
 465	struct bio *bio = &bbio->bio;
 466	int error = blk_status_to_errno(bio->bi_status);
 467	struct folio_iter fi;
 468	const u32 sectorsize = fs_info->sectorsize;
 469
 470	ASSERT(!bio_flagged(bio, BIO_CLONED));
 471	bio_for_each_folio_all(fi, bio) {
 472		struct folio *folio = fi.folio;
 473		u64 start = folio_pos(folio) + fi.offset;
 474		u32 len = fi.length;
 475
 476		/* Only order 0 (single page) folios are allowed for data. */
 477		ASSERT(folio_order(folio) == 0);
 478
 479		/* Our read/write should always be sector aligned. */
 480		if (!IS_ALIGNED(fi.offset, sectorsize))
 481			btrfs_err(fs_info,
 482		"partial page write in btrfs with offset %zu and length %zu",
 483				  fi.offset, fi.length);
 484		else if (!IS_ALIGNED(fi.length, sectorsize))
 485			btrfs_info(fs_info,
 486		"incomplete page write with offset %zu and length %zu",
 487				   fi.offset, fi.length);
 488
 489		btrfs_finish_ordered_extent(bbio->ordered,
 490				folio_page(folio, 0), start, len, !error);
 491		if (error)
 492			mapping_set_error(folio->mapping, error);
 493		btrfs_folio_clear_writeback(fs_info, folio, start, len);
 494	}
 495
 496	bio_put(bio);
 497}
 498
 499/*
 500 * Record previously processed extent range
 501 *
  502 * Used by endio_readpage_release_extent() to handle a full extent range and
  503 * reduce the number of extent io operations.
 504 */
 505struct processed_extent {
 506	struct btrfs_inode *inode;
 507	/* Start of the range in @inode */
 508	u64 start;
 509	/* End of the range in @inode */
 510	u64 end;
 511	bool uptodate;
 512};
 513
 514/*
 515 * Try to release processed extent range
 516 *
 517 * May not release the extent range right now if the current range is
 518 * contiguous to processed extent.
 519 *
  520 * Will release the processed extent when @inode or @uptodate changes, or when
  521 * the range is no longer contiguous with the processed range.
 522 *
 523 * Passing @inode == NULL will force processed extent to be released.
 524 */
 525static void endio_readpage_release_extent(struct processed_extent *processed,
 526			      struct btrfs_inode *inode, u64 start, u64 end,
 527			      bool uptodate)
 528{
 529	struct extent_state *cached = NULL;
 530	struct extent_io_tree *tree;
 531
 532	/* The first extent, initialize @processed */
 533	if (!processed->inode)
 534		goto update;
 535
 
 536	/*
  537	 * Contiguous with the processed extent, just extend its end.
 538	 *
 539	 * Several things to notice:
 540	 *
 541	 * - bio can be merged as long as on-disk bytenr is contiguous
  542	 *   This means we can have pages belonging to other inodes, thus we need to
 543	 *   check if the inode still matches.
 544	 * - bvec can contain range beyond current page for multi-page bvec
 545	 *   Thus we need to do processed->end + 1 >= start check
 546	 */
 547	if (processed->inode == inode && processed->uptodate == uptodate &&
 548	    processed->end + 1 >= start && end >= processed->end) {
 549		processed->end = end;
 550		return;
 551	}
 552
 553	tree = &processed->inode->io_tree;
 554	/*
 555	 * Now we don't have range contiguous to the processed range, release
 556	 * the processed range now.
 557	 */
 558	unlock_extent(tree, processed->start, processed->end, &cached);
 559
 560update:
 561	/* Update processed to current range */
 562	processed->inode = inode;
 563	processed->start = start;
 564	processed->end = end;
 565	processed->uptodate = uptodate;
 566}
 567
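/*
 * For subpage cases, mark the page range as under read so that end_page_read()
 * knows when the last reader of the page is done.
 */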
 568static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 569{
 570	struct folio *folio = page_folio(page);
 
 
 571
 572	ASSERT(folio_test_locked(folio));
 573	if (!btrfs_is_subpage(fs_info, folio->mapping))
 574		return;
 575
 576	ASSERT(folio_test_private(folio));
 577	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
 578}
 579
 580/*
 581 * After a data read IO is done, we need to:
 582 *
 583 * - clear the uptodate bits on error
 584 * - set the uptodate bits if things worked
 585 * - set the folio up to date if all extents in the tree are uptodate
 586 * - clear the lock bit in the extent tree
 587 * - unlock the folio if there are no other extents locked for it
 588 *
 589 * Scheduling is not allowed, so the extent state tree is expected
 590 * to have one and only one object corresponding to this IO.
 591 */
 592static void end_bbio_data_read(struct btrfs_bio *bbio)
 593{
 594	struct btrfs_fs_info *fs_info = bbio->fs_info;
 595	struct bio *bio = &bbio->bio;
 596	struct processed_extent processed = { 0 };
 597	struct folio_iter fi;
 598	const u32 sectorsize = fs_info->sectorsize;
 599
 600	ASSERT(!bio_flagged(bio, BIO_CLONED));
 601	bio_for_each_folio_all(fi, &bbio->bio) {
 602		bool uptodate = !bio->bi_status;
 603		struct folio *folio = fi.folio;
 604		struct inode *inode = folio->mapping->host;
 605		u64 start;
 606		u64 end;
 607		u32 len;
 608
 609		/* For now only order 0 folios are supported for data. */
 610		ASSERT(folio_order(folio) == 0);
 611		btrfs_debug(fs_info,
 612			"%s: bi_sector=%llu, err=%d, mirror=%u",
 613			__func__, bio->bi_iter.bi_sector, bio->bi_status,
 614			bbio->mirror_num);
 615
 616		/*
 617		 * We always issue full-sector reads, but if some block in a
 618		 * folio fails to read, blk_update_request() will advance
 619		 * bv_offset and adjust bv_len to compensate.  Print a warning
 620		 * for unaligned offsets, and an error if they don't add up to
 621		 * a full sector.
 622		 */
 623		if (!IS_ALIGNED(fi.offset, sectorsize))
 624			btrfs_err(fs_info,
 625		"partial page read in btrfs with offset %zu and length %zu",
 626				  fi.offset, fi.length);
 627		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
 628			btrfs_info(fs_info,
 629		"incomplete page read with offset %zu and length %zu",
 630				   fi.offset, fi.length);
 631
 632		start = folio_pos(folio) + fi.offset;
 633		end = start + fi.length - 1;
 634		len = fi.length;
 635
 636		if (likely(uptodate)) {
 637			loff_t i_size = i_size_read(inode);
 638			pgoff_t end_index = i_size >> folio_shift(folio);
 639
 640			/*
 641			 * Zero out the remaining part if this range straddles
 642			 * i_size.
 643			 *
 644			 * Here we should only zero the range inside the folio,
 645			 * not touch anything else.
 646			 *
 647			 * NOTE: i_size is exclusive while end is inclusive.
 648			 */
 649			if (folio_index(folio) == end_index && i_size <= end) {
 650				u32 zero_start = max(offset_in_folio(folio, i_size),
 651						     offset_in_folio(folio, start));
 652				u32 zero_len = offset_in_folio(folio, end) + 1 -
 653					       zero_start;
 654
 655				folio_zero_range(folio, zero_start, zero_len);
 656			}
 657		}
 658
 659		/* Update page status and unlock. */
 660		end_page_read(folio_page(folio, 0), uptodate, start, len);
 661		endio_readpage_release_extent(&processed, BTRFS_I(inode),
 662					      start, end, uptodate);
 663	}
 664	/* Release the last extent */
 665	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
 666	bio_put(bio);
 667}
 668
 669/*
 670 * Populate every free slot in a provided array with pages.
 671 *
 672 * @nr_pages:   number of pages to allocate
 673 * @page_array: the array to fill with pages; any existing non-null entries in
 674 * 		the array will be skipped
 675 * @extra_gfp:	the extra GFP flags for the allocation.
 676 *
 677 * Return: 0        if all pages were able to be allocated;
 678 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 679 *                  the array slots zeroed
 680 */
 681int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 682			   gfp_t extra_gfp)
 683{
 684	const gfp_t gfp = GFP_NOFS | extra_gfp;
 685	unsigned int allocated;
 686
 687	for (allocated = 0; allocated < nr_pages;) {
 688		unsigned int last = allocated;
 689
 690		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
 691		if (unlikely(allocated == last)) {
 692			/* No progress, fail and do cleanup. */
 693			for (int i = 0; i < allocated; i++) {
 694				__free_page(page_array[i]);
 695				page_array[i] = NULL;
 696			}
 697			return -ENOMEM;
 698		}
 699	}
 700	return 0;
 701}
 702
 
 
 703/*
 704 * Populate needed folios for the extent buffer.
 705 *
 706 * For now, the folios populated are always in order 0 (aka, single page).
 
 707 */
 708static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
 709{
 710	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
 711	int num_pages = num_extent_pages(eb);
 712	int ret;
 713
 714	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
 715	if (ret < 0)
 716		return ret;
 717
 718	for (int i = 0; i < num_pages; i++)
 719		eb->folios[i] = page_folio(page_array[i]);
 720	eb->folio_size = PAGE_SIZE;
 721	eb->folio_shift = PAGE_SHIFT;
 722	return 0;
 723}
 724
 725static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
 726				struct page *page, u64 disk_bytenr,
 727				unsigned int pg_offset)
 728{
 729	struct bio *bio = &bio_ctrl->bbio->bio;
 730	struct bio_vec *bvec = bio_last_bvec_all(bio);
 731	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
 
 732
 733	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
 734		/*
 735		 * For compression, all IO should have its logical bytenr set
 736		 * to the starting bytenr of the compressed extent.
 737		 */
 738		return bio->bi_iter.bi_sector == sector;
 739	}
 
 
 740
 741	/*
 742	 * The contig check requires the following conditions to be met:
 743	 *
 744	 * 1) The pages are belonging to the same inode
 745	 *    This is implied by the call chain.
 746	 *
 747	 * 2) The range has adjacent logical bytenr
 748	 *
 749	 * 3) The range has adjacent file offset
 750	 *    This is required for the usage of btrfs_bio->file_offset.
 751	 */
 752	return bio_end_sector(bio) == sector &&
 753		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
 754		page_offset(page) + pg_offset;
 755}
 756
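/*
 * Allocate a new btrfs_bio starting at @disk_bytenr for @file_offset and, for
 * data writes, cap it to the ordered extent boundary.
 */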
 757static void alloc_new_bio(struct btrfs_inode *inode,
 758			  struct btrfs_bio_ctrl *bio_ctrl,
 759			  u64 disk_bytenr, u64 file_offset)
 760{
 761	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 762	struct btrfs_bio *bbio;
 763
 764	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
 765			       bio_ctrl->end_io_func, NULL);
 766	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 767	bbio->inode = inode;
 768	bbio->file_offset = file_offset;
 769	bio_ctrl->bbio = bbio;
 770	bio_ctrl->len_to_oe_boundary = U32_MAX;
 771
 772	/* Limit data write bios to the ordered boundary. */
 773	if (bio_ctrl->wbc) {
 774		struct btrfs_ordered_extent *ordered;
 775
 776		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 777		if (ordered) {
 778			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
 779					ordered->file_offset +
 780					ordered->disk_num_bytes - file_offset);
 781			bbio->ordered = ordered;
 782		}
 783
 784		/*
 785		 * Pick the last added device to support cgroup writeback.  For
 786		 * multi-device file systems this means blk-cgroup policies have
 787		 * to always be set on the last added/replaced device.
 788		 * This is a bit odd but has been like that for a long time.
 789		 */
 790		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
 791		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
 792	}
 793}
 794
 795/*
 796 * @disk_bytenr: logical bytenr where the write will be
 797 * @page:	page to add to the bio
 798 * @size:	portion of page that we want to write to
 799 * @pg_offset:	offset of the new bio or to check whether we are adding
 800 *              a contiguous page to the previous one
 
 801 *
  802 * This will either add the page into the existing @bio_ctrl->bbio, or allocate a
  803 * new one in @bio_ctrl->bbio.
  804 * The mirror number for this IO should already be initialized in
 805 * @bio_ctrl->mirror_num.
 806 */
 807static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
 808			       u64 disk_bytenr, struct page *page,
 809			       size_t size, unsigned long pg_offset)
 810{
 811	struct btrfs_inode *inode = page_to_inode(page);
 812
 813	ASSERT(pg_offset + size <= PAGE_SIZE);
 814	ASSERT(bio_ctrl->end_io_func);
 815
 816	if (bio_ctrl->bbio &&
 817	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
 818		submit_one_bio(bio_ctrl);
 
 819
 820	do {
 821		u32 len = size;
 822
 823		/* Allocate new bio if needed */
 824		if (!bio_ctrl->bbio) {
 825			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
 826				      page_offset(page) + pg_offset);
 827		}
 
 828
 829		/* Cap to the current ordered extent boundary if there is one. */
 830		if (len > bio_ctrl->len_to_oe_boundary) {
 831			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
 832			ASSERT(is_data_inode(&inode->vfs_inode));
 833			len = bio_ctrl->len_to_oe_boundary;
 834		}
 835
 836		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
 837			/* bio full: move on to a new one */
 838			submit_one_bio(bio_ctrl);
 839			continue;
 840		}
 
 841
 842		if (bio_ctrl->wbc)
 843			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
 844
 845		size -= len;
 846		pg_offset += len;
 847		disk_bytenr += len;
 848
 849		/*
 850		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
 851		 * sector aligned.  alloc_new_bio() then sets it to the end of
 852		 * our ordered extent for writes into zoned devices.
 853		 *
 854		 * When len_to_oe_boundary is tracking an ordered extent, we
 855		 * trust the ordered extent code to align things properly, and
 856		 * the check above to cap our write to the ordered extent
 857		 * boundary is correct.
 858		 *
 859		 * When len_to_oe_boundary is U32_MAX, the cap above would
 860		 * result in a 4095 byte IO for the last page right before
 861		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
 862		 * the checks required to make sure we don't overflow the bio,
 863		 * and we should just ignore len_to_oe_boundary completely
 864		 * unless we're using it to track an ordered extent.
 865		 *
 866		 * It's pretty hard to make a bio sized U32_MAX, but it can
 867		 * happen when the page cache is able to feed us contiguous
 868		 * pages for large extents.
 869		 */
 870		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
 871			bio_ctrl->len_to_oe_boundary -= len;
 872
 873		/* Ordered extent boundary: move on to a new bio. */
 874		if (bio_ctrl->len_to_oe_boundary == 0)
 875			submit_one_bio(bio_ctrl);
 876	} while (size);
 
 
 877}
 878
 879static int attach_extent_buffer_folio(struct extent_buffer *eb,
 880				      struct folio *folio,
 881				      struct btrfs_subpage *prealloc)
 882{
 883	struct btrfs_fs_info *fs_info = eb->fs_info;
 884	int ret = 0;
 885
 886	/*
 887	 * If the page is mapped to btree inode, we should hold the private
 888	 * lock to prevent race.
 889	 * For cloned or dummy extent buffers, their pages are not mapped and
 890	 * will not race with any other ebs.
 891	 */
 892	if (folio->mapping)
 893		lockdep_assert_held(&folio->mapping->i_private_lock);
 894
 895	if (fs_info->nodesize >= PAGE_SIZE) {
 896		if (!folio_test_private(folio))
 897			folio_attach_private(folio, eb);
 898		else
 899			WARN_ON(folio_get_private(folio) != eb);
 900		return 0;
 901	}
 902
 903	/* Already mapped, just free prealloc */
 904	if (folio_test_private(folio)) {
 905		btrfs_free_subpage(prealloc);
 906		return 0;
 907	}
 908
 909	if (prealloc)
 910		/* Has preallocated memory for subpage */
 911		folio_attach_private(folio, prealloc);
 912	else
 913		/* Do new allocation to attach subpage */
 914		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
 
 
 915	return ret;
 916}
 917
 918int set_page_extent_mapped(struct page *page)
 919{
 920	return set_folio_extent_mapped(page_folio(page));
 921}
 922
 923int set_folio_extent_mapped(struct folio *folio)
 924{
 925	struct btrfs_fs_info *fs_info;
 926
 927	ASSERT(folio->mapping);
 
 
 928
 929	if (folio_test_private(folio))
 930		return 0;
 
 931
 932	fs_info = folio_to_fs_info(folio);
 933
 934	if (btrfs_is_subpage(fs_info, folio->mapping))
 935		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 936
 937	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
 938	return 0;
 939}
 940
 941void clear_page_extent_mapped(struct page *page)
 942{
 943	struct folio *folio = page_folio(page);
 944	struct btrfs_fs_info *fs_info;
 945
 946	ASSERT(page->mapping);
 947
 948	if (!folio_test_private(folio))
 949		return;
 950
 951	fs_info = page_to_fs_info(page);
 952	if (btrfs_is_subpage(fs_info, page->mapping))
 953		return btrfs_detach_subpage(fs_info, folio);
 954
 955	folio_detach_private(folio);
 956}
 957
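/*
 * Return the extent map covering @start: reuse and refcount *@em_cached when it
 * still covers the offset, otherwise look up a new one and cache it.
 */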
 958static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
 959		 u64 start, u64 len, struct extent_map **em_cached)
 960{
 961	struct extent_map *em;
 
 
 962
 963	ASSERT(em_cached);
 964
 965	if (*em_cached) {
 966		em = *em_cached;
 967		if (extent_map_in_tree(em) && start >= em->start &&
 968		    start < extent_map_end(em)) {
 969			refcount_inc(&em->refs);
 970			return em;
 971		}
 972
 973		free_extent_map(em);
 974		*em_cached = NULL;
 975	}
 976
 977	em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
 978	if (!IS_ERR(em)) {
 979		BUG_ON(*em_cached);
 980		refcount_inc(&em->refs);
 981		*em_cached = em;
 982	}
 983	return em;
 984}
 985/*
  986 * basic readpage implementation.  Locked extent state structs are inserted
  987 * into the tree and are removed when the IO is done (by the end_io
  988 * handlers).
 989 * XXX JDM: This needs looking at to ensure proper page locking
 990 * return 0 on success, otherwise return error
 991 */
 992static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 993		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
 994{
 995	struct inode *inode = page->mapping->host;
 996	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 997	u64 start = page_offset(page);
 998	const u64 end = start + PAGE_SIZE - 1;
 999	u64 cur = start;
1000	u64 extent_offset;
1001	u64 last_byte = i_size_read(inode);
1002	u64 block_start;
 
 
1003	struct extent_map *em;
1004	int ret = 0;
1005	size_t pg_offset = 0;
1006	size_t iosize;
1007	size_t blocksize = fs_info->sectorsize;
1008	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1009
1010	ret = set_page_extent_mapped(page);
1011	if (ret < 0) {
1012		unlock_extent(tree, start, end, NULL);
1013		unlock_page(page);
1014		return ret;
1015	}
1016
1017	if (page->index == last_byte >> PAGE_SHIFT) {
1018		size_t zero_offset = offset_in_page(last_byte);
 
1019
1020		if (zero_offset) {
1021			iosize = PAGE_SIZE - zero_offset;
1022			memzero_page(page, zero_offset, iosize);
1023		}
1024	}
1025	bio_ctrl->end_io_func = end_bbio_data_read;
1026	begin_page_read(fs_info, page);
1027	while (cur <= end) {
1028		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1029		bool force_bio_submit = false;
1030		u64 disk_bytenr;
1031
1032		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1033		if (cur >= last_byte) {
1034			iosize = PAGE_SIZE - pg_offset;
1035			memzero_page(page, pg_offset, iosize);
1036			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1037			end_page_read(page, true, cur, iosize);
1038			break;
1039		}
1040		em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
1041		if (IS_ERR(em)) {
1042			unlock_extent(tree, cur, end, NULL);
1043			end_page_read(page, false, cur, end + 1 - cur);
1044			return PTR_ERR(em);
 
1045		}
1046		extent_offset = cur - em->start;
1047		BUG_ON(extent_map_end(em) <= cur);
1048		BUG_ON(end < cur);
1049
1050		compress_type = extent_map_compression(em);
1051
1052		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1053		iosize = ALIGN(iosize, blocksize);
1054		if (compress_type != BTRFS_COMPRESS_NONE)
1055			disk_bytenr = em->block_start;
1056		else
1057			disk_bytenr = em->block_start + extent_offset;
1058		block_start = em->block_start;
1059		if (em->flags & EXTENT_FLAG_PREALLOC)
1060			block_start = EXTENT_MAP_HOLE;
1061
1062		/*
1063		 * If we have a file range that points to a compressed extent
1064		 * and it's followed by a consecutive file range that points
1065		 * to the same compressed extent (possibly with a different
1066		 * offset and/or length, so it either points to the whole extent
1067		 * or only part of it), we must make sure we do not submit a
1068		 * single bio to populate the pages for the 2 ranges because
1069		 * this makes the compressed extent read zero out the pages
1070		 * belonging to the 2nd range. Imagine the following scenario:
1071		 *
1072		 *  File layout
1073		 *  [0 - 8K]                     [8K - 24K]
1074		 *    |                               |
1075		 *    |                               |
1076		 * points to extent X,         points to extent X,
1077		 * offset 4K, length of 8K     offset 0, length 16K
1078		 *
1079		 * [extent X, compressed length = 4K uncompressed length = 16K]
1080		 *
1081		 * If the bio to read the compressed extent covers both ranges,
1082		 * it will decompress extent X into the pages belonging to the
1083		 * first range and then it will stop, zeroing out the remaining
1084		 * pages that belong to the other range that points to extent X.
1085		 * So here we make sure we submit 2 bios, one for the first
1086		 * range and another one for the third range. Both will target
1087		 * the same physical extent from disk, but we can't currently
1088		 * make the compressed bio endio callback populate the pages
1089		 * for both ranges because each compressed bio is tightly
1090		 * coupled with a single extent map, and each range can have
1091		 * an extent map with a different offset value relative to the
1092		 * uncompressed data of our extent and different lengths. This
1093		 * is a corner case so we prioritize correctness over
1094		 * non-optimal behavior (submitting 2 bios for the same extent).
1095		 */
1096		if (compress_type != BTRFS_COMPRESS_NONE &&
1097		    prev_em_start && *prev_em_start != (u64)-1 &&
1098		    *prev_em_start != em->start)
1099			force_bio_submit = true;
1100
1101		if (prev_em_start)
1102			*prev_em_start = em->start;
1103
1104		free_extent_map(em);
1105		em = NULL;
1106
1107		/* we've found a hole, just zero and go on */
1108		if (block_start == EXTENT_MAP_HOLE) {
1109			memzero_page(page, pg_offset, iosize);
 
1110
1111			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1112			end_page_read(page, true, cur, iosize);
1113			cur = cur + iosize;
1114			pg_offset += iosize;
1115			continue;
1116		}
1117		/* the get_extent function already copied into the page */
1118		if (block_start == EXTENT_MAP_INLINE) {
1119			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1120			end_page_read(page, true, cur, iosize);
1121			cur = cur + iosize;
1122			pg_offset += iosize;
1123			continue;
1124		}
1125
1126		if (bio_ctrl->compress_type != compress_type) {
1127			submit_one_bio(bio_ctrl);
1128			bio_ctrl->compress_type = compress_type;
1129		}
1130
1131		if (force_bio_submit)
1132			submit_one_bio(bio_ctrl);
1133		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1134				   pg_offset);
1135		cur = cur + iosize;
1136		pg_offset += iosize;
1137	}
1138
1139	return 0;
1140}
1141
1142int btrfs_read_folio(struct file *file, struct folio *folio)
 
1143{
1144	struct page *page = &folio->page;
1145	struct btrfs_inode *inode = page_to_inode(page);
1146	u64 start = page_offset(page);
1147	u64 end = start + PAGE_SIZE - 1;
1148	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1149	struct extent_map *em_cached = NULL;
1150	int ret;
1151
1152	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1153
1154	ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
1155	free_extent_map(em_cached);
1156
1157	/*
1158	 * If btrfs_do_readpage() failed we will want to submit the assembled
1159	 * bio to do the cleanup.
1160	 */
1161	submit_one_bio(&bio_ctrl);
1162	return ret;
1163}
1164
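/*
 * Read a batch of pages covering the contiguous file range [@start, @end],
 * sharing one cached extent map and one bio control structure.
 */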
1165static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1166					u64 start, u64 end,
1167					struct extent_map **em_cached,
1168					struct btrfs_bio_ctrl *bio_ctrl,
1169					u64 *prev_em_start)
1170{
1171	struct btrfs_inode *inode = page_to_inode(pages[0]);
1172	int index;
1173
1174	ASSERT(em_cached);
1175
1176	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1177
1178	for (index = 0; index < nr_pages; index++) {
1179		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1180				  prev_em_start);
1181		put_page(pages[index]);
1182	}
1183}
1184
1185/*
1186 * helper for __extent_writepage, doing all of the delayed allocation setup.
1187 *
1188 * This returns 1 if btrfs_run_delalloc_range function did all the work required
1189 * to write the page (copy into inline extent).  In this case the IO has
1190 * been started and the page is already unlocked.
1191 *
1192 * This returns 0 if all went well (page still locked)
1193 * This returns < 0 if there were errors (page still locked)
1194 */
1195static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1196		struct page *page, struct writeback_control *wbc)
1197{
1198	const u64 page_start = page_offset(page);
1199	const u64 page_end = page_start + PAGE_SIZE - 1;
1200	u64 delalloc_start = page_start;
1201	u64 delalloc_end = page_end;
1202	u64 delalloc_to_write = 0;
1203	int ret = 0;
1204
1205	while (delalloc_start < page_end) {
1206		delalloc_end = page_end;
1207		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1208					      &delalloc_start, &delalloc_end)) {
1209			delalloc_start = delalloc_end + 1;
1210			continue;
1211		}
1212
1213		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1214					       delalloc_end, wbc);
1215		if (ret < 0)
1216			return ret;
1217
1218		delalloc_start = delalloc_end + 1;
1219	}
1220
1221	/*
1222	 * delalloc_end is already one less than the total length, so
1223	 * we don't subtract one from PAGE_SIZE
1224	 */
1225	delalloc_to_write +=
1226		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1227
1228	/*
 1229	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1230	 * the pages, we just need to account for them here.
1231	 */
1232	if (ret == 1) {
1233		wbc->nr_to_write -= delalloc_to_write;
1234		return 1;
1235	}
1236
1237	if (wbc->nr_to_write < delalloc_to_write) {
1238		int thresh = 8192;
1239
1240		if (delalloc_to_write < thresh * 2)
1241			thresh = delalloc_to_write;
1242		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1243					 thresh);
1244	}
1245
1246	return 0;
1247}
1248
1249/*
1250 * Find the first byte we need to write.
1251 *
1252 * For subpage, one page can contain several sectors, and
1253 * __extent_writepage_io() will just grab all extent maps in the page
1254 * range and try to submit all non-inline/non-compressed extents.
1255 *
 1256 * This is a big problem for subpage; we shouldn't re-submit already written
 1257 * data at all.
1258 * This function will lookup subpage dirty bit to find which range we really
1259 * need to submit.
1260 *
1261 * Return the next dirty range in [@start, @end).
1262 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1263 */
1264static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1265				 struct page *page, u64 *start, u64 *end)
1266{
1267	struct folio *folio = page_folio(page);
1268	struct btrfs_subpage *subpage = folio_get_private(folio);
1269	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1270	u64 orig_start = *start;
1271	/* Declare as unsigned long so we can use bitmap ops */
1272	unsigned long flags;
1273	int range_start_bit;
1274	int range_end_bit;
1275
1276	/*
1277	 * For regular sector size == page size case, since one page only
1278	 * contains one sector, we return the page offset directly.
1279	 */
1280	if (!btrfs_is_subpage(fs_info, page->mapping)) {
1281		*start = page_offset(page);
1282		*end = page_offset(page) + PAGE_SIZE;
1283		return;
1284	}
1285
1286	range_start_bit = spi->dirty_offset +
1287			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1288
1289	/* We should have the page locked, but just in case */
1290	spin_lock_irqsave(&subpage->lock, flags);
1291	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1292			       spi->dirty_offset + spi->bitmap_nr_bits);
1293	spin_unlock_irqrestore(&subpage->lock, flags);
1294
1295	range_start_bit -= spi->dirty_offset;
1296	range_end_bit -= spi->dirty_offset;
1297
1298	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1299	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1300}
1301
1302/*
1303 * helper for __extent_writepage.  This calls the writepage start hooks,
1304 * and does the loop to map the page into extents and bios.
1305 *
1306 * We return 1 if the IO is started and the page is unlocked,
1307 * 0 if all went well (page still locked)
1308 * < 0 if there were errors (page still locked)
1309 */
1310static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1311				 struct page *page,
1312				 struct btrfs_bio_ctrl *bio_ctrl,
1313				 loff_t i_size,
1314				 int *nr_ret)
1315{
1316	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1317	u64 cur = page_offset(page);
1318	u64 end = cur + PAGE_SIZE - 1;
1319	u64 extent_offset;
1320	u64 block_start;
1321	struct extent_map *em;
1322	int ret = 0;
1323	int nr = 0;
1324
1325	ret = btrfs_writepage_cow_fixup(page);
1326	if (ret) {
1327		/* Fixup worker will requeue */
1328		redirty_page_for_writepage(bio_ctrl->wbc, page);
1329		unlock_page(page);
1330		return 1;
1331	}
1332
1333	bio_ctrl->end_io_func = end_bbio_data_write;
1334	while (cur <= end) {
1335		u32 len = end - cur + 1;
1336		u64 disk_bytenr;
1337		u64 em_end;
1338		u64 dirty_range_start = cur;
1339		u64 dirty_range_end;
1340		u32 iosize;
1341
1342		if (cur >= i_size) {
1343			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1344						       true);
1345			/*
1346			 * This range is beyond i_size, thus we don't need to
1347			 * bother writing back.
1348			 * But we still need to clear the dirty subpage bit, or
1349			 * the next time the page gets dirtied, we will try to
1350			 * writeback the sectors with subpage dirty bits,
1351			 * causing writeback without ordered extent.
1352			 */
1353			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1354			break;
1355		}
1356
1357		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1358				     &dirty_range_end);
1359		if (cur < dirty_range_start) {
1360			cur = dirty_range_start;
1361			continue;
1362		}
1363
1364		em = btrfs_get_extent(inode, NULL, cur, len);
1365		if (IS_ERR(em)) {
1366			ret = PTR_ERR_OR_ZERO(em);
1367			goto out_error;
1368		}
1369
1370		extent_offset = cur - em->start;
1371		em_end = extent_map_end(em);
1372		ASSERT(cur <= em_end);
1373		ASSERT(cur < end);
1374		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1375		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1376
1377		block_start = em->block_start;
1378		disk_bytenr = em->block_start + extent_offset;
1379
1380		ASSERT(!extent_map_is_compressed(em));
1381		ASSERT(block_start != EXTENT_MAP_HOLE);
1382		ASSERT(block_start != EXTENT_MAP_INLINE);
1383
1384		/*
1385		 * Note that em_end from extent_map_end() and dirty_range_end from
1386		 * find_next_dirty_byte() are all exclusive
1387		 */
1388		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1389		free_extent_map(em);
1390		em = NULL;
1391
1392		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1393		if (!PageWriteback(page)) {
1394			btrfs_err(inode->root->fs_info,
1395				   "page %lu not writeback, cur %llu end %llu",
1396			       page->index, cur, end);
1397		}
1398
1399		/*
1400		 * Although the PageDirty bit is cleared before entering this
1401		 * function, subpage dirty bit is not cleared.
1402		 * So clear subpage dirty bit here so next time we won't submit
1403		 * page for range already written to disk.
1404		 */
1405		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1406
1407		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1408				   cur - page_offset(page));
1409		cur += iosize;
1410		nr++;
1411	}
1412
1413	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1414	*nr_ret = nr;
1415	return 0;
1416
1417out_error:
1418	/*
1419	 * If we finish without problem, we should not only clear page dirty,
1420	 * but also empty subpage dirty bits
1421	 */
1422	*nr_ret = nr;
1423	return ret;
1424}
1425
1426/*
1427 * the writepage semantics are similar to regular writepage.  extent
1428 * records are inserted to lock ranges in the tree, and as dirty areas
1429 * are found, they are marked writeback.  Then the lock bits are removed
1430 * and the end_io handler clears the writeback ranges
1431 *
1432 * Return 0 if everything goes well.
1433 * Return <0 for error.
1434 */
1435static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
 
1436{
1437	struct folio *folio = page_folio(page);
1438	struct inode *inode = page->mapping->host;
1439	const u64 page_start = page_offset(page);
1440	int ret;
1441	int nr = 0;
1442	size_t pg_offset;
 
1443	loff_t i_size = i_size_read(inode);
1444	unsigned long end_index = i_size >> PAGE_SHIFT;
1445
1446	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1447
1448	WARN_ON(!PageLocked(page));
1449
1450	pg_offset = offset_in_page(i_size);
1451	if (page->index > end_index ||
1452	   (page->index == end_index && !pg_offset)) {
1453		folio_invalidate(folio, 0, folio_size(folio));
1454		folio_unlock(folio);
1455		return 0;
1456	}
1457
1458	if (page->index == end_index)
1459		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1460
1461	ret = set_page_extent_mapped(page);
1462	if (ret < 0)
1463		goto done;
1464
1465	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1466	if (ret == 1)
1467		return 0;
1468	if (ret)
1469		goto done;
1470
1471	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1472	if (ret == 1)
1473		return 0;
1474
1475	bio_ctrl->wbc->nr_to_write--;
1476
1477done:
1478	if (nr == 0) {
1479		/* make sure the mapping tag for page dirty gets cleared */
1480		set_page_writeback(page);
1481		end_page_writeback(page);
1482	}
1483	if (ret) {
1484		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1485					       PAGE_SIZE, !ret);
1486		mapping_set_error(page->mapping, ret);
1487	}
1488	unlock_page(page);
1489	ASSERT(ret <= 0);
1490	return ret;
1491}
1492
1493void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1494{
1495	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1496		       TASK_UNINTERRUPTIBLE);
1497}
1498
1499/*
1500 * Lock extent buffer status and pages for writeback.
1501 *
1502 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
 1503 * extent buffer is not dirty).
 1504 * Return %true if the extent buffer is submitted to bio.
1505 */
1506static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1507			  struct writeback_control *wbc)
1508{
1509	struct btrfs_fs_info *fs_info = eb->fs_info;
1510	bool ret = false;
1511
1512	btrfs_tree_lock(eb);
1513	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1514		btrfs_tree_unlock(eb);
1515		if (wbc->sync_mode != WB_SYNC_ALL)
1516			return false;
1517		wait_on_extent_buffer_writeback(eb);
1518		btrfs_tree_lock(eb);
1519	}
1520
1521	/*
1522	 * We need to do this to prevent races in people who check if the eb is
1523	 * under IO since we can end up having no IO bits set for a short period
1524	 * of time.
1525	 */
1526	spin_lock(&eb->refs_lock);
1527	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1528		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1529		spin_unlock(&eb->refs_lock);
1530		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1531		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1532					 -eb->len,
1533					 fs_info->dirty_metadata_batch);
1534		ret = true;
1535	} else {
1536		spin_unlock(&eb->refs_lock);
1537	}
1538	btrfs_tree_unlock(eb);
1539	return ret;
1540}
1541
1542static void set_btree_ioerr(struct extent_buffer *eb)
1543{
1544	struct btrfs_fs_info *fs_info = eb->fs_info;
1545
1546	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1547
1548	/*
1549	 * A read may stumble upon this buffer later, make sure that it gets an
1550	 * error and knows there was an error.
1551	 */
1552	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1553
1554	/*
1555	 * We need to set the mapping with the io error as well because a write
1556	 * error will flip the file system readonly, and then syncfs() will
1557	 * return a 0 because we are readonly if we don't modify the err seq for
1558	 * the superblock.
1559	 */
1560	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1561
1562	/*
1563	 * If writeback for a btree extent that doesn't belong to a log tree
1564	 * failed, increment the counter transaction->eb_write_errors.
1565	 * We do this because, while the transaction is running and before it
1566	 * commits (when we call filemap_fdata[write|wait]_range against the
1567	 * btree inode), the VM might call
1568	 * btree_inode->i_mapping->a_ops->writepages(). If that returns an error,
1569	 * or an error happens during writeback, we wouldn't know about it when
1570	 * committing the transaction, since the pages may no longer be dirty nor
1571	 * marked for writeback (if a subsequent modification to the extent
1572	 * buffer didn't happen before the transaction commit), which makes
1573	 * filemap_fdata[write|wait]_range unable to find the pages tagged with
1574	 * SetPageError at transaction
1575	 * commit time. So if this happens we must abort the transaction,
1576	 * otherwise we commit a super block with btree roots that point to
1577	 * btree nodes/leafs whose content on disk is invalid - either garbage
1578	 * or the content of some node/leaf from a past generation that got
1579	 * cowed or deleted and is no longer valid.
1580	 *
1581	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1582	 * not be enough - we need to distinguish between log tree extents vs
1583	 * non-log tree extents, and the next filemap_fdatawait_range() call
1584	 * will catch and clear such errors in the mapping - and that call might
1585	 * be from a log sync and not from a transaction commit. Also, checking
1586	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1587	 * not done and would not be reliable - the eb might have been released
1588	 * from memory and reading it back again means that flag would not be
1589	 * set (since it's a runtime flag, not persisted on disk).
1590	 *
1591	 * Using the flags below in the btree inode also covers the case that
1592	 * AS_EIO/AS_ENOSPC is meant for: writepages() returned success and
1593	 * started writeback for all dirty pages, but before
1594	 * filemap_fdatawait_range() is called that writeback already finished
1595	 * with errors. Because we were not using AS_EIO/AS_ENOSPC,
1596	 * filemap_fdatawait_range() would return success, as it could not know
1597	 * that writeback errors happened (the pages were no longer tagged for
1598	 * writeback).
1599	 */
1600	switch (eb->log_index) {
1601	case -1:
1602		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1603		break;
1604	case 0:
1605		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1606		break;
1607	case 1:
1608		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1609		break;
1610	default:
1611		BUG(); /* unexpected, logic error */
1612	}
1613}
1614
1615/*
1616 * The endio specific version which won't touch any unsafe spinlock in endio
1617 * context.
1618 */
1619static struct extent_buffer *find_extent_buffer_nolock(
1620		struct btrfs_fs_info *fs_info, u64 start)
1621{
1622	struct extent_buffer *eb;
1623
1624	rcu_read_lock();
1625	eb = radix_tree_lookup(&fs_info->buffer_radix,
1626			       start >> fs_info->sectorsize_bits);
1627	if (eb && atomic_inc_not_zero(&eb->refs)) {
1628		rcu_read_unlock();
1629		return eb;
1630	}
1631	rcu_read_unlock();
1632	return NULL;
1633}
1634
1635static void end_bbio_meta_write(struct btrfs_bio *bbio)
1636{
1637	struct extent_buffer *eb = bbio->private;
1638	struct btrfs_fs_info *fs_info = eb->fs_info;
1639	bool uptodate = !bbio->bio.bi_status;
1640	struct folio_iter fi;
1641	u32 bio_offset = 0;
1642
1643	if (!uptodate)
1644		set_btree_ioerr(eb);
1645
1646	bio_for_each_folio_all(fi, &bbio->bio) {
1647		u64 start = eb->start + bio_offset;
1648		struct folio *folio = fi.folio;
1649		u32 len = fi.length;
1650
1651		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1652		bio_offset += len;
1653	}
1654
1655	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1656	smp_mb__after_atomic();
1657	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1658
1659	bio_put(&bbio->bio);
1660}
1661
1662static void prepare_eb_write(struct extent_buffer *eb)
1663{
1664	u32 nritems;
1665	unsigned long start;
1666	unsigned long end;
1667
1668	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1669
1670	/* Zero the area of the btree block beyond nritems to avoid stale content. */
1671	nritems = btrfs_header_nritems(eb);
1672	if (btrfs_header_level(eb) > 0) {
1673		end = btrfs_node_key_ptr_offset(eb, nritems);
1674		memzero_extent_buffer(eb, end, eb->len - end);
1675	} else {
1676		/*
1677		 * Leaf:
1678		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1679		 */
1680		start = btrfs_item_nr_offset(eb, nritems);
1681		end = btrfs_item_nr_offset(eb, 0);
1682		if (nritems == 0)
1683			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1684		else
1685			end += btrfs_item_offset(eb, nritems - 1);
1686		memzero_extent_buffer(eb, start, end - start);
1687	}
1688}
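
/*
 * Illustrative, standalone sketch (not btrfs code) of the leaf geometry
 * that prepare_eb_write() zeroes above: item headers grow forward from the
 * block header while item data grows backward from the end of the block,
 * so the unused region is [end of headers, lowest data offset).  The names
 * demo_item and demo_leaf_gap are hypothetical simplifications.
 */
#if 0	/* illustrative only */
#include <stddef.h>

struct demo_item {
	size_t offset;	/* start of this item's data, relative to the item area */
	size_t size;
};

/*
 * Store the start of the unused middle region in *start and return its
 * length, for a block of @block_size bytes with a @header_size header.
 */
static size_t demo_leaf_gap(const struct demo_item *items, size_t nritems,
			    size_t header_size, size_t block_size, size_t *start)
{
	size_t headers_end = header_size + nritems * sizeof(struct demo_item);
	size_t data_start = block_size;

	/* Data grows downward, so the last item has the lowest data offset. */
	if (nritems)
		data_start = header_size + items[nritems - 1].offset;

	*start = headers_end;
	return data_start - headers_end;
}
#endif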
1689
1690static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1691					    struct writeback_control *wbc)
1692{
1693	struct btrfs_fs_info *fs_info = eb->fs_info;
1694	struct btrfs_bio *bbio;
1695
1696	prepare_eb_write(eb);
1697
1698	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1699			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1700			       eb->fs_info, end_bbio_meta_write, eb);
1701	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1702	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1703	wbc_init_bio(wbc, &bbio->bio);
1704	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1705	bbio->file_offset = eb->start;
1706	if (fs_info->nodesize < PAGE_SIZE) {
1707		struct folio *folio = eb->folios[0];
1708		bool ret;
1709
1710		folio_lock(folio);
1711		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1712		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1713						       eb->len)) {
1714			folio_clear_dirty_for_io(folio);
1715			wbc->nr_to_write--;
1716		}
1717		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1718				    eb->start - folio_pos(folio));
1719		ASSERT(ret);
1720		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1721		folio_unlock(folio);
1722	} else {
1723		int num_folios = num_extent_folios(eb);
1724
1725		for (int i = 0; i < num_folios; i++) {
1726			struct folio *folio = eb->folios[i];
1727			bool ret;
1728
1729			folio_lock(folio);
1730			folio_clear_dirty_for_io(folio);
1731			folio_start_writeback(folio);
1732			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1733			ASSERT(ret);
1734			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1735						 eb->folio_size);
1736			wbc->nr_to_write -= folio_nr_pages(folio);
1737			folio_unlock(folio);
1738		}
1739	}
1740	btrfs_submit_bio(bbio, 0);
1741}
1742
1743/*
1744 * Submit one subpage btree page.
1745 *
1746 * The main difference to submit_eb_page() is:
1747 * - Page locking
1748 *   For subpage, we don't rely on page locking at all.
1749 *
1750 * - Flush write bio
1751 *   We only flush the bio if we may be unable to fit the current extent buffers
1752 *   into the current bio.
1753 *
1754 * Return >=0 for the number of submitted extent buffers.
1755 * Return <0 for fatal error.
1756 */
1757static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1758{
1759	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
1760	struct folio *folio = page_folio(page);
1761	int submitted = 0;
1762	u64 page_start = page_offset(page);
1763	int bit_start = 0;
1764	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1765
1766	/* Lock and write each dirty extent buffer in the range */
1767	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1768		struct btrfs_subpage *subpage = folio_get_private(folio);
1769		struct extent_buffer *eb;
1770		unsigned long flags;
1771		u64 start;
1772
1773		/*
1774		 * Take private lock to ensure the subpage won't be detached
1775		 * in the meantime.
1776		 */
1777		spin_lock(&page->mapping->i_private_lock);
1778		if (!folio_test_private(folio)) {
1779			spin_unlock(&page->mapping->i_private_lock);
1780			break;
1781		}
1782		spin_lock_irqsave(&subpage->lock, flags);
1783		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1784			      subpage->bitmaps)) {
1785			spin_unlock_irqrestore(&subpage->lock, flags);
1786			spin_unlock(&page->mapping->i_private_lock);
1787			bit_start++;
1788			continue;
1789		}
1790
1791		start = page_start + bit_start * fs_info->sectorsize;
1792		bit_start += sectors_per_node;
1793
1794		/*
1795		 * Here we just want to grab the eb without touching extra
1796		 * spin locks, so call find_extent_buffer_nolock().
1797		 */
1798		eb = find_extent_buffer_nolock(fs_info, start);
1799		spin_unlock_irqrestore(&subpage->lock, flags);
1800		spin_unlock(&page->mapping->i_private_lock);
1801
1802		/*
1803		 * The eb has already reached 0 refs thus find_extent_buffer()
1804		 * doesn't return it. We don't need to write back such eb
1805		 * anyway.
1806		 */
1807		if (!eb)
1808			continue;
1809
1810		if (lock_extent_buffer_for_io(eb, wbc)) {
1811			write_one_eb(eb, wbc);
1812			submitted++;
1813		}
1814		free_extent_buffer(eb);
1815	}
1816	return submitted;
1817}
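
/*
 * Illustrative, standalone sketch (not btrfs code) of the bitmap walk in
 * submit_eb_subpage() above: one dirty bit per sector, and a tree block
 * spanning @sectors_per_node consecutive sectors, so once a dirty bit is
 * found the scan reports the block and skips ahead by a whole node.  The
 * name demo_for_each_dirty_node is hypothetical.
 */
#if 0	/* illustrative only */
#include <limits.h>
#include <stdio.h>

static void demo_for_each_dirty_node(const unsigned long *bitmap,
				     unsigned int nr_bits,
				     unsigned int sectors_per_node,
				     unsigned long long range_start,
				     unsigned int sectorsize)
{
	const unsigned int bits_per_word = sizeof(unsigned long) * CHAR_BIT;
	unsigned int bit = 0;

	while (bit < nr_bits) {
		int dirty = (bitmap[bit / bits_per_word] >>
			     (bit % bits_per_word)) & 1;

		if (!dirty) {
			bit++;
			continue;
		}
		printf("dirty tree block at byte %llu\n",
		       range_start + (unsigned long long)bit * sectorsize);
		bit += sectors_per_node;
	}
}
#endif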
1818
1819/*
1820 * Submit all page(s) of one extent buffer.
1821 *
1822 * @page:	the page of one extent buffer
1823 * @eb_context:	to determine if we need to submit this page, if current page
1824 *		belongs to this eb, we don't need to submit
1825 *
1826 * The caller should pass each page in bytenr order, and here we use
1827 * @eb_context to determine if we have submitted pages of one extent buffer.
1828 *
1829 * If we have, we just skip until we hit a new page that doesn't belong to
1830 * the current @eb_context.
1831 *
1832 * If not, we submit all the page(s) of the extent buffer.
1833 *
1834 * Return >0 if we have submitted the extent buffer successfully.
1835 * Return 0 if we don't need to submit the page, as it's already submitted by
1836 * previous call.
1837 * Return <0 for fatal error.
1838 */
1839static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1840{
1841	struct writeback_control *wbc = ctx->wbc;
1842	struct address_space *mapping = page->mapping;
1843	struct folio *folio = page_folio(page);
1844	struct extent_buffer *eb;
1845	int ret;
1846
1847	if (!folio_test_private(folio))
1848		return 0;
1849
1850	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
1851		return submit_eb_subpage(page, wbc);
1852
1853	spin_lock(&mapping->i_private_lock);
1854	if (!folio_test_private(folio)) {
1855		spin_unlock(&mapping->i_private_lock);
1856		return 0;
1857	}
1858
1859	eb = folio_get_private(folio);
1860
1861	/*
1862	 * Shouldn't happen and normally this would be a BUG_ON but no point
1863	 * crashing the machine for something we can survive anyway.
1864	 */
1865	if (WARN_ON(!eb)) {
1866		spin_unlock(&mapping->i_private_lock);
1867		return 0;
1868	}
1869
1870	if (eb == ctx->eb) {
1871		spin_unlock(&mapping->i_private_lock);
1872		return 0;
1873	}
1874	ret = atomic_inc_not_zero(&eb->refs);
1875	spin_unlock(&mapping->i_private_lock);
1876	if (!ret)
1877		return 0;
1878
1879	ctx->eb = eb;
1880
1881	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1882	if (ret) {
1883		if (ret == -EBUSY)
1884			ret = 0;
1885		free_extent_buffer(eb);
1886		return ret;
1887	}
1888
1889	if (!lock_extent_buffer_for_io(eb, wbc)) {
1890		free_extent_buffer(eb);
1891		return 0;
1892	}
1893	/* Implies write in zoned mode. */
1894	if (ctx->zoned_bg) {
1895		/* Mark the last eb in the block group. */
1896		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1897		ctx->zoned_bg->meta_write_pointer += eb->len;
1898	}
1899	write_one_eb(eb, wbc);
1900	free_extent_buffer(eb);
1901	return 1;
1902}
1903
1904int btree_write_cache_pages(struct address_space *mapping,
1905				   struct writeback_control *wbc)
1906{
1907	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1908	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1909	int ret = 0;
1910	int done = 0;
1911	int nr_to_write_done = 0;
1912	struct folio_batch fbatch;
1913	unsigned int nr_folios;
1914	pgoff_t index;
1915	pgoff_t end;		/* Inclusive */
1916	int scanned = 0;
1917	xa_mark_t tag;
1918
1919	folio_batch_init(&fbatch);
1920	if (wbc->range_cyclic) {
1921		index = mapping->writeback_index; /* Start from prev offset */
1922		end = -1;
1923		/*
1924		 * Starting from the beginning does not require cycling over the
1925		 * range, so mark it as scanned.
1926		 */
1927		scanned = (index == 0);
1928	} else {
1929		index = wbc->range_start >> PAGE_SHIFT;
1930		end = wbc->range_end >> PAGE_SHIFT;
1931		scanned = 1;
1932	}
1933	if (wbc->sync_mode == WB_SYNC_ALL)
1934		tag = PAGECACHE_TAG_TOWRITE;
1935	else
1936		tag = PAGECACHE_TAG_DIRTY;
1937	btrfs_zoned_meta_io_lock(fs_info);
1938retry:
1939	if (wbc->sync_mode == WB_SYNC_ALL)
1940		tag_pages_for_writeback(mapping, index, end);
1941	while (!done && !nr_to_write_done && (index <= end) &&
1942	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1943					    tag, &fbatch))) {
1944		unsigned i;
1945
1946		for (i = 0; i < nr_folios; i++) {
1947			struct folio *folio = fbatch.folios[i];
1948
1949			ret = submit_eb_page(&folio->page, &ctx);
1950			if (ret == 0)
1951				continue;
1952			if (ret < 0) {
1953				done = 1;
1954				break;
1955			}
1956
1957			/*
1958			 * The filesystem may choose to bump up nr_to_write.
1959			 * We have to make sure to honor the new nr_to_write
1960			 * at any time.
1961			 */
1962			nr_to_write_done = wbc->nr_to_write <= 0;
 
1963		}
1964		folio_batch_release(&fbatch);
1965		cond_resched();
 
1966	}
1967	if (!scanned && !done) {
1968		/*
1969		 * We hit the last page and there is more work to be done: wrap
1970		 * back to the start of the file
1971		 */
1972		scanned = 1;
1973		index = 0;
1974		goto retry;
1975	}
1976	/*
1977	 * If something went wrong, don't allow any metadata write bio to be
1978	 * submitted.
1979	 *
1980	 * This would prevent use-after-free if we had dirty pages not
1981	 * cleaned up, which can still happen with fuzzed images.
1982	 *
1983	 * - Bad extent tree
1984	 *   Allowing existing tree block to be allocated for other trees.
1985	 *
1986	 * - Log tree operations
1987	 *   Existing tree blocks get allocated to the log tree, bump their
1988	 *   generation, then get cleaned in tree re-balance.
1989	 *   Such tree block will not be written back, since it's clean,
1990	 *   thus no WRITTEN flag set.
1991	 *   And after the log writes back, this tree block is not tracked by
1992	 *   any dirty extent_io_tree.
1993	 *
1994	 * - Offending tree block gets re-dirtied from its original owner
1995	 *   Since it has bumped generation, no WRITTEN flag, it can be
1996	 *   reused without COWing. This tree block will not be tracked
1997	 *   by btrfs_transaction::dirty_pages.
1998	 *
1999	 *   Now such dirty tree block will not be cleaned by any dirty
2000	 *   extent io tree. Thus we don't want to submit such wild eb
2001	 *   if the fs already has error.
2002	 *
2003	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2004	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2005	 */
2006	if (ret > 0)
2007		ret = 0;
2008	if (!ret && BTRFS_FS_ERROR(fs_info))
2009		ret = -EROFS;
2010
2011	if (ctx.zoned_bg)
2012		btrfs_put_block_group(ctx.zoned_bg);
2013	btrfs_zoned_meta_io_unlock(fs_info);
2014	return ret;
2015}
2016
2017/*
2018 * Walk the list of dirty pages of the given address space and write all of them.
2019 *
2020 * @mapping:   address space structure to write
2021 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2022 * @bio_ctrl:  holds context for the write, namely the bio
2023 *
2024 * If a page is already under I/O, write_cache_pages() skips it, even
2025 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2026 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2027 * and msync() need to guarantee that all the data which was dirty at the time
2028 * the call was made get new I/O started against them.  If wbc->sync_mode is
2029 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2030 * existing IO to complete.
2031 */
2032static int extent_write_cache_pages(struct address_space *mapping,
2033			     struct btrfs_bio_ctrl *bio_ctrl)
2034{
2035	struct writeback_control *wbc = bio_ctrl->wbc;
2036	struct inode *inode = mapping->host;
2037	int ret = 0;
2038	int done = 0;
2039	int nr_to_write_done = 0;
2040	struct folio_batch fbatch;
2041	unsigned int nr_folios;
2042	pgoff_t index;
2043	pgoff_t end;		/* Inclusive */
2044	pgoff_t done_index;
2045	int range_whole = 0;
2046	int scanned = 0;
2047	xa_mark_t tag;
2048
2049	/*
2050	 * We have to hold onto the inode so that ordered extents can do their
2051	 * work when the IO finishes.  The alternative to this is failing to add
2052	 * an ordered extent if the igrab() fails there and that is a huge pain
2053	 * to deal with, so instead just hold onto the inode throughout the
2054	 * writepages operation.  If it fails here we are freeing up the inode
2055	 * anyway and we'd rather not waste our time writing out stuff that is
2056	 * going to be truncated anyway.
2057	 */
2058	if (!igrab(inode))
2059		return 0;
2060
2061	folio_batch_init(&fbatch);
2062	if (wbc->range_cyclic) {
2063		index = mapping->writeback_index; /* Start from prev offset */
2064		end = -1;
2065		/*
2066		 * Starting from the beginning does not require cycling over the
2067		 * range, so mark it as scanned.
2068		 */
2069		scanned = (index == 0);
2070	} else {
2071		index = wbc->range_start >> PAGE_SHIFT;
2072		end = wbc->range_end >> PAGE_SHIFT;
2073		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2074			range_whole = 1;
2075		scanned = 1;
2076	}
2077
2078	/*
2079	 * We do the tagged writepage as long as the snapshot flush bit is set
2080	 * and we are the first one to do the filemap_flush() on this inode.
2081	 *
2082	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2083	 * not race in and drop the bit.
2084	 */
2085	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2086	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2087			       &BTRFS_I(inode)->runtime_flags))
2088		wbc->tagged_writepages = 1;
2089
2090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2091		tag = PAGECACHE_TAG_TOWRITE;
2092	else
2093		tag = PAGECACHE_TAG_DIRTY;
2094retry:
2095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2096		tag_pages_for_writeback(mapping, index, end);
2097	done_index = index;
2098	while (!done && !nr_to_write_done && (index <= end) &&
2099			(nr_folios = filemap_get_folios_tag(mapping, &index,
2100							end, tag, &fbatch))) {
2101		unsigned i;
2102
2103		for (i = 0; i < nr_folios; i++) {
2104			struct folio *folio = fbatch.folios[i];
 
2105
2106			done_index = folio_next_index(folio);
2107			/*
2108			 * At this point we hold neither the i_pages lock nor
2109			 * the page lock: the page may be truncated or
2110			 * invalidated (changing page->mapping to NULL),
2111			 * or even swizzled back from swapper_space to
2112			 * tmpfs file mapping
2113			 */
2114			if (!folio_trylock(folio)) {
2115				submit_write_bio(bio_ctrl, 0);
2116				folio_lock(folio);
2117			}
2118
2119			if (unlikely(folio->mapping != mapping)) {
2120				folio_unlock(folio);
2121				continue;
2122			}
2123
2124			if (!folio_test_dirty(folio)) {
2125				/* Someone wrote it for us. */
2126				folio_unlock(folio);
2127				continue;
2128			}
2129
2130			if (wbc->sync_mode != WB_SYNC_NONE) {
2131				if (folio_test_writeback(folio))
2132					submit_write_bio(bio_ctrl, 0);
2133				folio_wait_writeback(folio);
2134			}
2135
2136			if (folio_test_writeback(folio) ||
2137			    !folio_clear_dirty_for_io(folio)) {
2138				folio_unlock(folio);
2139				continue;
2140			}
2141
2142			ret = __extent_writepage(&folio->page, bio_ctrl);
2143			if (ret < 0) {
2144				done = 1;
2145				break;
2146			}
2147
2148			/*
2149			 * The filesystem may choose to bump up nr_to_write.
2150			 * We have to make sure to honor the new nr_to_write
2151			 * at any time.
2152			 */
2153			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2154					    wbc->nr_to_write <= 0);
2155		}
2156		folio_batch_release(&fbatch);
2157		cond_resched();
2158	}
2159	if (!scanned && !done) {
2160		/*
2161		 * We hit the last page and there is more work to be done: wrap
2162		 * back to the start of the file
2163		 */
2164		scanned = 1;
2165		index = 0;
2166
2167		/*
2168		 * If we're looping we could run into a page that is locked by a
2169		 * writer and that writer could be waiting on writeback for a
2170		 * page in our current bio, and thus deadlock, so flush the
2171		 * write bio here.
2172		 */
2173		submit_write_bio(bio_ctrl, 0);
2174		goto retry;
2175	}
 
2176
2177	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2178		mapping->writeback_index = done_index;
2179
2180	btrfs_add_delayed_iput(BTRFS_I(inode));
2181	return ret;
2182}
2183
2184/*
2185 * Submit the pages in the range to the bio for call sites whose delalloc range
2186 * has already been run (i.e. an ordered extent was inserted) and all pages are
2187 * still locked.
2188 */
2189void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2190			       u64 start, u64 end, struct writeback_control *wbc,
2191			       bool pages_dirty)
2192{
2193	bool found_error = false;
2194	int ret = 0;
2195	struct address_space *mapping = inode->i_mapping;
2196	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2197	const u32 sectorsize = fs_info->sectorsize;
2198	loff_t i_size = i_size_read(inode);
2199	u64 cur = start;
2200	struct btrfs_bio_ctrl bio_ctrl = {
2201		.wbc = wbc,
2202		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2203	};
2204
2205	if (wbc->no_cgroup_owner)
2206		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2207
2208	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2209
2210	while (cur <= end) {
2211		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2212		u32 cur_len = cur_end + 1 - cur;
2213		struct page *page;
2214		int nr = 0;
2215
2216		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2217		ASSERT(PageLocked(page));
2218		if (pages_dirty && page != locked_page) {
2219			ASSERT(PageDirty(page));
2220			clear_page_dirty_for_io(page);
2221		}
2222
2223		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2224					    i_size, &nr);
2225		if (ret == 1)
2226			goto next_page;
2227
2228		/* Make sure the mapping tag for page dirty gets cleared. */
2229		if (nr == 0) {
2230			set_page_writeback(page);
2231			end_page_writeback(page);
2232		}
2233		if (ret) {
2234			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2235						       cur, cur_len, !ret);
2236			mapping_set_error(page->mapping, ret);
2237		}
2238		btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2239		if (ret < 0)
2240			found_error = true;
2241next_page:
2242		put_page(page);
2243		cur = cur_end + 1;
2244	}
2245
2246	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
 
2247}
2248
2249int extent_writepages(struct address_space *mapping,
2250		      struct writeback_control *wbc)
2251{
2252	struct inode *inode = mapping->host;
2253	int ret = 0;
2254	struct btrfs_bio_ctrl bio_ctrl = {
2255		.wbc = wbc,
2256		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2257	};
2258
2259	/*
2260	 * Allow only a single thread to do the reloc work in zoned mode to
2261	 * protect the write pointer updates.
2262	 */
2263	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2264	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2265	submit_write_bio(&bio_ctrl, ret);
2266	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2267	return ret;
2268}
2269
2270void extent_readahead(struct readahead_control *rac)
2271{
2272	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2273	struct page *pagepool[16];
2274	struct extent_map *em_cached = NULL;
2275	u64 prev_em_start = (u64)-1;
2276	int nr;
2277
2278	while ((nr = readahead_page_batch(rac, pagepool))) {
2279		u64 contig_start = readahead_pos(rac);
2280		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2281
2282		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2283				&em_cached, &bio_ctrl, &prev_em_start);
2284	}
2285
2286	if (em_cached)
2287		free_extent_map(em_cached);
2288	submit_one_bio(&bio_ctrl);
2289}
2290
2291/*
2292 * Basic invalidate_folio code. This waits on any locked or writeback
2293 * ranges corresponding to the folio, and then deletes any extent state
2294 * records from the tree
2295 */
2296int extent_invalidate_folio(struct extent_io_tree *tree,
2297			  struct folio *folio, size_t offset)
2298{
2299	struct extent_state *cached_state = NULL;
2300	u64 start = folio_pos(folio);
2301	u64 end = start + folio_size(folio) - 1;
2302	size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2303
2304	/* This function is only called for the btree inode */
2305	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2306
2307	start += ALIGN(offset, blocksize);
2308	if (start > end)
2309		return 0;
2310
2311	lock_extent(tree, start, end, &cached_state);
2312	folio_wait_writeback(folio);
2313
2314	/*
2315	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2316	 * so here we only need to unlock the extent range to free any
2317	 * existing extent state.
2318	 */
2319	unlock_extent(tree, start, end, &cached_state);
2320	return 0;
2321}
2322
2323/*
2324 * A helper for release_folio. This tests for areas of the page that
2325 * are locked or under IO and drops the related state bits if it is safe
2326 * to drop the page.
2327 */
2328static int try_release_extent_state(struct extent_io_tree *tree,
2329				    struct page *page, gfp_t mask)
2330{
2331	u64 start = page_offset(page);
2332	u64 end = start + PAGE_SIZE - 1;
2333	int ret = 1;
2334
2335	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2336		ret = 0;
2337	} else {
2338		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2339				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2340				   EXTENT_QGROUP_RESERVED);
2341
2342		/*
2343		 * At this point we can safely clear everything except the
2344		 * locked bit, the nodatasum bit and the delalloc new bit.
2345		 * The delalloc new bit will be cleared by ordered extent
2346		 * completion.
2347		 */
2348		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2349
2350		/* If clear_extent_bit() failed for ENOMEM reasons,
2351		 * we can't allow the release to continue.
2352		 */
2353		if (ret < 0)
2354			ret = 0;
2355		else
2356			ret = 1;
2357	}
2358	return ret;
2359}
2360
2361/*
2362 * A helper for release_folio.  As long as there are no locked extents
2363 * in the range corresponding to the page, both state records and extent
2364 * map records are removed
2365 */
2366int try_release_extent_mapping(struct page *page, gfp_t mask)
2367{
2368	struct extent_map *em;
2369	u64 start = page_offset(page);
2370	u64 end = start + PAGE_SIZE - 1;
2371	struct btrfs_inode *btrfs_inode = page_to_inode(page);
2372	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2373	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2374
2375	if (gfpflags_allow_blocking(mask) &&
2376	    page->mapping->host->i_size > SZ_16M) {
2377		u64 len;
2378		while (start <= end) {
2379			struct btrfs_fs_info *fs_info;
2380			u64 cur_gen;
2381
2382			len = end - start + 1;
2383			write_lock(&map->lock);
2384			em = lookup_extent_mapping(map, start, len);
2385			if (!em) {
2386				write_unlock(&map->lock);
2387				break;
2388			}
2389			if ((em->flags & EXTENT_FLAG_PINNED) ||
2390			    em->start != start) {
2391				write_unlock(&map->lock);
2392				free_extent_map(em);
2393				break;
2394			}
2395			if (test_range_bit_exists(tree, em->start,
2396						  extent_map_end(em) - 1,
2397						  EXTENT_LOCKED))
2398				goto next;
2399			/*
2400			 * If it's not in the list of modified extents, used
2401			 * by a fast fsync, we can remove it. If it's being
2402			 * logged we can safely remove it since fsync took an
2403			 * extra reference on the em.
2404			 */
2405			if (list_empty(&em->list) ||
2406			    (em->flags & EXTENT_FLAG_LOGGING))
2407				goto remove_em;
2408			/*
2409			 * If it's in the list of modified extents, remove it
2410			 * only if its generation is older than the current one,
2411			 * in which case we don't need it for a fast fsync.
2412			 * Otherwise don't remove it, we could be racing with an
2413			 * ongoing fast fsync that could miss the new extent.
2414			 */
2415			fs_info = btrfs_inode->root->fs_info;
2416			spin_lock(&fs_info->trans_lock);
2417			cur_gen = fs_info->generation;
2418			spin_unlock(&fs_info->trans_lock);
2419			if (em->generation >= cur_gen)
2420				goto next;
2421remove_em:
2422			/*
2423			 * We only remove extent maps that are not in the list of
2424			 * modified extents or that are in the list but with a
2425			 * generation lower than the current generation, so there
2426			 * is no need to set the full fsync flag on the inode (it
2427			 * hurts the fsync performance for workloads with a data
2428			 * size that exceeds or is close to the system's memory).
2429			 */
2430			remove_extent_mapping(map, em);
2431			/* once for the rb tree */
2432			free_extent_map(em);
2433next:
2434			start = extent_map_end(em);
2435			write_unlock(&map->lock);
2436
2437			/* once for us */
2438			free_extent_map(em);
2439
2440			cond_resched(); /* Allow large-extent preemption. */
2441		}
2442	}
2443	return try_release_extent_state(tree, page, mask);
2444}
2445
2446struct btrfs_fiemap_entry {
2447	u64 offset;
2448	u64 phys;
2449	u64 len;
2450	u32 flags;
2451};
2452
2453/*
2454 * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the
2455 * file range from the inode's io tree, release the subvolume tree search path,
2456 * flush the fiemap cache, relock the file range and re-search the subvolume tree.
2457 * The value here is something negative that can't be confused with a valid
2458 * errno value and different from 1 because that's also a return value from
2459 * fiemap_fill_next_extent() and also it's often used to mean some btree search
2460 * did not find a key, so make it some distinct negative value.
2461 */
2462#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2463
2464/*
2465 * Used to:
2466 *
2467 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2468 *   merge extents that are contiguous and can be grouped as a single one;
2469 *
2470 * - Store extents ready to be written to the fiemap buffer in an intermediary
2471 *   buffer. This intermediary buffer is to ensure that in case the fiemap
2472 *   buffer is memory mapped to the fiemap target file, we don't deadlock
2473 *   during btrfs_page_mkwrite(). This is because during fiemap we are locking
2474 *   an extent range in order to prevent races with delalloc flushing and
2475 *   ordered extent completion, which is needed in order to reliably detect
2476 *   delalloc in holes and prealloc extents. And this can lead to a deadlock
2477 *   if the fiemap buffer is memory mapped to the file we are running fiemap
2478 *   against (a silly, useless in practice scenario, but possible) because
2479 *   btrfs_page_mkwrite() will try to lock the same extent range.
2480 */
2481struct fiemap_cache {
2482	/* An array of ready fiemap entries. */
2483	struct btrfs_fiemap_entry *entries;
2484	/* Number of entries in the entries array. */
2485	int entries_size;
2486	/* Index of the next entry in the entries array to write to. */
2487	int entries_pos;
2488	/*
2489	 * Once the entries array is full, this indicates what's the offset for
2490	 * the next file extent item we must search for in the inode's subvolume
2491	 * tree after unlocking the extent range in the inode's io tree and
2492	 * releasing the search path.
2493	 */
2494	u64 next_search_offset;
2495	/*
2496	 * This matches struct fiemap_extent_info::fi_mapped_extents; we use it
2497	 * to count emitted extents ourselves and stop instead of relying on
2498	 * fiemap_fill_next_extent() because we buffer ready fiemap entries at
2499	 * the @entries array, and we want to stop as soon as we hit the max
2500	 * amount of extents to map, not just to save time but also to make the
2501	 * logic at extent_fiemap() simpler.
2502	 */
2503	unsigned int extents_mapped;
2504	/* Fields for the cached extent (unsubmitted, not ready, extent). */
2505	u64 offset;
2506	u64 phys;
2507	u64 len;
2508	u32 flags;
2509	bool cached;
2510};
2511
2512static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
2513			      struct fiemap_cache *cache)
2514{
2515	for (int i = 0; i < cache->entries_pos; i++) {
2516		struct btrfs_fiemap_entry *entry = &cache->entries[i];
2517		int ret;
2518
2519		ret = fiemap_fill_next_extent(fieinfo, entry->offset,
2520					      entry->phys, entry->len,
2521					      entry->flags);
2522		/*
2523		 * Ignore 1 (reached max entries) because we keep track of that
2524		 * ourselves in emit_fiemap_extent().
2525		 */
2526		if (ret < 0)
2527			return ret;
2528	}
2529	cache->entries_pos = 0;
2530
2531	return 0;
2532}
2533
2534/*
2535 * Helper to submit fiemap extent.
2536 *
2537 * Will try to merge the current fiemap extent specified by @offset, @phys,
2538 * @len and @flags with the cached one.
2539 * Only when the merge fails is the cached one submitted as a
2540 * fiemap extent.
2541 *
2542 * Return value is the same as fiemap_fill_next_extent().
2543 */
2544static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2545				struct fiemap_cache *cache,
2546				u64 offset, u64 phys, u64 len, u32 flags)
2547{
2548	struct btrfs_fiemap_entry *entry;
2549	u64 cache_end;
2550
2551	/* Set at the end of extent_fiemap(). */
2552	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2553
2554	if (!cache->cached)
2555		goto assign;
2556
2557	/*
2558	 * When iterating the extents of the inode, at extent_fiemap(), we may
2559	 * find an extent that starts at an offset behind the end offset of the
2560	 * previous extent we processed. This happens if fiemap is called
2561	 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2562	 * after we had to unlock the file range, release the search path, emit
2563	 * the fiemap extents stored in the buffer (cache->entries array) and
2564	 * the fiemap extents stored in the buffer (cache->entries array) and then
2565	 *
2566	 * For example we are in leaf X processing its last item, which is the
2567	 * file extent item for file range [512K, 1M[, and after
2568	 * btrfs_next_leaf() releases the path, there's an ordered extent that
2569	 * completes for the file range [768K, 2M[, and that results in trimming
2570	 * the file extent item so that it now corresponds to the file range
2571	 * [512K, 768K[ and a new file extent item is inserted for the file
2572	 * range [768K, 2M[, which may end up as the last item of leaf X or as
2573	 * the first item of the next leaf - in either case btrfs_next_leaf()
2574	 * will leave us with a path pointing to the new extent item, for the
2575	 * file range [768K, 2M[, since that's the first key that follows the
2576	 * last one we processed. So in order not to report overlapping extents
2577	 * to user space, we trim the length of the previously cached extent and
2578	 * emit it.
2579	 *
2580	 * Upon calling btrfs_next_leaf() we may also find an extent with an
2581	 * offset smaller than or equal to cache->offset, and this happens
2582	 * when we had a hole or prealloc extent with several delalloc ranges in
2583	 * it, but after btrfs_next_leaf() released the path, delalloc was
2584	 * flushed and the resulting ordered extents were completed, so we can
2585	 * now have found a file extent item for an offset that is smaller than
2586	 * or equal to what we have in cache->offset. We deal with this as
2587	 * described below.
2588	 */
2589	cache_end = cache->offset + cache->len;
2590	if (cache_end > offset) {
2591		if (offset == cache->offset) {
2592			/*
2593			 * We cached a delalloc range (found in the io tree) for
2594			 * a hole or prealloc extent and we have now found a
2595			 * file extent item for the same offset. What we have
2596			 * now is more recent and up to date, so discard what
2597			 * we had in the cache and use what we have just found.
2598			 */
2599			goto assign;
2600		} else if (offset > cache->offset) {
2601			/*
2602			 * The extent range we previously found ends after the
2603			 * offset of the file extent item we found and that
2604			 * offset falls somewhere in the middle of that previous
2605			 * extent range. So adjust the range we previously found
2606			 * to end at the offset of the file extent item we have
2607			 * just found, since this extent is more up to date.
2608			 * Emit that adjusted range and cache the file extent
2609			 * item we have just found. This corresponds to the case
2610			 * where a previously found file extent item was split
2611			 * due to an ordered extent completing.
2612			 */
2613			cache->len = offset - cache->offset;
2614			goto emit;
2615		} else {
2616			const u64 range_end = offset + len;
2617
2618			/*
2619			 * The offset of the file extent item we have just found
2620			 * is behind the cached offset. This means we were
2621			 * processing a hole or prealloc extent for which we
2622			 * have found delalloc ranges (in the io tree), so what
2623			 * we have in the cache is the last delalloc range we
2624			 * found while the file extent item we found can be
2625			 * either for a whole delalloc range we previously
2626			 * emitted or only a part of that range.
2627			 *
2628			 * We have two cases here:
2629			 *
2630			 * 1) The file extent item's range ends at or behind the
2631			 *    cached extent's end. In this case just ignore the
2632			 *    current file extent item because we don't want to
2633			 *    overlap with previous ranges that may have been
2634			 *    emitted already;
2635			 *
2636			 * 2) The file extent item starts behind the currently
2637			 *    cached extent but its end offset goes beyond the
2638			 *    end offset of the cached extent. We don't want to
2639			 *    overlap with a previous range that may have been
2640			 *    emitted already, so we emit the currently cached
2641			 *    extent and then partially store the current file
2642			 *    extent item's range in the cache, for the subrange
2643			 *    going from the cached extent's end to the end of the
2644			 *    file extent item.
2645			 */
2646			if (range_end <= cache_end)
2647				return 0;
2648
2649			if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2650				phys += cache_end - offset;
2651
2652			offset = cache_end;
2653			len = range_end - cache_end;
2654			goto emit;
2655		}
2656	}
2657
2658	/*
2659	 * Only merges fiemap extents if
2660	 * 1) Their logical addresses are continuous
2661	 *
2662	 * 2) Their physical addresses are continuous
2663	 *    So truly compressed (physical size smaller than logical size)
2664	 *    extents won't get merged with each other
2665	 *
2666	 * 3) Share same flags
2667	 */
2668	if (cache->offset + cache->len  == offset &&
2669	    cache->phys + cache->len == phys  &&
2670	    cache->flags == flags) {
2671		cache->len += len;
2672		return 0;
2673	}
2674
2675emit:
2676	/* Not mergeable, need to submit cached one */
2677
2678	if (cache->entries_pos == cache->entries_size) {
2679		/*
2680		 * We will need to re-search from the end offset of the last
2681		 * stored extent and not from the current offset, because after
2682		 * unlocking the range and releasing the path, if there's a hole
2683		 * between that end offset and this current offset, a new extent
2684		 * may have been inserted due to a new write, so we don't want
2685		 * to miss it.
2686		 */
2687		entry = &cache->entries[cache->entries_size - 1];
2688		cache->next_search_offset = entry->offset + entry->len;
2689		cache->cached = false;
2690
2691		return BTRFS_FIEMAP_FLUSH_CACHE;
2692	}
2693
2694	entry = &cache->entries[cache->entries_pos];
2695	entry->offset = cache->offset;
2696	entry->phys = cache->phys;
2697	entry->len = cache->len;
2698	entry->flags = cache->flags;
2699	cache->entries_pos++;
2700	cache->extents_mapped++;
2701
2702	if (cache->extents_mapped == fieinfo->fi_extents_max) {
2703		cache->cached = false;
2704		return 1;
2705	}
2706assign:
2707	cache->cached = true;
2708	cache->offset = offset;
2709	cache->phys = phys;
2710	cache->len = len;
2711	cache->flags = flags;
2712
2713	return 0;
2714}
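
/*
 * Illustrative, standalone sketch (not btrfs code) of the merge rule used
 * by emit_fiemap_extent() above: two fiemap extents merge only when they
 * are contiguous both logically and physically and carry the same flags,
 * which is why truly compressed extents (physical size smaller than
 * logical size) never merge.  The names demo_extent, demo_can_merge and
 * demo_merge are hypothetical.
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stdint.h>

struct demo_extent {
	uint64_t offset;	/* logical file offset */
	uint64_t phys;		/* physical disk offset */
	uint64_t len;
	uint32_t flags;
};

static bool demo_can_merge(const struct demo_extent *cached,
			   const struct demo_extent *next)
{
	return cached->offset + cached->len == next->offset &&
	       cached->phys + cached->len == next->phys &&
	       cached->flags == next->flags;
}

static void demo_merge(struct demo_extent *cached, const struct demo_extent *next)
{
	/* Caller must have checked demo_can_merge() first. */
	cached->len += next->len;
}
#endif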
2715
2716/*
2717 * Emit last fiemap cache
2718 *
2719 * The last fiemap cache may still be cached in the following case:
2720 * 0		      4k		    8k
2721 * |<- Fiemap range ->|
2722 * |<------------  First extent ----------->|
2723 *
2724 * In this case, the first extent range will be cached but not emitted.
2725 * So we must emit it before ending extent_fiemap().
2726 */
2727static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2728				  struct fiemap_cache *cache)
2729{
2730	int ret;
2731
2732	if (!cache->cached)
2733		return 0;
2734
2735	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2736				      cache->len, cache->flags);
2737	cache->cached = false;
2738	if (ret > 0)
2739		ret = 0;
2740	return ret;
2741}
2742
2743static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2744{
2745	struct extent_buffer *clone = path->nodes[0];
2746	struct btrfs_key key;
2747	int slot;
2748	int ret;
2749
2750	path->slots[0]++;
2751	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2752		return 0;
2753
2754	/*
2755	 * Add a temporary extra ref to an already cloned extent buffer to
2756	 * prevent btrfs_next_leaf() freeing it; we want to reuse it to avoid
2757	 * the cost of allocating a new one.
2758	 */
2759	ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
2760	atomic_inc(&clone->refs);
2761
2762	ret = btrfs_next_leaf(inode->root, path);
2763	if (ret != 0)
2764		goto out;
2765
2766	/*
2767	 * Don't bother with cloning if there are no more file extent items for
2768	 * our inode.
2769	 */
2770	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2771	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
2772		ret = 1;
2773		goto out;
2774	}
2775
2776	/*
2777	 * Important to preserve the start field, for the optimizations when
2778	 * checking if extents are shared (see extent_fiemap()).
2779	 *
2780	 * We must set ->start before calling copy_extent_buffer_full().  If we
2781	 * are on sub-pagesize blocksize, we use ->start to determine the offset
2782	 * into the folio where our eb exists, and if we update ->start after
2783	 * the fact then any subsequent reads of the eb may read from a
2784	 * different offset in the folio than where we originally copied into.
2785	 */
2786	clone->start = path->nodes[0]->start;
2787	/* See the comment at fiemap_search_slot() about why we clone. */
2788	copy_extent_buffer_full(clone, path->nodes[0]);
2789
2790	slot = path->slots[0];
2791	btrfs_release_path(path);
2792	path->nodes[0] = clone;
2793	path->slots[0] = slot;
2794out:
2795	if (ret)
2796		free_extent_buffer(clone);
2797
2798	return ret;
2799}
2800
2801/*
2802 * Search for the first file extent item that starts at a given file offset or
2803 * the one that starts immediately before that offset.
2804 * Returns: 0 on success, < 0 on error, 1 if not found.
2805 */
2806static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2807			      u64 file_offset)
2808{
2809	const u64 ino = btrfs_ino(inode);
2810	struct btrfs_root *root = inode->root;
2811	struct extent_buffer *clone;
2812	struct btrfs_key key;
2813	int slot;
2814	int ret;
2815
2816	key.objectid = ino;
2817	key.type = BTRFS_EXTENT_DATA_KEY;
2818	key.offset = file_offset;
2819
2820	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2821	if (ret < 0)
2822		return ret;
2823
2824	if (ret > 0 && path->slots[0] > 0) {
2825		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2826		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2827			path->slots[0]--;
2828	}
2829
2830	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2831		ret = btrfs_next_leaf(root, path);
2832		if (ret != 0)
2833			return ret;
2834
2835		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2836		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2837			return 1;
2838	}
2839
2840	/*
2841	 * We clone the leaf and use it during fiemap. This is because while
2842	 * using the leaf we do expensive things like checking if an extent is
2843	 * shared, which can take a long time. In order to prevent blocking
2844	 * other tasks for too long, we use a clone of the leaf. We have locked
2845	 * the file range in the inode's io tree, so we know none of our file
2846	 * extent items can change. This way we avoid blocking other tasks that
2847	 * want to insert items for other inodes in the same leaf or b+tree
2848	 * rebalance operations (triggered for example when someone is trying
2849	 * to push items into this leaf when trying to insert an item in a
2850	 * neighbour leaf).
2851	 * We also need the private clone because holding a read lock on an
2852	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2853	 * when we check if extents are shared, as backref walking may need to
2854	 * lock the same leaf we are processing.
2855	 */
2856	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2857	if (!clone)
2858		return -ENOMEM;
2859
2860	slot = path->slots[0];
2861	btrfs_release_path(path);
2862	path->nodes[0] = clone;
2863	path->slots[0] = slot;
2864
2865	return 0;
2866}
2867
2868/*
2869 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2870 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2871 * extent. The end offset (@end) is inclusive.
2872 */
2873static int fiemap_process_hole(struct btrfs_inode *inode,
2874			       struct fiemap_extent_info *fieinfo,
2875			       struct fiemap_cache *cache,
2876			       struct extent_state **delalloc_cached_state,
2877			       struct btrfs_backref_share_check_ctx *backref_ctx,
2878			       u64 disk_bytenr, u64 extent_offset,
2879			       u64 extent_gen,
2880			       u64 start, u64 end)
2881{
2882	const u64 i_size = i_size_read(&inode->vfs_inode);
2883	u64 cur_offset = start;
2884	u64 last_delalloc_end = 0;
2885	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2886	bool checked_extent_shared = false;
2887	int ret;
2888
2889	/*
2890	 * There can be no delalloc past i_size, so don't waste time looking for
2891	 * it beyond i_size.
2892	 */
2893	while (cur_offset < end && cur_offset < i_size) {
2894		u64 delalloc_start;
2895		u64 delalloc_end;
2896		u64 prealloc_start;
2897		u64 prealloc_len = 0;
2898		bool delalloc;
2899
2900		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2901							delalloc_cached_state,
2902							&delalloc_start,
2903							&delalloc_end);
2904		if (!delalloc)
2905			break;
2906
2907		/*
2908		 * If this is a prealloc extent we have to report every section
2909		 * of it that has no delalloc.
2910		 */
2911		if (disk_bytenr != 0) {
2912			if (last_delalloc_end == 0) {
2913				prealloc_start = start;
2914				prealloc_len = delalloc_start - start;
2915			} else {
2916				prealloc_start = last_delalloc_end + 1;
2917				prealloc_len = delalloc_start - prealloc_start;
2918			}
2919		}
2920
2921		if (prealloc_len > 0) {
2922			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2923				ret = btrfs_is_data_extent_shared(inode,
2924								  disk_bytenr,
2925								  extent_gen,
2926								  backref_ctx);
2927				if (ret < 0)
2928					return ret;
2929				else if (ret > 0)
2930					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2931
2932				checked_extent_shared = true;
2933			}
2934			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2935						 disk_bytenr + extent_offset,
2936						 prealloc_len, prealloc_flags);
2937			if (ret)
2938				return ret;
2939			extent_offset += prealloc_len;
2940		}
2941
2942		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2943					 delalloc_end + 1 - delalloc_start,
2944					 FIEMAP_EXTENT_DELALLOC |
2945					 FIEMAP_EXTENT_UNKNOWN);
2946		if (ret)
2947			return ret;
2948
2949		last_delalloc_end = delalloc_end;
2950		cur_offset = delalloc_end + 1;
2951		extent_offset += cur_offset - delalloc_start;
2952		cond_resched();
2953	}
 
2954
2955	/*
2956	 * Either we found no delalloc for the whole prealloc extent or we have
2957	 * a prealloc extent that spans i_size or starts at or after i_size.
2958	 */
2959	if (disk_bytenr != 0 && last_delalloc_end < end) {
2960		u64 prealloc_start;
2961		u64 prealloc_len;
2962
2963		if (last_delalloc_end == 0) {
2964			prealloc_start = start;
2965			prealloc_len = end + 1 - start;
2966		} else {
2967			prealloc_start = last_delalloc_end + 1;
2968			prealloc_len = end + 1 - prealloc_start;
2969		}
2970
2971		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2972			ret = btrfs_is_data_extent_shared(inode,
2973							  disk_bytenr,
2974							  extent_gen,
2975							  backref_ctx);
2976			if (ret < 0)
2977				return ret;
2978			else if (ret > 0)
2979				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2980		}
2981		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2982					 disk_bytenr + extent_offset,
2983					 prealloc_len, prealloc_flags);
2984		if (ret)
2985			return ret;
2986	}
2987
2988	return 0;
2989}
2990
2991static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2992					  struct btrfs_path *path,
2993					  u64 *last_extent_end_ret)
2994{
2995	const u64 ino = btrfs_ino(inode);
2996	struct btrfs_root *root = inode->root;
2997	struct extent_buffer *leaf;
2998	struct btrfs_file_extent_item *ei;
2999	struct btrfs_key key;
3000	u64 disk_bytenr;
3001	int ret;
3002
3003	/*
3004	 * Lookup the last file extent. We're not using i_size here because
3005	 * there might be preallocation past i_size.
3006	 */
3007	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
3008	/* There can't be a file extent item at offset (u64)-1 */
3009	ASSERT(ret != 0);
3010	if (ret < 0)
3011		return ret;
3012
3013	/*
3014	 * For a non-existing key, btrfs_search_slot() always leaves us at a
3015	 * slot > 0, except if the btree is empty, which is impossible because
3016	 * at least it has the inode item for this inode and all the items for
3017	 * the root inode 256.
3018	 */
3019	ASSERT(path->slots[0] > 0);
3020	path->slots[0]--;
3021	leaf = path->nodes[0];
3022	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3023	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
3024		/* No file extent items in the subvolume tree. */
3025		*last_extent_end_ret = 0;
3026		return 0;
3027	}
3028
3029	/*
3030	 * For an inline extent, the disk_bytenr is where inline data starts at,
3031	 * so first check if we have an inline extent item before checking if we
3032	 * have an implicit hole (disk_bytenr == 0).
3033	 */
3034	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
3035	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
3036		*last_extent_end_ret = btrfs_file_extent_end(path);
3037		return 0;
3038	}
3039
3040	/*
3041	 * Find the last file extent item that is not a hole (when NO_HOLES is
3042	 * not enabled). This should take at most 2 iterations in the worst
3043	 * case: we have one hole file extent item at slot 0 of a leaf and
3044	 * another hole file extent item as the last item in the previous leaf.
3045	 * This is because we merge file extent items that represent holes.
3046	 */
3047	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3048	while (disk_bytenr == 0) {
3049		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
3050		if (ret < 0) {
3051			return ret;
3052		} else if (ret > 0) {
3053			/* No file extent items that are not holes. */
3054			*last_extent_end_ret = 0;
3055			return 0;
3056		}
3057		leaf = path->nodes[0];
3058		ei = btrfs_item_ptr(leaf, path->slots[0],
3059				    struct btrfs_file_extent_item);
3060		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3061	}
3062
3063	*last_extent_end_ret = btrfs_file_extent_end(path);
3064	return 0;
3065}
3066
3067int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3068		  u64 start, u64 len)
3069{
3070	const u64 ino = btrfs_ino(inode);
3071	struct extent_state *cached_state = NULL;
3072	struct extent_state *delalloc_cached_state = NULL;
3073	struct btrfs_path *path;
3074	struct fiemap_cache cache = { 0 };
3075	struct btrfs_backref_share_check_ctx *backref_ctx;
3076	u64 last_extent_end;
3077	u64 prev_extent_end;
3078	u64 range_start;
3079	u64 range_end;
3080	const u64 sectorsize = inode->root->fs_info->sectorsize;
3081	bool stopped = false;
3082	int ret;
3083
3084	cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
3085	cache.entries = kmalloc_array(cache.entries_size,
3086				      sizeof(struct btrfs_fiemap_entry),
3087				      GFP_KERNEL);
3088	backref_ctx = btrfs_alloc_backref_share_check_ctx();
3089	path = btrfs_alloc_path();
3090	if (!cache.entries || !backref_ctx || !path) {
3091		ret = -ENOMEM;
3092		goto out;
3093	}
3094
3095restart:
3096	range_start = round_down(start, sectorsize);
3097	range_end = round_up(start + len, sectorsize);
3098	prev_extent_end = range_start;
3099
3100	lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3101
3102	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3103	if (ret < 0)
3104		goto out_unlock;
3105	btrfs_release_path(path);
3106
3107	path->reada = READA_FORWARD;
3108	ret = fiemap_search_slot(inode, path, range_start);
3109	if (ret < 0) {
3110		goto out_unlock;
3111	} else if (ret > 0) {
3112		/*
3113		 * No file extent item found, but we may have delalloc between
3114		 * the current offset and i_size. So check for that.
3115		 */
3116		ret = 0;
3117		goto check_eof_delalloc;
3118	}
3119
3120	while (prev_extent_end < range_end) {
3121		struct extent_buffer *leaf = path->nodes[0];
3122		struct btrfs_file_extent_item *ei;
3123		struct btrfs_key key;
3124		u64 extent_end;
3125		u64 extent_len;
3126		u64 extent_offset = 0;
3127		u64 extent_gen;
3128		u64 disk_bytenr = 0;
3129		u64 flags = 0;
3130		int extent_type;
3131		u8 compression;
3132
3133		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3134		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3135			break;
3136
3137		extent_end = btrfs_file_extent_end(path);
3138
3139		/*
3140		 * The first iteration can leave us at an extent item that ends
3141		 * before our range's start. Move to the next item.
3142		 */
3143		if (extent_end <= range_start)
3144			goto next_item;
3145
3146		backref_ctx->curr_leaf_bytenr = leaf->start;
3147
3148		/* We have an implicit hole (NO_HOLES feature enabled). */
3149		if (prev_extent_end < key.offset) {
3150			const u64 hole_end = min(key.offset, range_end) - 1;
3151
3152			ret = fiemap_process_hole(inode, fieinfo, &cache,
3153						  &delalloc_cached_state,
3154						  backref_ctx, 0, 0, 0,
3155						  prev_extent_end, hole_end);
3156			if (ret < 0) {
3157				goto out_unlock;
3158			} else if (ret > 0) {
3159				/* fiemap_fill_next_extent() told us to stop. */
3160				stopped = true;
3161				break;
3162			}
3163
3164			/* We've reached the end of the fiemap range, stop. */
3165			if (key.offset >= range_end) {
3166				stopped = true;
3167				break;
3168			}
3169		}
3170
3171		extent_len = extent_end - key.offset;
3172		ei = btrfs_item_ptr(leaf, path->slots[0],
3173				    struct btrfs_file_extent_item);
3174		compression = btrfs_file_extent_compression(leaf, ei);
3175		extent_type = btrfs_file_extent_type(leaf, ei);
3176		extent_gen = btrfs_file_extent_generation(leaf, ei);
3177
3178		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3179			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3180			if (compression == BTRFS_COMPRESS_NONE)
3181				extent_offset = btrfs_file_extent_offset(leaf, ei);
3182		}
3183
3184		if (compression != BTRFS_COMPRESS_NONE)
3185			flags |= FIEMAP_EXTENT_ENCODED;
3186
3187		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3188			flags |= FIEMAP_EXTENT_DATA_INLINE;
3189			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3190			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3191						 extent_len, flags);
3192		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3193			ret = fiemap_process_hole(inode, fieinfo, &cache,
3194						  &delalloc_cached_state,
3195						  backref_ctx,
3196						  disk_bytenr, extent_offset,
3197						  extent_gen, key.offset,
3198						  extent_end - 1);
3199		} else if (disk_bytenr == 0) {
3200			/* We have an explicit hole. */
3201			ret = fiemap_process_hole(inode, fieinfo, &cache,
3202						  &delalloc_cached_state,
3203						  backref_ctx, 0, 0, 0,
3204						  key.offset, extent_end - 1);
3205		} else {
3206			/* We have a regular extent. */
3207			if (fieinfo->fi_extents_max) {
3208				ret = btrfs_is_data_extent_shared(inode,
3209								  disk_bytenr,
3210								  extent_gen,
3211								  backref_ctx);
3212				if (ret < 0)
3213					goto out_unlock;
3214				else if (ret > 0)
3215					flags |= FIEMAP_EXTENT_SHARED;
3216			}
3217
3218			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3219						 disk_bytenr + extent_offset,
3220						 extent_len, flags);
3221		}
3222
3223		if (ret < 0) {
3224			goto out_unlock;
3225		} else if (ret > 0) {
3226			/* emit_fiemap_extent() told us to stop. */
3227			stopped = true;
3228			break;
3229		}
3230
3231		prev_extent_end = extent_end;
3232next_item:
3233		if (fatal_signal_pending(current)) {
3234			ret = -EINTR;
3235			goto out_unlock;
3236		}
3237
3238		ret = fiemap_next_leaf_item(inode, path);
3239		if (ret < 0) {
3240			goto out_unlock;
3241		} else if (ret > 0) {
3242			/* No more file extent items for this inode. */
3243			break;
3244		}
3245		cond_resched();
3246	}
3247
3248check_eof_delalloc:
3249	if (!stopped && prev_extent_end < range_end) {
3250		ret = fiemap_process_hole(inode, fieinfo, &cache,
3251					  &delalloc_cached_state, backref_ctx,
3252					  0, 0, 0, prev_extent_end, range_end - 1);
3253		if (ret < 0)
3254			goto out_unlock;
3255		prev_extent_end = range_end;
3256	}
3257
3258	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3259		const u64 i_size = i_size_read(&inode->vfs_inode);
3260
3261		if (prev_extent_end < i_size) {
3262			u64 delalloc_start;
3263			u64 delalloc_end;
3264			bool delalloc;
3265
3266			delalloc = btrfs_find_delalloc_in_range(inode,
3267								prev_extent_end,
3268								i_size - 1,
3269								&delalloc_cached_state,
3270								&delalloc_start,
3271								&delalloc_end);
3272			if (!delalloc)
3273				cache.flags |= FIEMAP_EXTENT_LAST;
3274		} else {
3275			cache.flags |= FIEMAP_EXTENT_LAST;
3276		}
3277	}
3278
3279out_unlock:
3280	unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3281
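	/*
	 * Illustrative note: a return value of BTRFS_FIEMAP_FLUSH_CACHE
	 * (presumably propagated from the extent emission path once its entry
	 * cache is full) is not an error.  With the extent range already
	 * unlocked above, we flush the cached entries to the fiemap buffer and
	 * restart the search at cache.next_search_offset, with @len shrunk
	 * accordingly.
	 */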
3282	if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
3283		btrfs_release_path(path);
3284		ret = flush_fiemap_cache(fieinfo, &cache);
3285		if (ret)
3286			goto out;
3287		len -= cache.next_search_offset - start;
3288		start = cache.next_search_offset;
3289		goto restart;
3290	} else if (ret < 0) {
3291		goto out;
3292	}
3293
3294	/*
3295	 * Must free the path before emitting to the fiemap buffer because we
3296	 * may have a non-cloned leaf and if the fiemap buffer is memory mapped
3297	 * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
3298	 * waiting for an ordered extent that needs to modify that leaf in
3299	 * order to complete, therefore leading to a deadlock.
3300	 */
3301	btrfs_free_path(path);
3302	path = NULL;
3303
3304	ret = flush_fiemap_cache(fieinfo, &cache);
3305	if (ret)
3306		goto out;
3307
3308	ret = emit_last_fiemap_cache(fieinfo, &cache);
3309out:
3310	free_extent_state(delalloc_cached_state);
3311	kfree(cache.entries);
3312	btrfs_free_backref_share_ctx(backref_ctx);
3313	btrfs_free_path(path);
3314	return ret;
3315}
3316
3317static void __free_extent_buffer(struct extent_buffer *eb)
3318{
3319	kmem_cache_free(extent_buffer_cache, eb);
3320}
3321
3322static int extent_buffer_under_io(const struct extent_buffer *eb)
3323{
3324	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3325		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3326}
3327
3328static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3329{
3330	struct btrfs_subpage *subpage;
3331
3332	lockdep_assert_held(&folio->mapping->i_private_lock);
3333
3334	if (folio_test_private(folio)) {
3335		subpage = folio_get_private(folio);
3336		if (atomic_read(&subpage->eb_refs))
3337			return true;
3338		/*
3339		 * Even if there are no eb refs here, we may still have an
3340		 * end_page_read() call relying on page::private.
3341		 */
3342		if (atomic_read(&subpage->readers))
3343			return true;
3344	}
3345	return false;
3346}
3347
3348static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3349{
3350	struct btrfs_fs_info *fs_info = eb->fs_info;
3351	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3352
3353	/*
3354	 * For mapped eb, we're going to change the folio private, which should
3355	 * be done under the i_private_lock.
3356	 */
3357	if (mapped)
3358		spin_lock(&folio->mapping->i_private_lock);
3359
3360	if (!folio_test_private(folio)) {
3361		if (mapped)
3362			spin_unlock(&folio->mapping->i_private_lock);
3363		return;
3364	}
3365
3366	if (fs_info->nodesize >= PAGE_SIZE) {
3367		/*
3368		 * We do this since we'll remove the pages after we've
3369		 * removed the eb from the radix tree, so we could race
3370		 * and have this page now attached to the new eb.  So
3371		 * only clear the folio private if it's still connected to
3372		 * this eb.
3373		 */
3374		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3375			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3376			BUG_ON(folio_test_dirty(folio));
3377			BUG_ON(folio_test_writeback(folio));
3378			/* We need to make sure we haven't been attached to a new eb. */
3379			folio_detach_private(folio);
3380		}
3381		if (mapped)
3382			spin_unlock(&folio->mapping->i_private_lock);
3383		return;
3384	}
3385
3386	/*
3387	 * For subpage, we can have dummy eb with folio private attached.  In
3388	 * this case, we can directly detach the private as such folio is only
3389	 * attached to one dummy eb, no sharing.
3390	 */
3391	if (!mapped) {
3392		btrfs_detach_subpage(fs_info, folio);
3393		return;
3394	}
3395
3396	btrfs_folio_dec_eb_refs(fs_info, folio);
3397
3398	/*
3399	 * We can only detach the folio private if there are no other ebs in the
3400	 * page range and no unfinished IO.
3401	 */
3402	if (!folio_range_has_eb(fs_info, folio))
3403		btrfs_detach_subpage(fs_info, folio);
3404
3405	spin_unlock(&folio->mapping->i_private_lock);
3406}
3407
3408/* Release all pages attached to the extent buffer */
3409static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3410{
3411	ASSERT(!extent_buffer_under_io(eb));
3412
3413	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3414		struct folio *folio = eb->folios[i];
3415
3416		if (!folio)
3417			continue;
3418
3419		detach_extent_buffer_folio(eb, folio);
3420
3421		/* One for when we allocated the folio. */
3422		folio_put(folio);
3423	}
3424}
3425
3426/*
3427 * Helper for releasing the extent buffer.
3428 */
3429static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3430{
3431	btrfs_release_extent_buffer_pages(eb);
3432	btrfs_leak_debug_del_eb(eb);
3433	__free_extent_buffer(eb);
3434}
3435
3436static struct extent_buffer *
3437__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3438		      unsigned long len)
3439{
3440	struct extent_buffer *eb = NULL;
3441
3442	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3443	eb->start = start;
3444	eb->len = len;
3445	eb->fs_info = fs_info;
3446	init_rwsem(&eb->lock);
3447
3448	btrfs_leak_debug_add_eb(eb);
3449
3450	spin_lock_init(&eb->refs_lock);
3451	atomic_set(&eb->refs, 1);
3452
3453	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3454
3455	return eb;
3456}
3457
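/*
 * Illustrative note on the helper below: it clones @src into a freshly
 * allocated UNMAPPED extent buffer: folios are allocated and attached, the
 * contents are copied with copy_extent_buffer_full() and the clone is marked
 * uptodate.  The clone is never inserted into the buffer radix tree, so the
 * caller simply drops it with free_extent_buffer() when done.
 */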
3458struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3459{
3460	struct extent_buffer *new;
3461	int num_folios = num_extent_folios(src);
3462	int ret;
3463
3464	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3465	if (new == NULL)
3466		return NULL;
3467
3468	/*
3469	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3470	 * btrfs_release_extent_buffer() has different behavior for
3471	 * UNMAPPED subpage extent buffer.
3472	 */
3473	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3474
3475	ret = alloc_eb_folio_array(new, 0);
3476	if (ret) {
3477		btrfs_release_extent_buffer(new);
3478		return NULL;
3479	}
3480
3481	for (int i = 0; i < num_folios; i++) {
3482		struct folio *folio = new->folios[i];
3483		int ret;
3484
3485		ret = attach_extent_buffer_folio(new, folio, NULL);
3486		if (ret < 0) {
3487			btrfs_release_extent_buffer(new);
3488			return NULL;
3489		}
3490		WARN_ON(folio_test_dirty(folio));
3491	}
3492	copy_extent_buffer_full(new, src);
3493	set_extent_buffer_uptodate(new);
3494
3495	return new;
3496}
3497
3498struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3499						  u64 start, unsigned long len)
3500{
3501	struct extent_buffer *eb;
3502	int num_folios = 0;
3503	int ret;
3504
3505	eb = __alloc_extent_buffer(fs_info, start, len);
3506	if (!eb)
3507		return NULL;
3508
3509	ret = alloc_eb_folio_array(eb, 0);
3510	if (ret)
3511		goto err;
3512
3513	num_folios = num_extent_folios(eb);
3514	for (int i = 0; i < num_folios; i++) {
3515		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3516		if (ret < 0)
3517			goto err;
3518	}
3519
3520	set_extent_buffer_uptodate(eb);
3521	btrfs_set_header_nritems(eb, 0);
3522	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3523
3524	return eb;
3525err:
3526	for (int i = 0; i < num_folios; i++) {
3527		if (eb->folios[i]) {
3528			detach_extent_buffer_folio(eb, eb->folios[i]);
3529			__folio_put(eb->folios[i]);
3530		}
3531	}
3532	__free_extent_buffer(eb);
3533	return NULL;
3534}
3535
3536struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3537						u64 start)
3538{
3539	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3540}
3541
3542static void check_buffer_tree_ref(struct extent_buffer *eb)
3543{
3544	int refs;
3545	/*
3546	 * The TREE_REF bit is first set when the extent_buffer is added
3547	 * to the radix tree. It is also reset, if unset, when a new reference
3548	 * is created by find_extent_buffer.
3549	 *
3550	 * It is only cleared in two cases: freeing the last non-tree
3551	 * reference to the extent_buffer when its STALE bit is set or
3552	 * calling release_folio when the tree reference is the only reference.
3553	 *
3554	 * In both cases, care is taken to ensure that the extent_buffer's
3555	 * pages are not under io. However, release_folio can be concurrently
3556	 * called with creating new references, which is prone to race
3557	 * conditions between the calls to check_buffer_tree_ref in those
3558	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3559	 *
3560	 * The actual lifetime of the extent_buffer in the radix tree is
3561	 * adequately protected by the refcount, but the TREE_REF bit and
3562	 * its corresponding reference are not. To protect against this
3563	 * class of races, we call check_buffer_tree_ref from the codepaths
3564	 * which trigger io. Note that once io is initiated, TREE_REF can no
3565	 * longer be cleared, so that is the moment at which any such race is
3566	 * best fixed.
3567	 */
3568	refs = atomic_read(&eb->refs);
3569	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3570		return;
3571
3572	spin_lock(&eb->refs_lock);
3573	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3574		atomic_inc(&eb->refs);
3575	spin_unlock(&eb->refs_lock);
3576}
3577
3578static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3579{
3580	int num_folios = num_extent_folios(eb);
3581
3582	check_buffer_tree_ref(eb);
3583
3584	for (int i = 0; i < num_folios; i++)
3585		folio_mark_accessed(eb->folios[i]);
3586}
3587
3588struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3589					 u64 start)
3590{
3591	struct extent_buffer *eb;
3592
3593	eb = find_extent_buffer_nolock(fs_info, start);
3594	if (!eb)
3595		return NULL;
3596	/*
3597	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3598	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3599	 * another task running free_extent_buffer() might have seen that flag
3600	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3601	 * writeback flags not set) and it's still in the tree (flag
3602	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3603	 * decrementing the extent buffer's reference count twice.  So here we
3604	 * could race and increment the eb's reference count, clear its stale
3605	 * flag, mark it as dirty and drop our reference before the other task
3606	 * finishes executing free_extent_buffer, which would later result in
3607	 * an attempt to free an extent buffer that is dirty.
3608	 */
3609	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3610		spin_lock(&eb->refs_lock);
3611		spin_unlock(&eb->refs_lock);
3612	}
3613	mark_extent_buffer_accessed(eb);
3614	return eb;
3615}
3616
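/*
 * Illustrative usage sketch for the lookup above: the returned eb carries a
 * reference that the caller must drop again, e.g.:
 *
 *	eb = find_extent_buffer(fs_info, bytenr);
 *	if (eb) {
 *		... use the buffer ...
 *		free_extent_buffer(eb);
 *	}
 */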
3617#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3618struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3619					u64 start)
3620{
3621	struct extent_buffer *eb, *exists = NULL;
3622	int ret;
3623
3624	eb = find_extent_buffer(fs_info, start);
3625	if (eb)
3626		return eb;
3627	eb = alloc_dummy_extent_buffer(fs_info, start);
3628	if (!eb)
3629		return ERR_PTR(-ENOMEM);
3630	eb->fs_info = fs_info;
3631again:
3632	ret = radix_tree_preload(GFP_NOFS);
3633	if (ret) {
3634		exists = ERR_PTR(ret);
3635		goto free_eb;
3636	}
3637	spin_lock(&fs_info->buffer_lock);
3638	ret = radix_tree_insert(&fs_info->buffer_radix,
3639				start >> fs_info->sectorsize_bits, eb);
3640	spin_unlock(&fs_info->buffer_lock);
3641	radix_tree_preload_end();
3642	if (ret == -EEXIST) {
3643		exists = find_extent_buffer(fs_info, start);
3644		if (exists)
3645			goto free_eb;
3646		else
3647			goto again;
3648	}
3649	check_buffer_tree_ref(eb);
3650	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3651
3652	return eb;
3653free_eb:
3654	btrfs_release_extent_buffer(eb);
3655	return exists;
3656}
3657#endif
3658
3659static struct extent_buffer *grab_extent_buffer(
3660		struct btrfs_fs_info *fs_info, struct page *page)
3661{
3662	struct folio *folio = page_folio(page);
3663	struct extent_buffer *exists;
3664
3665	/*
3666	 * For subpage case, we completely rely on radix tree to ensure we
3667	 * don't try to insert two ebs for the same bytenr.  So here we always
3668	 * return NULL and just continue.
3669	 */
3670	if (fs_info->nodesize < PAGE_SIZE)
3671		return NULL;
3672
3673	/* Page not yet attached to an extent buffer */
3674	if (!folio_test_private(folio))
3675		return NULL;
3676
3677	/*
3678	 * We could have already allocated an eb for this page and attached one
3679	 * so let's see if we can get a ref on the existing eb, and if we can we
3680	 * know it's good and we can just return that one, else we know we can
3681	 * just overwrite folio private.
3682	 */
3683	exists = folio_get_private(folio);
3684	if (atomic_inc_not_zero(&exists->refs))
3685		return exists;
3686
3687	WARN_ON(PageDirty(page));
3688	folio_detach_private(folio);
3689	return NULL;
3690}
3691
3692static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3693{
3694	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3695		btrfs_err(fs_info, "bad tree block start %llu", start);
3696		return -EINVAL;
3697	}
3698
3699	if (fs_info->nodesize < PAGE_SIZE &&
3700	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3701		btrfs_err(fs_info,
3702		"tree block crosses page boundary, start %llu nodesize %u",
3703			  start, fs_info->nodesize);
3704		return -EINVAL;
3705	}
3706	if (fs_info->nodesize >= PAGE_SIZE &&
3707	    !PAGE_ALIGNED(start)) {
3708		btrfs_err(fs_info,
3709		"tree block is not page aligned, start %llu nodesize %u",
3710			  start, fs_info->nodesize);
3711		return -EINVAL;
3712	}
3713	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3714	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3715		btrfs_warn(fs_info,
3716"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3717			      start, fs_info->nodesize);
3718	}
3719	return 0;
3720}
3721
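/*
 * Worked example for check_eb_alignment() above (illustrative): with
 * sectorsize 4K and nodesize 16K, start == 20K passes the sectorsize and
 * page-alignment checks on a 4K-page system but is not nodesize aligned, so
 * only the one-time "full metadata balance" warning is emitted and 0 is
 * returned.  start == 21K is not sectorsize aligned and is rejected with
 * -EINVAL.
 */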
3722
3723/*
3724 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3725 * Return >0 if there is already another extent buffer for the range,
3726 * and @found_eb_ret will be updated.
3727 * Return -EAGAIN if the filemap has an existing folio but with a different size
3728 * than @eb.
3729 * The caller needs to free the existing folios and retry using the same order.
3730 */
3731static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3732				      struct extent_buffer **found_eb_ret)
3733{
3734
3735	struct btrfs_fs_info *fs_info = eb->fs_info;
3736	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3737	const unsigned long index = eb->start >> PAGE_SHIFT;
3738	struct folio *existing_folio;
3739	int ret;
3740
3741	ASSERT(found_eb_ret);
3742
3743	/* Caller should ensure the folio exists. */
3744	ASSERT(eb->folios[i]);
3745
3746retry:
3747	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3748				GFP_NOFS | __GFP_NOFAIL);
3749	if (!ret)
3750		return 0;
3751
3752	existing_folio = filemap_lock_folio(mapping, index + i);
3753	/* The page cache only exists for a very short time, just retry. */
3754	if (IS_ERR(existing_folio))
3755		goto retry;
3756
3757	/* For now, we should only have single-page folios for btree inode. */
3758	ASSERT(folio_nr_pages(existing_folio) == 1);
3759
3760	if (folio_size(existing_folio) != eb->folio_size) {
3761		folio_unlock(existing_folio);
3762		folio_put(existing_folio);
3763		return -EAGAIN;
3764	}
3765
3766	if (fs_info->nodesize < PAGE_SIZE) {
3767		/*
3768		 * We're going to reuse the existing page, can drop our page
3769		 * and subpage structure now.
3770		 */
3771		__free_page(folio_page(eb->folios[i], 0));
3772		eb->folios[i] = existing_folio;
3773	} else {
3774		struct extent_buffer *existing_eb;
3775
3776		existing_eb = grab_extent_buffer(fs_info,
3777						 folio_page(existing_folio, 0));
3778		if (existing_eb) {
3779			/* The extent buffer still exists, we can use it directly. */
3780			*found_eb_ret = existing_eb;
3781			folio_unlock(existing_folio);
3782			folio_put(existing_folio);
3783			return 1;
3784		}
3785		/* The extent buffer no longer exists, we can reuse the folio. */
3786		__free_page(folio_page(eb->folios[i], 0));
3787		eb->folios[i] = existing_folio;
3788	}
3789	return 0;
3790}
3791
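/*
 * Overview of the allocation path below (illustrative summary): we first try
 * find_extent_buffer(); on a miss a new eb is allocated, its folios are
 * attached to the btree inode's mapping and the eb is inserted into
 * fs_info->buffer_radix.  If we lose the insertion race (-EEXIST from the
 * radix tree, or an existing eb found while attaching folios), the winner's
 * eb is returned instead and our local allocation is torn down in the out:
 * path.
 */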
3792struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3793					  u64 start, u64 owner_root, int level)
3794{
3795	unsigned long len = fs_info->nodesize;
3796	int num_folios;
3797	int attached = 0;
3798	struct extent_buffer *eb;
3799	struct extent_buffer *existing_eb = NULL;
3800	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3801	struct btrfs_subpage *prealloc = NULL;
3802	u64 lockdep_owner = owner_root;
3803	bool page_contig = true;
3804	int uptodate = 1;
3805	int ret;
3806
3807	if (check_eb_alignment(fs_info, start))
3808		return ERR_PTR(-EINVAL);
3809
3810#if BITS_PER_LONG == 32
3811	if (start >= MAX_LFS_FILESIZE) {
3812		btrfs_err_rl(fs_info,
3813		"extent buffer %llu is beyond 32bit page cache limit", start);
3814		btrfs_err_32bit_limit(fs_info);
3815		return ERR_PTR(-EOVERFLOW);
3816	}
3817	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3818		btrfs_warn_32bit_limit(fs_info);
3819#endif
3820
3821	eb = find_extent_buffer(fs_info, start);
3822	if (eb)
3823		return eb;
3824
3825	eb = __alloc_extent_buffer(fs_info, start, len);
3826	if (!eb)
3827		return ERR_PTR(-ENOMEM);
3828
3829	/*
3830	 * The reloc trees are just snapshots, so we need them to appear to be
3831	 * just like any other fs tree WRT lockdep.
3832	 */
3833	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3834		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3835
3836	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3837
3838	/*
3839	 * Preallocate folio private for subpage case, so that we won't
3840	 * allocate memory while holding the i_private_lock or the page lock.
3841	 *
3842	 * The memory will be freed by attach_extent_buffer_page() or freed
3843	 * manually if we exit earlier.
3844	 */
3845	if (fs_info->nodesize < PAGE_SIZE) {
3846		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3847		if (IS_ERR(prealloc)) {
3848			ret = PTR_ERR(prealloc);
3849			goto out;
3850		}
3851	}
3852
3853reallocate:
3854	/* Allocate all pages first. */
3855	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3856	if (ret < 0) {
3857		btrfs_free_subpage(prealloc);
3858		goto out;
3859	}
3860
3861	num_folios = num_extent_folios(eb);
3862	/* Attach all pages to the filemap. */
3863	for (int i = 0; i < num_folios; i++) {
3864		struct folio *folio;
3865
3866		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3867		if (ret > 0) {
3868			ASSERT(existing_eb);
3869			goto out;
3870		}
3871
3872		/*
3873		 * TODO: Special handling for a corner case where the order of
3874		 * folios mismatch between the new eb and filemap.
3875		 *
3876		 * This happens when:
3877		 *
3878		 * - the new eb is using higher order folio
3879		 *
3880		 * - the filemap is still using 0-order folios for the range
3881		 *   This can happen at the previous eb allocation, and we don't
3882		 *   have higher order folio for the call.
3883		 *
3884		 * - the existing eb has already been freed
3885		 *
3886		 * In this case, we have to free the existing folios first, and
3887		 * re-allocate using the same order.
3888		 * Thankfully this is not going to happen yet, as we're still
3889		 * using 0-order folios.
3890		 */
3891		if (unlikely(ret == -EAGAIN)) {
3892			ASSERT(0);
3893			goto reallocate;
3894		}
3895		attached++;
3896
3897		/*
3898		 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3899		 * reliable, as we may choose to reuse the existing page cache
3900		 * and free the allocated page.
3901		 */
3902		folio = eb->folios[i];
3903		eb->folio_size = folio_size(folio);
3904		eb->folio_shift = folio_shift(folio);
3905		spin_lock(&mapping->i_private_lock);
3906		/* Should not fail, as we have preallocated the memory */
3907		ret = attach_extent_buffer_folio(eb, folio, prealloc);
3908		ASSERT(!ret);
3909		/*
3910		 * To inform that we have an extra eb under allocation, so that
3911		 * detach_extent_buffer_page() won't release the folio private
3912		 * when the eb hasn't yet been inserted into the radix tree.
3913		 *
3914		 * The ref will be decreased when the eb releases the page, in
3915		 * detach_extent_buffer_page().
3916		 * Thus it needs no special handling in the error path.
3917		 */
3918		btrfs_folio_inc_eb_refs(fs_info, folio);
3919		spin_unlock(&mapping->i_private_lock);
3920
3921		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3922
3923		/*
3924		 * Check if the current page is physically contiguous with previous eb
3925		 * page.
3926		 * At this stage, either we allocated a large folio, thus @i
3927		 * would only be 0, or we fall back to per-page allocation.
3928		 */
3929		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3930			page_contig = false;
3931
3932		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3933			uptodate = 0;
3934
3935		/*
3936		 * We can't unlock the pages just yet since the extent buffer
3937		 * hasn't been properly inserted into the radix tree; this
3938		 * opens a race with btree_release_folio, which can free a page
3939		 * while we are still filling in all pages for the buffer, and
3940		 * we could crash.
3941		 */
3942	}
3943	if (uptodate)
3944		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3945	/* All pages are physically contiguous, can skip cross page handling. */
3946	if (page_contig)
3947		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3948again:
3949	ret = radix_tree_preload(GFP_NOFS);
3950	if (ret)
3951		goto out;
3952
3953	spin_lock(&fs_info->buffer_lock);
3954	ret = radix_tree_insert(&fs_info->buffer_radix,
3955				start >> fs_info->sectorsize_bits, eb);
3956	spin_unlock(&fs_info->buffer_lock);
3957	radix_tree_preload_end();
3958	if (ret == -EEXIST) {
3959		ret = 0;
3960		existing_eb = find_extent_buffer(fs_info, start);
3961		if (existing_eb)
3962			goto out;
3963		else
3964			goto again;
3965	}
3966	/* add one reference for the tree */
3967	check_buffer_tree_ref(eb);
3968	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3969
3970	/*
3971	 * Now it's safe to unlock the pages because any calls to
3972	 * btree_release_folio will correctly detect that a page belongs to a
3973	 * live buffer and won't free them prematurely.
3974	 */
3975	for (int i = 0; i < num_folios; i++)
3976		unlock_page(folio_page(eb->folios[i], 0));
3977	return eb;
3978
3979out:
3980	WARN_ON(!atomic_dec_and_test(&eb->refs));
3981
3982	/*
3983	 * Any attached folios need to be detached before we unlock them.  This
3984	 * is because when we insert our new folios into the mapping, we then
3985	 * attach our eb to each folio.  If we fail to insert a folio, we look
3986	 * up the folio already at that index and grab its eb.  We do not want
3987	 * that lookup to grab this eb, as we're getting ready to free it.  So
3988	 * we have to detach it first and then unlock it.
3989	 *
3990	 * We have to drop our reference and NULL it out here because in the
3991	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3992	 * Below when we call btrfs_release_extent_buffer() we will call
3993	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3994	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3995	 * double put our reference and be super sad.
3996	 */
3997	for (int i = 0; i < attached; i++) {
3998		ASSERT(eb->folios[i]);
3999		detach_extent_buffer_folio(eb, eb->folios[i]);
4000		unlock_page(folio_page(eb->folios[i], 0));
4001		folio_put(eb->folios[i]);
4002		eb->folios[i] = NULL;
4003	}
4004	/*
4005	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
4006	 * so it can be cleaned up without utilizing page->mapping.
4007	 */
4008	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4009
4010	btrfs_release_extent_buffer(eb);
4011	if (ret < 0)
4012		return ERR_PTR(ret);
4013	ASSERT(existing_eb);
4014	return existing_eb;
4015}
4016
4017static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4018{
4019	struct extent_buffer *eb =
4020			container_of(head, struct extent_buffer, rcu_head);
4021
4022	__free_extent_buffer(eb);
4023}
4024
4025static int release_extent_buffer(struct extent_buffer *eb)
4026	__releases(&eb->refs_lock)
4027{
4028	lockdep_assert_held(&eb->refs_lock);
4029
4030	WARN_ON(atomic_read(&eb->refs) == 0);
4031	if (atomic_dec_and_test(&eb->refs)) {
4032		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4033			struct btrfs_fs_info *fs_info = eb->fs_info;
4034
4035			spin_unlock(&eb->refs_lock);
4036
4037			spin_lock(&fs_info->buffer_lock);
4038			radix_tree_delete(&fs_info->buffer_radix,
4039					  eb->start >> fs_info->sectorsize_bits);
4040			spin_unlock(&fs_info->buffer_lock);
4041		} else {
4042			spin_unlock(&eb->refs_lock);
4043		}
4044
4045		btrfs_leak_debug_del_eb(eb);
4046		/* Should be safe to release our pages at this point */
4047		btrfs_release_extent_buffer_pages(eb);
4048#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4049		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
4050			__free_extent_buffer(eb);
4051			return 1;
4052		}
4053#endif
4054		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4055		return 1;
4056	}
4057	spin_unlock(&eb->refs_lock);
4058
4059	return 0;
4060}
4061
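/*
 * Note on the fast path below (illustrative): references are dropped
 * locklessly with atomic_try_cmpxchg() as long as the drop cannot be the
 * final one (refs above 3 for a mapped eb, above 1 for an UNMAPPED one).
 * Only near that threshold do we take refs_lock and go through
 * release_extent_buffer().
 */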
4062void free_extent_buffer(struct extent_buffer *eb)
4063{
4064	int refs;
4065	if (!eb)
4066		return;
4067
4068	refs = atomic_read(&eb->refs);
4069	while (1) {
4070		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
4071		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
4072			refs == 1))
4073			break;
4074		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
4075			return;
4076	}
4077
4078	spin_lock(&eb->refs_lock);
4079	if (atomic_read(&eb->refs) == 2 &&
4080	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4081	    !extent_buffer_under_io(eb) &&
4082	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4083		atomic_dec(&eb->refs);
4084
4085	/*
4086	 * I know this is terrible, but it's temporary until we stop tracking
4087	 * the uptodate bits and such for the extent buffers.
4088	 */
4089	release_extent_buffer(eb);
4090}
4091
4092void free_extent_buffer_stale(struct extent_buffer *eb)
4093{
4094	if (!eb)
4095		return;
4096
4097	spin_lock(&eb->refs_lock);
4098	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4099
4100	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4101	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4102		atomic_dec(&eb->refs);
4103	release_extent_buffer(eb);
4104}
4105
4106static void btree_clear_folio_dirty(struct folio *folio)
4107{
4108	ASSERT(folio_test_dirty(folio));
4109	ASSERT(folio_test_locked(folio));
4110	folio_clear_dirty_for_io(folio);
4111	xa_lock_irq(&folio->mapping->i_pages);
4112	if (!folio_test_dirty(folio))
4113		__xa_clear_mark(&folio->mapping->i_pages,
4114				folio_index(folio), PAGECACHE_TAG_DIRTY);
4115	xa_unlock_irq(&folio->mapping->i_pages);
4116}
4117
4118static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4119{
4120	struct btrfs_fs_info *fs_info = eb->fs_info;
4121	struct folio *folio = eb->folios[0];
4122	bool last;
4123
4124	/* btree_clear_folio_dirty() needs page locked. */
4125	folio_lock(folio);
4126	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4127	if (last)
4128		btree_clear_folio_dirty(folio);
4129	folio_unlock(folio);
4130	WARN_ON(atomic_read(&eb->refs) == 0);
4131}
4132
4133void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4134			      struct extent_buffer *eb)
4135{
4136	struct btrfs_fs_info *fs_info = eb->fs_info;
4137	int num_folios;
4138
4139	btrfs_assert_tree_write_locked(eb);
4140
4141	if (trans && btrfs_header_generation(eb) != trans->transid)
4142		return;
4143
4144	/*
4145	 * Instead of clearing the dirty flag off of the buffer, mark it as
4146	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4147	 * write-ordering in zoned mode, without the need to later re-dirty
4148	 * the extent_buffer.
4149	 *
4150	 * The actual zeroout of the buffer will happen later in
4151	 * btree_csum_one_bio.
4152	 */
4153	if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4154		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4155		return;
4156	}
4157
4158	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4159		return;
4160
4161	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4162				 fs_info->dirty_metadata_batch);
4163
4164	if (eb->fs_info->nodesize < PAGE_SIZE)
4165		return clear_subpage_extent_buffer_dirty(eb);
4166
4167	num_folios = num_extent_folios(eb);
4168	for (int i = 0; i < num_folios; i++) {
4169		struct folio *folio = eb->folios[i];
4170
4171		if (!folio_test_dirty(folio))
4172			continue;
4173		folio_lock(folio);
4174		btree_clear_folio_dirty(folio);
4175		folio_unlock(folio);
4176	}
4177	WARN_ON(atomic_read(&eb->refs) == 0);
4178}
4179
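/*
 * Note on the helper below (illustrative): callers must already hold a tree
 * reference and the buffer must not be flagged ZONED_ZEROOUT (see the
 * WARN_ONs).  Only the clean -> dirty transition dirties the underlying
 * folios and accounts the buffer in dirty_metadata_bytes.
 */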
4180void set_extent_buffer_dirty(struct extent_buffer *eb)
4181{
4182	int num_folios;
4183	bool was_dirty;
4184
4185	check_buffer_tree_ref(eb);
4186
4187	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4188
4189	num_folios = num_extent_folios(eb);
4190	WARN_ON(atomic_read(&eb->refs) == 0);
4191	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4192	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
4193
4194	if (!was_dirty) {
4195		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4196
4197		/*
4198		 * For subpage case, we can have other extent buffers in the
4199		 * same page, and in clear_subpage_extent_buffer_dirty() we
4200		 * have to clear page dirty without subpage lock held.
4201		 * This can cause a race where our page gets its dirty bit
4202		 * cleared right after we set it.
4203		 *
4204		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4205		 * its page for other reasons, so we can use the page lock to
4206		 * prevent the above race.
4207		 */
4208		if (subpage)
4209			lock_page(folio_page(eb->folios[0], 0));
4210		for (int i = 0; i < num_folios; i++)
4211			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4212					      eb->start, eb->len);
4213		if (subpage)
4214			unlock_page(folio_page(eb->folios[0], 0));
4215		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4216					 eb->len,
4217					 eb->fs_info->dirty_metadata_batch);
4218	}
4219#ifdef CONFIG_BTRFS_DEBUG
4220	for (int i = 0; i < num_folios; i++)
4221		ASSERT(folio_test_dirty(eb->folios[i]));
4222#endif
4223}
4224
4225void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4226{
4227	struct btrfs_fs_info *fs_info = eb->fs_info;
4228	int num_folios = num_extent_folios(eb);
4229
4230	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4231	for (int i = 0; i < num_folios; i++) {
4232		struct folio *folio = eb->folios[i];
4233
4234		if (!folio)
4235			continue;
4236
4237		/*
4238		 * This is special handling for metadata subpage, as regular
4239		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4240		 */
4241		if (fs_info->nodesize >= PAGE_SIZE)
4242			folio_clear_uptodate(folio);
4243		else
4244			btrfs_subpage_clear_uptodate(fs_info, folio,
4245						     eb->start, eb->len);
4246	}
4247}
4248
4249void set_extent_buffer_uptodate(struct extent_buffer *eb)
4250{
4251	struct btrfs_fs_info *fs_info = eb->fs_info;
4252	int num_folios = num_extent_folios(eb);
4253
4254	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4255	for (int i = 0; i < num_folios; i++) {
4256		struct folio *folio = eb->folios[i];
4257
4258		/*
4259		 * This is special handling for metadata subpage, as regular
4260		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4261		 */
4262		if (fs_info->nodesize >= PAGE_SIZE)
4263			folio_mark_uptodate(folio);
4264		else
4265			btrfs_subpage_set_uptodate(fs_info, folio,
4266						   eb->start, eb->len);
4267	}
4268}
4269
4270static void end_bbio_meta_read(struct btrfs_bio *bbio)
4271{
4272	struct extent_buffer *eb = bbio->private;
4273	struct btrfs_fs_info *fs_info = eb->fs_info;
4274	bool uptodate = !bbio->bio.bi_status;
4275	struct folio_iter fi;
4276	u32 bio_offset = 0;
4277
4278	eb->read_mirror = bbio->mirror_num;
4279
4280	if (uptodate &&
4281	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4282		uptodate = false;
4283
4284	if (uptodate) {
4285		set_extent_buffer_uptodate(eb);
4286	} else {
4287		clear_extent_buffer_uptodate(eb);
4288		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4289	}
4290
4291	bio_for_each_folio_all(fi, &bbio->bio) {
4292		struct folio *folio = fi.folio;
4293		u64 start = eb->start + bio_offset;
4294		u32 len = fi.length;
4295
4296		if (uptodate)
4297			btrfs_folio_set_uptodate(fs_info, folio, start, len);
4298		else
4299			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4300
4301		bio_offset += len;
4302	}
4303
4304	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4305	smp_mb__after_atomic();
4306	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4307	free_extent_buffer(eb);
4308
4309	bio_put(&bbio->bio);
4310}
4311
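/*
 * Note on the read path below (illustrative): @wait selects between a
 * fire-and-forget read and a synchronous one.  WAIT_NONE, as used by
 * btrfs_readahead_tree_block() at the end of this file, only submits the
 * bio, while WAIT_COMPLETE additionally waits on EXTENT_BUFFER_READING and
 * returns -EIO if the buffer still is not uptodate afterwards.
 */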
4312int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4313			     struct btrfs_tree_parent_check *check)
4314{
4315	struct btrfs_bio *bbio;
4316	bool ret;
4317
4318	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4319		return 0;
4320
4321	/*
4322	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4323	 * operation, which could potentially still be in flight.  In this case
4324	 * we simply want to return an error.
4325	 */
4326	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4327		return -EIO;
4328
4329	/* Someone else is already reading the buffer, just wait for it. */
4330	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4331		goto done;
4332
4333	/*
4334	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
4335	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
4336	 * started and finished reading the same eb.  In this case, UPTODATE
4337	 * will now be set, and we shouldn't read it in again.
4338	 */
4339	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
4340		clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4341		smp_mb__after_atomic();
4342		wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4343		return 0;
4344	}
4345
4346	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4347	eb->read_mirror = 0;
4348	check_buffer_tree_ref(eb);
4349	atomic_inc(&eb->refs);
4350
4351	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4352			       REQ_OP_READ | REQ_META, eb->fs_info,
4353			       end_bbio_meta_read, eb);
4354	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4355	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4356	bbio->file_offset = eb->start;
4357	memcpy(&bbio->parent_check, check, sizeof(*check));
4358	if (eb->fs_info->nodesize < PAGE_SIZE) {
4359		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4360				    eb->start - folio_pos(eb->folios[0]));
4361		ASSERT(ret);
4362	} else {
4363		int num_folios = num_extent_folios(eb);
4364
4365		for (int i = 0; i < num_folios; i++) {
4366			struct folio *folio = eb->folios[i];
4367
4368			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
4369			ASSERT(ret);
4370		}
4371	}
4372	btrfs_submit_bio(bbio, mirror_num);
4373
4374done:
4375	if (wait == WAIT_COMPLETE) {
4376		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4377		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4378			return -EIO;
4379	}
4380
4381	return 0;
4382}
4383
4384static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4385			    unsigned long len)
4386{
4387	btrfs_warn(eb->fs_info,
4388		"access to eb bytenr %llu len %u out of range start %lu len %lu",
4389		eb->start, eb->len, start, len);
4390	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4391
4392	return true;
4393}
4394
4395/*
4396 * Check if the [start, start + len) range is valid before reading/writing
4397 * the eb.
4398 * NOTE: @start and @len are offset inside the eb, not logical address.
4399 *
4400 * Caller should not touch the dst/src memory if this function returns error.
4401 */
4402static inline int check_eb_range(const struct extent_buffer *eb,
4403				 unsigned long start, unsigned long len)
4404{
4405	unsigned long offset;
4406
4407	/* start, start + len should not go beyond eb->len nor overflow */
4408	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4409		return report_eb_range(eb, start, len);
4410
4411	return false;
4412}
4413
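/*
 * Illustrative usage sketch for the accessor below: callers copy a fixed-size
 * on-disk structure out of the buffer at a byte offset relative to the start
 * of the eb (not a logical address), e.g.:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key, offset, sizeof(disk_key));
 */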
4414void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4415			unsigned long start, unsigned long len)
4416{
4417	const int unit_size = eb->folio_size;
4418	size_t cur;
4419	size_t offset;
4420	char *dst = (char *)dstv;
4421	unsigned long i = get_eb_folio_index(eb, start);
4422
4423	if (check_eb_range(eb, start, len)) {
4424		/*
4425		 * Invalid range hit, reset the memory, so callers won't get
4426		 * some random garbage for their uninitialized memory.
4427		 */
4428		memset(dstv, 0, len);
4429		return;
4430	}
4431
4432	if (eb->addr) {
4433		memcpy(dstv, eb->addr + start, len);
4434		return;
4435	}
4436
4437	offset = get_eb_offset_in_folio(eb, start);
4438
4439	while (len > 0) {
4440		char *kaddr;
4441
4442		cur = min(len, unit_size - offset);
4443		kaddr = folio_address(eb->folios[i]);
4444		memcpy(dst, kaddr + offset, cur);
4445
4446		dst += cur;
4447		len -= cur;
4448		offset = 0;
4449		i++;
4450	}
4451}
4452
4453int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4454				       void __user *dstv,
4455				       unsigned long start, unsigned long len)
4456{
4457	const int unit_size = eb->folio_size;
4458	size_t cur;
4459	size_t offset;
4460	char __user *dst = (char __user *)dstv;
4461	unsigned long i = get_eb_folio_index(eb, start);
4462	int ret = 0;
4463
4464	WARN_ON(start > eb->len);
4465	WARN_ON(start + len > eb->start + eb->len);
4466
4467	if (eb->addr) {
4468		if (copy_to_user_nofault(dstv, eb->addr + start, len))
4469			ret = -EFAULT;
4470		return ret;
4471	}
4472
4473	offset = get_eb_offset_in_folio(eb, start);
4474
4475	while (len > 0) {
4476		char *kaddr;
4477
4478		cur = min(len, unit_size - offset);
4479		kaddr = folio_address(eb->folios[i]);
4480		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4481			ret = -EFAULT;
4482			break;
4483		}
4484
4485		dst += cur;
4486		len -= cur;
4487		offset = 0;
4488		i++;
4489	}
4490
4491	return ret;
4492}
4493
4494int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4495			 unsigned long start, unsigned long len)
4496{
4497	const int unit_size = eb->folio_size;
4498	size_t cur;
4499	size_t offset;
4500	char *kaddr;
4501	char *ptr = (char *)ptrv;
4502	unsigned long i = get_eb_folio_index(eb, start);
4503	int ret = 0;
4504
4505	if (check_eb_range(eb, start, len))
4506		return -EINVAL;
4507
4508	if (eb->addr)
4509		return memcmp(ptrv, eb->addr + start, len);
4510
4511	offset = get_eb_offset_in_folio(eb, start);
4512
4513	while (len > 0) {
4514		cur = min(len, unit_size - offset);
4515		kaddr = folio_address(eb->folios[i]);
4516		ret = memcmp(ptr, kaddr + offset, cur);
4517		if (ret)
4518			break;
4519
4520		ptr += cur;
4521		len -= cur;
4522		offset = 0;
4523		i++;
4524	}
4525	return ret;
4526}
4527
4528/*
4529 * Check that the extent buffer is uptodate.
4530 *
4531 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4532 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4533 */
4534static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4535{
4536	struct btrfs_fs_info *fs_info = eb->fs_info;
4537	struct folio *folio = eb->folios[i];
4538
4539	ASSERT(folio);
4540
4541	/*
4542	 * If we are using the commit root we could potentially clear a page
4543	 * Uptodate while we're using the extent buffer that we've previously
4544	 * looked up.  We don't want to complain in this case, as the page was
4545	 * valid before, we just didn't write it out.  Instead we want to catch
4546	 * the case where we didn't actually read the block properly, which
4547	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4548	 */
4549	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4550		return;
4551
4552	if (fs_info->nodesize < PAGE_SIZE) {
4553		struct folio *folio = eb->folios[0];
4554
4555		ASSERT(i == 0);
4556		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4557							 eb->start, eb->len)))
4558			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4559	} else {
4560		WARN_ON(!folio_test_uptodate(folio));
4561	}
4562}
4563
4564static void __write_extent_buffer(const struct extent_buffer *eb,
4565				  const void *srcv, unsigned long start,
4566				  unsigned long len, bool use_memmove)
4567{
4568	const int unit_size = eb->folio_size;
4569	size_t cur;
4570	size_t offset;
4571	char *kaddr;
4572	char *src = (char *)srcv;
4573	unsigned long i = get_eb_folio_index(eb, start);
4574	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4575	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4576
4577	if (check_eb_range(eb, start, len))
4578		return;
4579
4580	if (eb->addr) {
4581		if (use_memmove)
4582			memmove(eb->addr + start, srcv, len);
4583		else
4584			memcpy(eb->addr + start, srcv, len);
4585		return;
4586	}
4587
4588	offset = get_eb_offset_in_folio(eb, start);
4589
4590	while (len > 0) {
4591		if (check_uptodate)
4592			assert_eb_folio_uptodate(eb, i);
4593
4594		cur = min(len, unit_size - offset);
4595		kaddr = folio_address(eb->folios[i]);
4596		if (use_memmove)
4597			memmove(kaddr + offset, src, cur);
4598		else
4599			memcpy(kaddr + offset, src, cur);
4600
4601		src += cur;
4602		len -= cur;
4603		offset = 0;
4604		i++;
4605	}
4606}
4607
4608void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4609			 unsigned long start, unsigned long len)
4610{
4611	return __write_extent_buffer(eb, srcv, start, len, false);
4612}
4613
4614static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4615				 unsigned long start, unsigned long len)
4616{
4617	const int unit_size = eb->folio_size;
4618	unsigned long cur = start;
4619
4620	if (eb->addr) {
4621		memset(eb->addr + start, c, len);
4622		return;
4623	}
4624
4625	while (cur < start + len) {
4626		unsigned long index = get_eb_folio_index(eb, cur);
4627		unsigned int offset = get_eb_offset_in_folio(eb, cur);
4628		unsigned int cur_len = min(start + len - cur, unit_size - offset);
4629
4630		assert_eb_folio_uptodate(eb, index);
4631		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4632
4633		cur += cur_len;
4634	}
4635}
4636
4637void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4638			   unsigned long len)
4639{
4640	if (check_eb_range(eb, start, len))
4641		return;
4642	return memset_extent_buffer(eb, 0, start, len);
4643}
4644
4645void copy_extent_buffer_full(const struct extent_buffer *dst,
4646			     const struct extent_buffer *src)
4647{
4648	const int unit_size = src->folio_size;
4649	unsigned long cur = 0;
4650
4651	ASSERT(dst->len == src->len);
4652
4653	while (cur < src->len) {
4654		unsigned long index = get_eb_folio_index(src, cur);
4655		unsigned long offset = get_eb_offset_in_folio(src, cur);
4656		unsigned long cur_len = min(src->len, unit_size - offset);
4657		void *addr = folio_address(src->folios[index]) + offset;
4658
4659		write_extent_buffer(dst, addr, cur, cur_len);
4660
4661		cur += cur_len;
4662	}
4663}
4664
4665void copy_extent_buffer(const struct extent_buffer *dst,
4666			const struct extent_buffer *src,
4667			unsigned long dst_offset, unsigned long src_offset,
4668			unsigned long len)
4669{
4670	const int unit_size = dst->folio_size;
4671	u64 dst_len = dst->len;
4672	size_t cur;
4673	size_t offset;
4674	char *kaddr;
4675	unsigned long i = get_eb_folio_index(dst, dst_offset);
4676
4677	if (check_eb_range(dst, dst_offset, len) ||
4678	    check_eb_range(src, src_offset, len))
4679		return;
4680
4681	WARN_ON(src->len != dst_len);
4682
4683	offset = get_eb_offset_in_folio(dst, dst_offset);
4684
4685	while (len > 0) {
4686		assert_eb_folio_uptodate(dst, i);
4687
4688		cur = min(len, (unsigned long)(unit_size - offset));
4689
4690		kaddr = folio_address(dst->folios[i]);
4691		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4692
4693		src_offset += cur;
4694		len -= cur;
4695		offset = 0;
4696		i++;
4697	}
4698}
4699
4700/*
4701 * Calculate the folio and offset of the byte containing the given bit number.
4702 *
4703 * @eb:           the extent buffer
4704 * @start:        offset of the bitmap item in the extent buffer
4705 * @nr:           bit number
4706 * @folio_index:  return index of the folio in the extent buffer that contains
4707 *                the given bit number
4708 * @folio_offset: return offset into the folio given by folio_index
4709 *
4710 * This helper hides the ugliness of finding the byte in an extent buffer which
4711 * contains a given bit.
4712 */
4713static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4714				    unsigned long start, unsigned long nr,
4715				    unsigned long *folio_index,
4716				    size_t *folio_offset)
4717{
4718	size_t byte_offset = BIT_BYTE(nr);
4719	size_t offset;
4720
4721	/*
4722	 * The byte we want is the offset of the extent buffer + the offset of
4723	 * the bitmap item in the extent buffer + the offset of the byte in the
4724	 * bitmap item.
4725	 */
4726	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
4727
4728	*folio_index = offset >> eb->folio_shift;
4729	*folio_offset = offset_in_eb_folio(eb, offset);
4730}
4731
4732/*
4733 * Determine whether a bit in a bitmap item is set.
4734 *
4735 * @eb:     the extent buffer
4736 * @start:  offset of the bitmap item in the extent buffer
4737 * @nr:     bit number to test
4738 */
4739int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4740			   unsigned long nr)
4741{
4742	unsigned long i;
4743	size_t offset;
4744	u8 *kaddr;
4745
4746	eb_bitmap_offset(eb, start, nr, &i, &offset);
4747	assert_eb_folio_uptodate(eb, i);
4748	kaddr = folio_address(eb->folios[i]);
4749	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4750}
4751
4752static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4753{
4754	unsigned long index = get_eb_folio_index(eb, bytenr);
4755
4756	if (check_eb_range(eb, bytenr, 1))
4757		return NULL;
4758	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4759}
4760
4761/*
4762 * Set an area of a bitmap to 1.
4763 *
4764 * @eb:     the extent buffer
4765 * @start:  offset of the bitmap item in the extent buffer
4766 * @pos:    bit number of the first bit
4767 * @len:    number of bits to set
4768 */
4769void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4770			      unsigned long pos, unsigned long len)
4771{
4772	unsigned int first_byte = start + BIT_BYTE(pos);
4773	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4774	const bool same_byte = (first_byte == last_byte);
4775	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4776	u8 *kaddr;
4777
4778	if (same_byte)
4779		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4780
4781	/* Handle the first byte. */
4782	kaddr = extent_buffer_get_byte(eb, first_byte);
4783	*kaddr |= mask;
4784	if (same_byte)
4785		return;
4786
4787	/* Handle the byte aligned part. */
4788	ASSERT(first_byte + 1 <= last_byte);
4789	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4790
4791	/* Handle the last byte. */
4792	kaddr = extent_buffer_get_byte(eb, last_byte);
4793	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4794}
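/*
 * Worked example for the helper above (illustrative): with pos == 5 and
 * len == 14 the affected bits span three bytes.  The first byte is ORed with
 * BITMAP_FIRST_BYTE_MASK(5) (bits 5-7), the fully covered middle byte is set
 * via memset_extent_buffer(..., 0xff, ...), and the last byte is ORed with
 * BITMAP_LAST_BYTE_MASK(19) (bits 0-2).
 */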
4795
4796
4797/*
4798 * Clear an area of a bitmap.
4799 *
4800 * @eb:     the extent buffer
4801 * @start:  offset of the bitmap item in the extent buffer
4802 * @pos:    bit number of the first bit
4803 * @len:    number of bits to clear
4804 */
4805void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4806				unsigned long start, unsigned long pos,
4807				unsigned long len)
4808{
4809	unsigned int first_byte = start + BIT_BYTE(pos);
4810	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4811	const bool same_byte = (first_byte == last_byte);
4812	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4813	u8 *kaddr;
4814
4815	if (same_byte)
4816		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4817
4818	/* Handle the first byte. */
4819	kaddr = extent_buffer_get_byte(eb, first_byte);
4820	*kaddr &= ~mask;
4821	if (same_byte)
4822		return;
4823
4824	/* Handle the byte aligned part. */
4825	ASSERT(first_byte + 1 <= last_byte);
4826	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4827
4828	/* Handle the last byte. */
4829	kaddr = extent_buffer_get_byte(eb, last_byte);
4830	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4831}
4832
4833static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4834{
4835	unsigned long distance = (src > dst) ? src - dst : dst - src;
4836	return distance < len;
4837}
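/*
 * Worked example (illustrative): for src == 100, dst == 110 and len == 20 the
 * distance is 10, which is smaller than len, so the source range [100, 120)
 * and the destination range [110, 130) overlap and the copy helpers below
 * have to use memmove() semantics instead of memcpy().
 */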
4838
4839void memcpy_extent_buffer(const struct extent_buffer *dst,
4840			  unsigned long dst_offset, unsigned long src_offset,
4841			  unsigned long len)
4842{
4843	const int unit_size = dst->folio_size;
4844	unsigned long cur_off = 0;
4845
4846	if (check_eb_range(dst, dst_offset, len) ||
4847	    check_eb_range(dst, src_offset, len))
4848		return;
4849
4850	if (dst->addr) {
4851		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4852
4853		if (use_memmove)
4854			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4855		else
4856			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4857		return;
4858	}
4859
4860	while (cur_off < len) {
4861		unsigned long cur_src = cur_off + src_offset;
4862		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4863		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4864		unsigned long cur_len = min(src_offset + len - cur_src,
4865					    unit_size - folio_off);
4866		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4867		const bool use_memmove = areas_overlap(src_offset + cur_off,
4868						       dst_offset + cur_off, cur_len);
4869
4870		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4871				      use_memmove);
4872		cur_off += cur_len;
4873	}
4874}
4875
4876void memmove_extent_buffer(const struct extent_buffer *dst,
4877			   unsigned long dst_offset, unsigned long src_offset,
4878			   unsigned long len)
4879{
4880	unsigned long dst_end = dst_offset + len - 1;
4881	unsigned long src_end = src_offset + len - 1;
4882
4883	if (check_eb_range(dst, dst_offset, len) ||
4884	    check_eb_range(dst, src_offset, len))
4885		return;
4886
4887	if (dst_offset < src_offset) {
4888		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4889		return;
4890	}
4891
4892	if (dst->addr) {
4893		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4894		return;
4895	}
4896
4897	while (len > 0) {
4898		unsigned long src_i;
4899		size_t cur;
4900		size_t dst_off_in_folio;
4901		size_t src_off_in_folio;
4902		void *src_addr;
4903		bool use_memmove;
4904
4905		src_i = get_eb_folio_index(dst, src_end);
4906
4907		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4908		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4909
4910		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4911		cur = min(cur, dst_off_in_folio + 1);
4912
4913		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4914					 cur + 1;
4915		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4916					    cur);
4917
4918		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4919				      use_memmove);
4920
4921		dst_end -= cur;
4922		src_end -= cur;
4923		len -= cur;
4924	}
4925}
4926
4927#define GANG_LOOKUP_SIZE	16
4928static struct extent_buffer *get_next_extent_buffer(
4929		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4930{
4931	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4932	struct extent_buffer *found = NULL;
4933	u64 page_start = page_offset(page);
4934	u64 cur = page_start;
4935
4936	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4937	lockdep_assert_held(&fs_info->buffer_lock);
4938
4939	while (cur < page_start + PAGE_SIZE) {
4940		int ret;
4941		int i;
4942
4943		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4944				(void **)gang, cur >> fs_info->sectorsize_bits,
4945				min_t(unsigned int, GANG_LOOKUP_SIZE,
4946				      PAGE_SIZE / fs_info->nodesize));
4947		if (ret == 0)
4948			goto out;
4949		for (i = 0; i < ret; i++) {
4950			/* Already beyond page end */
4951			if (gang[i]->start >= page_start + PAGE_SIZE)
4952				goto out;
4953			/* Found one */
4954			if (gang[i]->start >= bytenr) {
4955				found = gang[i];
4956				goto out;
4957			}
4958		}
4959		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4960	}
4961out:
4962	return found;
4963}
4964
4965static int try_release_subpage_extent_buffer(struct page *page)
4966{
4967	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
4968	u64 cur = page_offset(page);
4969	const u64 end = page_offset(page) + PAGE_SIZE;
4970	int ret;
4971
4972	while (cur < end) {
4973		struct extent_buffer *eb = NULL;
4974
4975		/*
4976		 * Unlike try_release_extent_buffer() which uses folio private
4977		 * to grab buffer, for subpage case we rely on radix tree, thus
4978		 * we need to ensure radix tree consistency.
4979		 *
4980		 * We also want an atomic snapshot of the radix tree, thus go
4981		 * with spinlock rather than RCU.
4982		 */
4983		spin_lock(&fs_info->buffer_lock);
4984		eb = get_next_extent_buffer(fs_info, page, cur);
4985		if (!eb) {
4986			/* No more eb in the page range after or at cur */
4987			spin_unlock(&fs_info->buffer_lock);
4988			break;
4989		}
4990		cur = eb->start + eb->len;
4991
4992		/*
4993		 * The same as try_release_extent_buffer(), to ensure the eb
4994		 * won't disappear out from under us.
4995		 */
4996		spin_lock(&eb->refs_lock);
4997		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4998			spin_unlock(&eb->refs_lock);
4999			spin_unlock(&fs_info->buffer_lock);
5000			break;
5001		}
5002		spin_unlock(&fs_info->buffer_lock);
5003
5004		/*
5005		 * If tree ref isn't set then we know the ref on this eb is a
5006		 * real ref, so just return; this eb will likely be freed soon
5007		 * anyway.
5008		 */
5009		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5010			spin_unlock(&eb->refs_lock);
5011			break;
5012		}
5013
5014		/*
5015		 * Here we don't care about the return value, we will always
5016		 * check the folio private at the end.  And
5017		 * release_extent_buffer() will release the refs_lock.
5018		 */
5019		release_extent_buffer(eb);
5020	}
5021	/*
5022	 * Finally, check whether folio private has been cleared: if all ebs in
5023	 * the page have been released, folio private should be cleared by now.
5024	 */
5025	spin_lock(&page->mapping->i_private_lock);
5026	if (!folio_test_private(page_folio(page)))
5027		ret = 1;
5028	else
5029		ret = 0;
5030	spin_unlock(&page->mapping->i_private_lock);
5031	return ret;
5032
5033}
5034
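/*
 * Descriptive note (added): attempt to release the extent buffer attached to
 * @page via folio private.
 *
 * Return 1 if the page holds no eb or its eb could be released, 0 if the eb
 * is still referenced or under IO.
 */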
5035int try_release_extent_buffer(struct page *page)
5036{
5037	struct folio *folio = page_folio(page);
5038	struct extent_buffer *eb;
5039
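	/*
	 * Descriptive note (added): for subpage metadata (nodesize smaller
	 * than PAGE_SIZE) multiple ebs can share one page, handle that case
	 * separately.
	 */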
5040	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
5041		return try_release_subpage_extent_buffer(page);
5042
5043	/*
5044	 * We need to make sure nobody is changing folio private, as we rely on
5045	 * folio private as the pointer to extent buffer.
5046	 */
5047	spin_lock(&page->mapping->i_private_lock);
5048	if (!folio_test_private(folio)) {
5049		spin_unlock(&page->mapping->i_private_lock);
5050		return 1;
5051	}
5052
5053	eb = folio_get_private(folio);
5054	BUG_ON(!eb);
5055
5056	/*
5057	 * This is a little awful but should be OK; we need to make sure that
5058	 * the eb doesn't disappear out from under us while we're looking at
5059	 * this page.
5060	 */
5061	spin_lock(&eb->refs_lock);
5062	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5063		spin_unlock(&eb->refs_lock);
5064		spin_unlock(&page->mapping->i_private_lock);
5065		return 0;
5066	}
5067	spin_unlock(&page->mapping->i_private_lock);
5068
5069	/*
5070	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5071	 * so just return; this page will likely be freed soon anyway.
5072	 */
5073	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5074		spin_unlock(&eb->refs_lock);
5075		return 0;
5076	}
5077
5078	return release_extent_buffer(eb);
5079}
5080
5081/*
5082 * Attempt to readahead a child block.
5083 *
5084 * @fs_info:	the fs_info
5085 * @bytenr:	bytenr to read
5086 * @owner_root: objectid of the root that owns this eb
5087 * @gen:	generation for the uptodate check, can be 0
5088 * @level:	level for the eb
5089 *
5090 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
5091 * normal uptodate check of the eb, without checking the generation.  If we have
5092 * to read the block we will not block on anything.
5093 */
5094void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
5095				u64 bytenr, u64 owner_root, u64 gen, int level)
5096{
5097	struct btrfs_tree_parent_check check = {
5098		.has_first_key = 0,
5099		.level = level,
5100		.transid = gen
5101	};
5102	struct extent_buffer *eb;
5103	int ret;
5104
5105	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
5106	if (IS_ERR(eb))
5107		return;
5108
5109	if (btrfs_buffer_uptodate(eb, gen, 1)) {
5110		free_extent_buffer(eb);
5111		return;
5112	}
5113
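	/* WAIT_NONE: submit the read without blocking, then drop our ref. */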
5114	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
5115	if (ret < 0)
5116		free_extent_buffer_stale(eb);
5117	else
5118		free_extent_buffer(eb);
5119}
5120
5121/*
5122 * Readahead a node's child block.
5123 *
5124 * @node:	parent node we're reading from
5125 * @slot:	slot in the parent node for the child we want to read
5126 *
5127 * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed
5128 * to by the given slot in the node provided.
5129 */
5130void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5131{
5132	btrfs_readahead_tree_block(node->fs_info,
5133				   btrfs_node_blockptr(node, slot),
5134				   btrfs_header_owner(node),
5135				   btrfs_node_ptr_generation(node, slot),
5136				   btrfs_header_level(node) - 1);
5137}