v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/sched/mm.h>
  10#include <linux/spinlock.h>
  11#include <linux/blkdev.h>
  12#include <linux/swap.h>
  13#include <linux/writeback.h>
  14#include <linux/pagevec.h>
  15#include <linux/prefetch.h>
  16#include <linux/fsverity.h>
  17#include "extent_io.h"
  18#include "extent-io-tree.h"
  19#include "extent_map.h"
  20#include "ctree.h"
  21#include "btrfs_inode.h"
  22#include "bio.h"
  23#include "locking.h"
  24#include "backref.h"
  25#include "disk-io.h"
  26#include "subpage.h"
  27#include "zoned.h"
  28#include "block-group.h"
  29#include "compression.h"
  30#include "fs.h"
  31#include "accessors.h"
  32#include "file-item.h"
  33#include "file.h"
  34#include "dev-replace.h"
  35#include "super.h"
  36#include "transaction.h"
  37
  38static struct kmem_cache *extent_buffer_cache;
  39
  40#ifdef CONFIG_BTRFS_DEBUG
  41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
  42{
  43	struct btrfs_fs_info *fs_info = eb->fs_info;
  44	unsigned long flags;
  45
  46	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  47	list_add(&eb->leak_list, &fs_info->allocated_ebs);
  48	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  49}
  50
  51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
  52{
  53	struct btrfs_fs_info *fs_info = eb->fs_info;
  54	unsigned long flags;
  55
  56	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  57	list_del(&eb->leak_list);
  58	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  59}
  60
  61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  62{
  63	struct extent_buffer *eb;
  64	unsigned long flags;
  65
  66	/*
  67	 * If we didn't get into open_ctree our allocated_ebs will not be
  68	 * initialized, so just skip this.
  69	 */
  70	if (!fs_info->allocated_ebs.next)
  71		return;
  72
  73	WARN_ON(!list_empty(&fs_info->allocated_ebs));
  74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  75	while (!list_empty(&fs_info->allocated_ebs)) {
  76		eb = list_first_entry(&fs_info->allocated_ebs,
  77				      struct extent_buffer, leak_list);
  78		pr_err(
  79	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
  80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  81		       btrfs_header_owner(eb));
  82		list_del(&eb->leak_list);
  83		WARN_ON_ONCE(1);
  84		kmem_cache_free(extent_buffer_cache, eb);
  85	}
  86	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  87}
  88#else
  89#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
  90#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
  91#endif
  92
  93/*
  94 * Structure to record info about the bio being assembled, and other info like
  95 * how many bytes are there before stripe/ordered extent boundary.
  96 */
  97struct btrfs_bio_ctrl {
  98	struct btrfs_bio *bbio;
  99	enum btrfs_compression_type compress_type;
 100	u32 len_to_oe_boundary;
 101	blk_opf_t opf;
 102	btrfs_bio_end_io_t end_io_func;
 103	struct writeback_control *wbc;
 104
 105	/*
 106	 * The sectors of the page which are going to be submitted by
 107	 * extent_writepage_io().
 108	 * This is to avoid touching ranges covered by compression/inline.
 109	 */
 110	unsigned long submit_bitmap;
 111};
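/*
 * For example, btrfs_read_folio() below keeps this structure on the stack and
 * initializes it as:
 *
 *	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
 *
 * The write paths additionally provide ->wbc, and ->end_io_func is filled in
 * right before ranges are queued (see btrfs_do_readpage() and
 * extent_writepage_io()).
 */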
 112
 113static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 114{
 115	struct btrfs_bio *bbio = bio_ctrl->bbio;
 116
 117	if (!bbio)
 118		return;
 119
 120	/* Caller should ensure the bio has at least some range added */
 121	ASSERT(bbio->bio.bi_iter.bi_size);
 122
 123	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
 124	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
 125		btrfs_submit_compressed_read(bbio);
 126	else
 127		btrfs_submit_bbio(bbio, 0);
 128
 129	/* The bbio is owned by the end_io handler now */
 130	bio_ctrl->bbio = NULL;
 131}
 132
 133/*
 134 * Submit or fail the current bio in the bio_ctrl structure.
 135 */
 136static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
 137{
 138	struct btrfs_bio *bbio = bio_ctrl->bbio;
 139
 140	if (!bbio)
 141		return;
 142
 143	if (ret) {
 144		ASSERT(ret < 0);
 145		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
 146		/* The bio is owned by the end_io handler now */
 147		bio_ctrl->bbio = NULL;
 148	} else {
 149		submit_one_bio(bio_ctrl);
 150	}
 151}
 152
 153int __init extent_buffer_init_cachep(void)
 154{
 155	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 156						sizeof(struct extent_buffer), 0, 0,
 157						NULL);
 158	if (!extent_buffer_cache)
 159		return -ENOMEM;
 160
 161	return 0;
 162}
 163
 164void __cold extent_buffer_free_cachep(void)
 165{
 166	/*
  167	 * Make sure all delayed rcu frees are flushed before we
 168	 * destroy caches.
 169	 */
 170	rcu_barrier();
 171	kmem_cache_destroy(extent_buffer_cache);
 172}
 173
 174static void process_one_folio(struct btrfs_fs_info *fs_info,
 175			      struct folio *folio, const struct folio *locked_folio,
 176			      unsigned long page_ops, u64 start, u64 end)
 177{
 178	u32 len;
 179
 180	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
 181	len = end + 1 - start;
 182
 183	if (page_ops & PAGE_SET_ORDERED)
 184		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
 185	if (page_ops & PAGE_START_WRITEBACK) {
 186		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 187		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
 188	}
 189	if (page_ops & PAGE_END_WRITEBACK)
 190		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
 191
 192	if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
 193		btrfs_folio_end_lock(fs_info, folio, start, len);
 194}
 195
 196static void __process_folios_contig(struct address_space *mapping,
 197				    const struct folio *locked_folio, u64 start,
 198				    u64 end, unsigned long page_ops)
 199{
 200	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
 201	pgoff_t start_index = start >> PAGE_SHIFT;
 202	pgoff_t end_index = end >> PAGE_SHIFT;
 203	pgoff_t index = start_index;
 204	struct folio_batch fbatch;
 205	int i;
 206
 207	folio_batch_init(&fbatch);
 208	while (index <= end_index) {
 209		int found_folios;
 210
 211		found_folios = filemap_get_folios_contig(mapping, &index,
 212				end_index, &fbatch);
 213		for (i = 0; i < found_folios; i++) {
 214			struct folio *folio = fbatch.folios[i];
 215
 216			process_one_folio(fs_info, folio, locked_folio,
 217					  page_ops, start, end);
 218		}
 219		folio_batch_release(&fbatch);
 220		cond_resched();
 221	}
 222}
 223
 224static noinline void __unlock_for_delalloc(const struct inode *inode,
 225					   const struct folio *locked_folio,
 226					   u64 start, u64 end)
 227{
 228	unsigned long index = start >> PAGE_SHIFT;
 229	unsigned long end_index = end >> PAGE_SHIFT;
 230
 231	ASSERT(locked_folio);
 232	if (index == locked_folio->index && end_index == index)
 233		return;
 234
 235	__process_folios_contig(inode->i_mapping, locked_folio, start, end,
 236				PAGE_UNLOCK);
 237}
 238
 239static noinline int lock_delalloc_folios(struct inode *inode,
 240					 const struct folio *locked_folio,
 241					 u64 start, u64 end)
 242{
 243	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 244	struct address_space *mapping = inode->i_mapping;
 245	pgoff_t start_index = start >> PAGE_SHIFT;
 246	pgoff_t end_index = end >> PAGE_SHIFT;
 247	pgoff_t index = start_index;
 248	u64 processed_end = start;
 249	struct folio_batch fbatch;
 250
 251	if (index == locked_folio->index && index == end_index)
 252		return 0;
 253
 254	folio_batch_init(&fbatch);
 255	while (index <= end_index) {
 256		unsigned int found_folios, i;
 257
 258		found_folios = filemap_get_folios_contig(mapping, &index,
 259				end_index, &fbatch);
 260		if (found_folios == 0)
 261			goto out;
 262
 263		for (i = 0; i < found_folios; i++) {
 264			struct folio *folio = fbatch.folios[i];
 265			u64 range_start;
 266			u32 range_len;
 267
 268			if (folio == locked_folio)
 269				continue;
 270
 271			folio_lock(folio);
 272			if (!folio_test_dirty(folio) || folio->mapping != mapping) {
 273				folio_unlock(folio);
 274				goto out;
 275			}
 276			range_start = max_t(u64, folio_pos(folio), start);
 277			range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
 278					  end + 1) - range_start;
 279			btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 280
 281			processed_end = range_start + range_len - 1;
 282		}
 283		folio_batch_release(&fbatch);
 284		cond_resched();
 285	}
 286
 287	return 0;
 288out:
 289	folio_batch_release(&fbatch);
 290	if (processed_end > start)
 291		__unlock_for_delalloc(inode, locked_folio, start,
 292				      processed_end);
 293	return -EAGAIN;
 294}
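/*
 * Note: -EAGAIN here means some folio in the range was truncated or is no
 * longer dirty.  The only caller in this file, find_lock_delalloc_range(),
 * reacts by shrinking max_bytes to PAGE_SIZE and retrying once before giving
 * up (see the "again:" loop below).
 */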
 295
 296/*
 297 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 298 * more than @max_bytes.
 299 *
 300 * @start:	The original start bytenr to search.
 301 *		Will store the extent range start bytenr.
 302 * @end:	The original end bytenr of the search range
 303 *		Will store the extent range end bytenr.
 304 *
 305 * Return true if we find a delalloc range which starts inside the original
 306 * range, and @start/@end will store the delalloc range start/end.
 307 *
 308 * Return false if we can't find any delalloc range which starts inside the
 309 * original range, and @start/@end will be the non-delalloc range start/end.
 310 */
 311EXPORT_FOR_TESTS
 312noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 313						 struct folio *locked_folio,
 314						 u64 *start, u64 *end)
 315{
 316	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 317	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 318	const u64 orig_start = *start;
 319	const u64 orig_end = *end;
 320	/* The sanity tests may not set a valid fs_info. */
 321	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
 322	u64 delalloc_start;
 323	u64 delalloc_end;
 324	bool found;
 325	struct extent_state *cached_state = NULL;
 326	int ret;
 327	int loops = 0;
 328
 329	/* Caller should pass a valid @end to indicate the search range end */
 330	ASSERT(orig_end > orig_start);
 331
 332	/* The range should at least cover part of the folio */
 333	ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
 334		 orig_end <= folio_pos(locked_folio)));
 335again:
 336	/* step one, find a bunch of delalloc bytes starting at start */
 337	delalloc_start = *start;
 338	delalloc_end = 0;
 339	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 340					  max_bytes, &cached_state);
 341	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
 342		*start = delalloc_start;
 343
 344		/* @delalloc_end can be -1, never go beyond @orig_end */
 345		*end = min(delalloc_end, orig_end);
 346		free_extent_state(cached_state);
 347		return false;
 348	}
 349
 350	/*
 351	 * start comes from the offset of locked_folio.  We have to lock
 352	 * folios in order, so we can't process delalloc bytes before
 353	 * locked_folio
 354	 */
 355	if (delalloc_start < *start)
 356		delalloc_start = *start;
 357
 358	/*
 359	 * make sure to limit the number of folios we try to lock down
 360	 */
 361	if (delalloc_end + 1 - delalloc_start > max_bytes)
 362		delalloc_end = delalloc_start + max_bytes - 1;
 363
  364	/* step two, lock all the folios after the folio that has start */
 365	ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
 366				   delalloc_end);
 367	ASSERT(!ret || ret == -EAGAIN);
 368	if (ret == -EAGAIN) {
  369		/* some of the folios are gone, let's avoid looping by
 370		 * shortening the size of the delalloc range we're searching
 371		 */
 372		free_extent_state(cached_state);
 373		cached_state = NULL;
 374		if (!loops) {
 375			max_bytes = PAGE_SIZE;
 376			loops = 1;
 377			goto again;
 378		} else {
 379			found = false;
 380			goto out_failed;
 381		}
 382	}
 383
 384	/* step three, lock the state bits for the whole range */
 385	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 386
 387	/* then test to make sure it is all still delalloc */
 388	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 389			     EXTENT_DELALLOC, cached_state);
 390
 391	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 392	if (!ret) {
 393		__unlock_for_delalloc(inode, locked_folio, delalloc_start,
 394				      delalloc_end);
 395		cond_resched();
 396		goto again;
 397	}
 398	*start = delalloc_start;
 399	*end = delalloc_end;
 400out_failed:
 401	return found;
 402}
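/*
 * Usage sketch (mirroring the loop in writepage_delalloc() below): the
 * writeback path walks all delalloc sub-ranges of a folio roughly like
 *
 *	delalloc_start = folio_pos(folio);
 *	while (delalloc_start < page_end) {
 *		delalloc_end = page_end;
 *		if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
 *					      &delalloc_start, &delalloc_end)) {
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		... handle the locked range [delalloc_start, delalloc_end] ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */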
 403
 404void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 405				  const struct folio *locked_folio,
 406				  struct extent_state **cached,
 407				  u32 clear_bits, unsigned long page_ops)
 408{
 409	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
 410
 411	__process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
 412				end, page_ops);
 413}
 414
 415static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
 416{
 417	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 418
 419	if (!fsverity_active(folio->mapping->host) ||
 420	    btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
 421	    start >= i_size_read(folio->mapping->host))
 422		return true;
 423	return fsverity_verify_folio(folio);
 424}
 425
 426static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
 427{
 428	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 429
 430	ASSERT(folio_pos(folio) <= start &&
 431	       start + len <= folio_pos(folio) + PAGE_SIZE);
 432
 433	if (uptodate && btrfs_verify_folio(folio, start, len))
 434		btrfs_folio_set_uptodate(fs_info, folio, start, len);
 435	else
 436		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
 437
 438	if (!btrfs_is_subpage(fs_info, folio->mapping))
 439		folio_unlock(folio);
 440	else
 441		btrfs_folio_end_lock(fs_info, folio, start, len);
 442}
 443
 444/*
 445 * After a write IO is done, we need to:
 446 *
 447 * - clear the uptodate bits on error
 448 * - clear the writeback bits in the extent tree for the range
  449 * - folio_end_writeback()  if there is no more pending io for the folio
 450 *
 451 * Scheduling is not allowed, so the extent state tree is expected
 452 * to have one and only one object corresponding to this IO.
 453 */
 454static void end_bbio_data_write(struct btrfs_bio *bbio)
 455{
 456	struct btrfs_fs_info *fs_info = bbio->fs_info;
 457	struct bio *bio = &bbio->bio;
 458	int error = blk_status_to_errno(bio->bi_status);
 459	struct folio_iter fi;
 460	const u32 sectorsize = fs_info->sectorsize;
 461
 462	ASSERT(!bio_flagged(bio, BIO_CLONED));
 463	bio_for_each_folio_all(fi, bio) {
 464		struct folio *folio = fi.folio;
 465		u64 start = folio_pos(folio) + fi.offset;
 466		u32 len = fi.length;
 467
 468		/* Only order 0 (single page) folios are allowed for data. */
 469		ASSERT(folio_order(folio) == 0);
 470
 471		/* Our read/write should always be sector aligned. */
 472		if (!IS_ALIGNED(fi.offset, sectorsize))
 473			btrfs_err(fs_info,
 474		"partial page write in btrfs with offset %zu and length %zu",
 475				  fi.offset, fi.length);
 476		else if (!IS_ALIGNED(fi.length, sectorsize))
 477			btrfs_info(fs_info,
 478		"incomplete page write with offset %zu and length %zu",
 479				   fi.offset, fi.length);
 480
 481		btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
 482					    !error);
 483		if (error)
 484			mapping_set_error(folio->mapping, error);
 485		btrfs_folio_clear_writeback(fs_info, folio, start, len);
 486	}
 487
 488	bio_put(bio);
 489}
 490
 491static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
 492{
 493	ASSERT(folio_test_locked(folio));
 494	if (!btrfs_is_subpage(fs_info, folio->mapping))
 495		return;
 496
 497	ASSERT(folio_test_private(folio));
 498	btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
 499}
 500
 501/*
 502 * After a data read IO is done, we need to:
 503 *
 504 * - clear the uptodate bits on error
 505 * - set the uptodate bits if things worked
 506 * - set the folio up to date if all extents in the tree are uptodate
 507 * - clear the lock bit in the extent tree
 508 * - unlock the folio if there are no other extents locked for it
 509 *
 510 * Scheduling is not allowed, so the extent state tree is expected
 511 * to have one and only one object corresponding to this IO.
 512 */
 513static void end_bbio_data_read(struct btrfs_bio *bbio)
 514{
 515	struct btrfs_fs_info *fs_info = bbio->fs_info;
 516	struct bio *bio = &bbio->bio;
 517	struct folio_iter fi;
 518	const u32 sectorsize = fs_info->sectorsize;
 519
 520	ASSERT(!bio_flagged(bio, BIO_CLONED));
 521	bio_for_each_folio_all(fi, &bbio->bio) {
 522		bool uptodate = !bio->bi_status;
 523		struct folio *folio = fi.folio;
 524		struct inode *inode = folio->mapping->host;
 525		u64 start;
 526		u64 end;
 527		u32 len;
 528
 529		/* For now only order 0 folios are supported for data. */
 530		ASSERT(folio_order(folio) == 0);
 531		btrfs_debug(fs_info,
 532			"%s: bi_sector=%llu, err=%d, mirror=%u",
 533			__func__, bio->bi_iter.bi_sector, bio->bi_status,
 534			bbio->mirror_num);
 535
 536		/*
 537		 * We always issue full-sector reads, but if some block in a
 538		 * folio fails to read, blk_update_request() will advance
 539		 * bv_offset and adjust bv_len to compensate.  Print a warning
 540		 * for unaligned offsets, and an error if they don't add up to
 541		 * a full sector.
 542		 */
 543		if (!IS_ALIGNED(fi.offset, sectorsize))
 544			btrfs_err(fs_info,
 545		"partial page read in btrfs with offset %zu and length %zu",
 546				  fi.offset, fi.length);
 547		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
 548			btrfs_info(fs_info,
 549		"incomplete page read with offset %zu and length %zu",
 550				   fi.offset, fi.length);
 551
 552		start = folio_pos(folio) + fi.offset;
 553		end = start + fi.length - 1;
 554		len = fi.length;
 555
 556		if (likely(uptodate)) {
 557			loff_t i_size = i_size_read(inode);
 558			pgoff_t end_index = i_size >> folio_shift(folio);
 559
 560			/*
 561			 * Zero out the remaining part if this range straddles
 562			 * i_size.
 563			 *
 564			 * Here we should only zero the range inside the folio,
 565			 * not touch anything else.
 566			 *
 567			 * NOTE: i_size is exclusive while end is inclusive.
 568			 */
 569			if (folio_index(folio) == end_index && i_size <= end) {
 570				u32 zero_start = max(offset_in_folio(folio, i_size),
 571						     offset_in_folio(folio, start));
 572				u32 zero_len = offset_in_folio(folio, end) + 1 -
 573					       zero_start;
 574
 575				folio_zero_range(folio, zero_start, zero_len);
 576			}
 577		}
 578
 579		/* Update page status and unlock. */
 580		end_folio_read(folio, uptodate, start, len);
 581	}
 582	bio_put(bio);
 583}
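/*
 * Worked example of the zeroing above, assuming a 4K page size: with
 * i_size == 6K, a read of the order-0 folio covering file range [4K, 8K)
 * has folio_index == end_index == 1 and i_size <= end, so
 * zero_start = offset_in_folio(folio, 6K) = 2K and zero_len = 2K, clearing
 * the part of the folio beyond EOF before it is marked uptodate.
 */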
 584
 585/*
 586 * Populate every free slot in a provided array with folios using GFP_NOFS.
 587 *
 588 * @nr_folios:   number of folios to allocate
 589 * @folio_array: the array to fill with folios; any existing non-NULL entries in
 590 *		 the array will be skipped
 591 *
 592 * Return: 0        if all folios were able to be allocated;
 593 *         -ENOMEM  otherwise, the partially allocated folios would be freed and
 594 *                  the array slots zeroed
 595 */
 596int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
 597{
 598	for (int i = 0; i < nr_folios; i++) {
 599		if (folio_array[i])
 600			continue;
 601		folio_array[i] = folio_alloc(GFP_NOFS, 0);
 602		if (!folio_array[i])
 603			goto error;
 604	}
 605	return 0;
 606error:
 607	for (int i = 0; i < nr_folios; i++) {
 608		if (folio_array[i])
 609			folio_put(folio_array[i]);
 610	}
 611	return -ENOMEM;
 612}
 613
 614/*
 615 * Populate every free slot in a provided array with pages, using GFP_NOFS.
 616 *
 617 * @nr_pages:   number of pages to allocate
 618 * @page_array: the array to fill with pages; any existing non-null entries in
 619 *		the array will be skipped
 620 * @nofail:	whether using __GFP_NOFAIL flag
 621 *
 622 * Return: 0        if all pages were able to be allocated;
 623 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 624 *                  the array slots zeroed
 625 */
 626int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 627			   bool nofail)
 628{
 629	const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
 630	unsigned int allocated;
 631
 632	for (allocated = 0; allocated < nr_pages;) {
 633		unsigned int last = allocated;
 634
 635		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
 636		if (unlikely(allocated == last)) {
 637			/* No progress, fail and do cleanup. */
 638			for (int i = 0; i < allocated; i++) {
 639				__free_page(page_array[i]);
 640				page_array[i] = NULL;
 641			}
 642			return -ENOMEM;
 643		}
 644	}
 645	return 0;
 646}
 647
 648/*
 649 * Populate needed folios for the extent buffer.
 650 *
 651 * For now, the folios populated are always in order 0 (aka, single page).
 652 */
 653static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
 654{
 655	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
 656	int num_pages = num_extent_pages(eb);
 657	int ret;
 658
 659	ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
 660	if (ret < 0)
 661		return ret;
 662
 663	for (int i = 0; i < num_pages; i++)
 664		eb->folios[i] = page_folio(page_array[i]);
 665	eb->folio_size = PAGE_SIZE;
 666	eb->folio_shift = PAGE_SHIFT;
 667	return 0;
 668}
 669
 670static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
 671				struct folio *folio, u64 disk_bytenr,
 672				unsigned int pg_offset)
 673{
 674	struct bio *bio = &bio_ctrl->bbio->bio;
 675	struct bio_vec *bvec = bio_last_bvec_all(bio);
 676	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
 677	struct folio *bv_folio = page_folio(bvec->bv_page);
 678
 679	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
 680		/*
 681		 * For compression, all IO should have its logical bytenr set
 682		 * to the starting bytenr of the compressed extent.
 683		 */
 684		return bio->bi_iter.bi_sector == sector;
 685	}
 686
 687	/*
 688	 * The contig check requires the following conditions to be met:
 689	 *
  690	 * 1) The folios belong to the same inode
 691	 *    This is implied by the call chain.
 692	 *
 693	 * 2) The range has adjacent logical bytenr
 694	 *
 695	 * 3) The range has adjacent file offset
 696	 *    This is required for the usage of btrfs_bio->file_offset.
 697	 */
 698	return bio_end_sector(bio) == sector &&
 699		folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
 700		folio_pos(folio) + pg_offset;
 701}
 702
 703static void alloc_new_bio(struct btrfs_inode *inode,
 704			  struct btrfs_bio_ctrl *bio_ctrl,
 705			  u64 disk_bytenr, u64 file_offset)
 706{
 707	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 708	struct btrfs_bio *bbio;
 709
 710	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
 711			       bio_ctrl->end_io_func, NULL);
 712	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 713	bbio->inode = inode;
 714	bbio->file_offset = file_offset;
 715	bio_ctrl->bbio = bbio;
 716	bio_ctrl->len_to_oe_boundary = U32_MAX;
 717
 718	/* Limit data write bios to the ordered boundary. */
 719	if (bio_ctrl->wbc) {
 720		struct btrfs_ordered_extent *ordered;
 721
 722		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 723		if (ordered) {
 724			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
 725					ordered->file_offset +
 726					ordered->disk_num_bytes - file_offset);
 727			bbio->ordered = ordered;
 728		}
 729
 730		/*
 731		 * Pick the last added device to support cgroup writeback.  For
 732		 * multi-device file systems this means blk-cgroup policies have
 733		 * to always be set on the last added/replaced device.
 734		 * This is a bit odd but has been like that for a long time.
 735		 */
 736		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
 737		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
 738	}
 739}
 740
 741/*
 742 * @disk_bytenr: logical bytenr where the write will be
  743 * @folio:	folio to add to the bio
  744 * @size:	portion of the folio that we want to write to
  745 * @pg_offset:	offset within the folio at which to add the range, also used
  746 *              to check whether we are adding a contiguous range to the previous one
  747 *
  748 * This will either add the folio range into the existing @bio_ctrl->bbio, or
  749 * allocate a new one in @bio_ctrl->bbio.
  750 * The mirror number for this IO should already be initialized in
 751 * @bio_ctrl->mirror_num.
 752 */
 753static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
 754			       u64 disk_bytenr, struct folio *folio,
 755			       size_t size, unsigned long pg_offset)
 756{
 757	struct btrfs_inode *inode = folio_to_inode(folio);
 758
 759	ASSERT(pg_offset + size <= PAGE_SIZE);
 760	ASSERT(bio_ctrl->end_io_func);
 761
 762	if (bio_ctrl->bbio &&
 763	    !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
 764		submit_one_bio(bio_ctrl);
 765
 766	do {
 767		u32 len = size;
 768
 769		/* Allocate new bio if needed */
 770		if (!bio_ctrl->bbio) {
 771			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
 772				      folio_pos(folio) + pg_offset);
 773		}
 774
 775		/* Cap to the current ordered extent boundary if there is one. */
 776		if (len > bio_ctrl->len_to_oe_boundary) {
 777			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
 778			ASSERT(is_data_inode(inode));
 779			len = bio_ctrl->len_to_oe_boundary;
 780		}
 781
 782		if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
 783			/* bio full: move on to a new one */
 784			submit_one_bio(bio_ctrl);
 785			continue;
 786		}
 787
 788		if (bio_ctrl->wbc)
 789			wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
 790						 len);
 791
 792		size -= len;
 793		pg_offset += len;
 794		disk_bytenr += len;
 795
 796		/*
 797		 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
 798		 * sector aligned.  alloc_new_bio() then sets it to the end of
 799		 * our ordered extent for writes into zoned devices.
 800		 *
 801		 * When len_to_oe_boundary is tracking an ordered extent, we
 802		 * trust the ordered extent code to align things properly, and
 803		 * the check above to cap our write to the ordered extent
 804		 * boundary is correct.
 805		 *
 806		 * When len_to_oe_boundary is U32_MAX, the cap above would
 807		 * result in a 4095 byte IO for the last folio right before
 808		 * we hit the bio limit of UINT_MAX.  bio_add_folio() has all
 809		 * the checks required to make sure we don't overflow the bio,
 810		 * and we should just ignore len_to_oe_boundary completely
 811		 * unless we're using it to track an ordered extent.
 812		 *
 813		 * It's pretty hard to make a bio sized U32_MAX, but it can
 814		 * happen when the page cache is able to feed us contiguous
 815		 * folios for large extents.
 816		 */
 817		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
 818			bio_ctrl->len_to_oe_boundary -= len;
 819
 820		/* Ordered extent boundary: move on to a new bio. */
 821		if (bio_ctrl->len_to_oe_boundary == 0)
 822			submit_one_bio(bio_ctrl);
 823	} while (size);
 824}
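/*
 * Illustration of the ordered extent split above (hypothetical sizes,
 * assuming a folio large enough to cover them): with len_to_oe_boundary == 8K
 * and a 12K range passed in, the first loop iteration caps len to 8K, adds it
 * to the current bio and submits that bio at the boundary; the next iteration
 * allocates a fresh bio via alloc_new_bio() (which, for writes, re-derives
 * len_to_oe_boundary from the next ordered extent) and adds the remaining 4K.
 */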
 825
 826static int attach_extent_buffer_folio(struct extent_buffer *eb,
 827				      struct folio *folio,
 828				      struct btrfs_subpage *prealloc)
 829{
 830	struct btrfs_fs_info *fs_info = eb->fs_info;
 831	int ret = 0;
 832
 833	/*
 834	 * If the page is mapped to btree inode, we should hold the private
 835	 * lock to prevent race.
 836	 * For cloned or dummy extent buffers, their pages are not mapped and
 837	 * will not race with any other ebs.
 838	 */
 839	if (folio->mapping)
 840		lockdep_assert_held(&folio->mapping->i_private_lock);
 841
 842	if (fs_info->nodesize >= PAGE_SIZE) {
 843		if (!folio_test_private(folio))
 844			folio_attach_private(folio, eb);
 845		else
 846			WARN_ON(folio_get_private(folio) != eb);
 847		return 0;
 848	}
 849
 850	/* Already mapped, just free prealloc */
 851	if (folio_test_private(folio)) {
 852		btrfs_free_subpage(prealloc);
 853		return 0;
 854	}
 855
 856	if (prealloc)
 857		/* Has preallocated memory for subpage */
 858		folio_attach_private(folio, prealloc);
 859	else
 860		/* Do new allocation to attach subpage */
 861		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
 862	return ret;
 863}
 864
 865int set_page_extent_mapped(struct page *page)
 866{
 867	return set_folio_extent_mapped(page_folio(page));
 868}
 869
 870int set_folio_extent_mapped(struct folio *folio)
 871{
 872	struct btrfs_fs_info *fs_info;
 873
 874	ASSERT(folio->mapping);
 875
 876	if (folio_test_private(folio))
 877		return 0;
 878
 879	fs_info = folio_to_fs_info(folio);
 880
 881	if (btrfs_is_subpage(fs_info, folio->mapping))
 882		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 883
 884	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
 885	return 0;
 886}
 887
 888void clear_folio_extent_mapped(struct folio *folio)
 889{
 890	struct btrfs_fs_info *fs_info;
 891
 892	ASSERT(folio->mapping);
 893
 894	if (!folio_test_private(folio))
 895		return;
 896
 897	fs_info = folio_to_fs_info(folio);
 898	if (btrfs_is_subpage(fs_info, folio->mapping))
 899		return btrfs_detach_subpage(fs_info, folio);
 900
 901	folio_detach_private(folio);
 902}
 903
 904static struct extent_map *get_extent_map(struct btrfs_inode *inode,
 905					 struct folio *folio, u64 start,
 906					 u64 len, struct extent_map **em_cached)
 907{
 908	struct extent_map *em;
 909
 910	ASSERT(em_cached);
 911
 912	if (*em_cached) {
 913		em = *em_cached;
 914		if (extent_map_in_tree(em) && start >= em->start &&
 915		    start < extent_map_end(em)) {
 916			refcount_inc(&em->refs);
 917			return em;
 918		}
 919
 920		free_extent_map(em);
 921		*em_cached = NULL;
 922	}
 923
 924	em = btrfs_get_extent(inode, folio, start, len);
 925	if (!IS_ERR(em)) {
 926		BUG_ON(*em_cached);
 927		refcount_inc(&em->refs);
 928		*em_cached = em;
 929	}
 930
 931	return em;
 932}
 933/*
 934 * basic readpage implementation.  Locked extent state structs are inserted
  935 * into the tree and are removed when the IO is done (by the end_io
 936 * handlers)
 937 * XXX JDM: This needs looking at to ensure proper page locking
 938 * return 0 on success, otherwise return error
 939 */
 940static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 941		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
 942{
 943	struct inode *inode = folio->mapping->host;
 944	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 945	u64 start = folio_pos(folio);
 946	const u64 end = start + PAGE_SIZE - 1;
 947	u64 cur = start;
 948	u64 extent_offset;
 949	u64 last_byte = i_size_read(inode);
 950	u64 block_start;
 951	struct extent_map *em;
 952	int ret = 0;
 953	size_t pg_offset = 0;
 954	size_t iosize;
 955	size_t blocksize = fs_info->sectorsize;
 956
 957	ret = set_folio_extent_mapped(folio);
 958	if (ret < 0) {
 959		folio_unlock(folio);
 960		return ret;
 961	}
 962
 963	if (folio->index == last_byte >> folio_shift(folio)) {
 964		size_t zero_offset = offset_in_folio(folio, last_byte);
 965
 966		if (zero_offset) {
 967			iosize = folio_size(folio) - zero_offset;
 968			folio_zero_range(folio, zero_offset, iosize);
 969		}
 970	}
 971	bio_ctrl->end_io_func = end_bbio_data_read;
 972	begin_folio_read(fs_info, folio);
 973	while (cur <= end) {
 974		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
 975		bool force_bio_submit = false;
 976		u64 disk_bytenr;
 977
 978		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
 979		if (cur >= last_byte) {
 980			iosize = folio_size(folio) - pg_offset;
 981			folio_zero_range(folio, pg_offset, iosize);
 982			end_folio_read(folio, true, cur, iosize);
 983			break;
 984		}
 985		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
 986		if (IS_ERR(em)) {
 987			end_folio_read(folio, false, cur, end + 1 - cur);
 988			return PTR_ERR(em);
 989		}
 990		extent_offset = cur - em->start;
 991		BUG_ON(extent_map_end(em) <= cur);
 992		BUG_ON(end < cur);
 993
 994		compress_type = extent_map_compression(em);
 995
 996		iosize = min(extent_map_end(em) - cur, end - cur + 1);
 997		iosize = ALIGN(iosize, blocksize);
 998		if (compress_type != BTRFS_COMPRESS_NONE)
 999			disk_bytenr = em->disk_bytenr;
1000		else
1001			disk_bytenr = extent_map_block_start(em) + extent_offset;
1002		block_start = extent_map_block_start(em);
1003		if (em->flags & EXTENT_FLAG_PREALLOC)
1004			block_start = EXTENT_MAP_HOLE;
1005
1006		/*
1007		 * If we have a file range that points to a compressed extent
1008		 * and it's followed by a consecutive file range that points
1009		 * to the same compressed extent (possibly with a different
1010		 * offset and/or length, so it either points to the whole extent
1011		 * or only part of it), we must make sure we do not submit a
1012		 * single bio to populate the folios for the 2 ranges because
1013		 * this makes the compressed extent read zero out the folios
1014		 * belonging to the 2nd range. Imagine the following scenario:
1015		 *
1016		 *  File layout
1017		 *  [0 - 8K]                     [8K - 24K]
1018		 *    |                               |
1019		 *    |                               |
1020		 * points to extent X,         points to extent X,
1021		 * offset 4K, length of 8K     offset 0, length 16K
1022		 *
1023		 * [extent X, compressed length = 4K uncompressed length = 16K]
1024		 *
1025		 * If the bio to read the compressed extent covers both ranges,
1026		 * it will decompress extent X into the folios belonging to the
1027		 * first range and then it will stop, zeroing out the remaining
1028		 * folios that belong to the other range that points to extent X.
1029		 * So here we make sure we submit 2 bios, one for the first
 1030		 * range and another one for the second range. Both will target
1031		 * the same physical extent from disk, but we can't currently
1032		 * make the compressed bio endio callback populate the folios
1033		 * for both ranges because each compressed bio is tightly
1034		 * coupled with a single extent map, and each range can have
1035		 * an extent map with a different offset value relative to the
1036		 * uncompressed data of our extent and different lengths. This
1037		 * is a corner case so we prioritize correctness over
1038		 * non-optimal behavior (submitting 2 bios for the same extent).
1039		 */
1040		if (compress_type != BTRFS_COMPRESS_NONE &&
1041		    prev_em_start && *prev_em_start != (u64)-1 &&
1042		    *prev_em_start != em->start)
1043			force_bio_submit = true;
1044
1045		if (prev_em_start)
1046			*prev_em_start = em->start;
1047
1048		free_extent_map(em);
1049		em = NULL;
1050
1051		/* we've found a hole, just zero and go on */
1052		if (block_start == EXTENT_MAP_HOLE) {
1053			folio_zero_range(folio, pg_offset, iosize);
1054
1055			end_folio_read(folio, true, cur, iosize);
1056			cur = cur + iosize;
1057			pg_offset += iosize;
1058			continue;
1059		}
1060		/* the get_extent function already copied into the folio */
1061		if (block_start == EXTENT_MAP_INLINE) {
1062			end_folio_read(folio, true, cur, iosize);
1063			cur = cur + iosize;
1064			pg_offset += iosize;
1065			continue;
1066		}
1067
1068		if (bio_ctrl->compress_type != compress_type) {
1069			submit_one_bio(bio_ctrl);
1070			bio_ctrl->compress_type = compress_type;
1071		}
1072
1073		if (force_bio_submit)
1074			submit_one_bio(bio_ctrl);
1075		submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1076				    pg_offset);
1077		cur = cur + iosize;
1078		pg_offset += iosize;
1079	}
1080
1081	return 0;
1082}
1083
1084int btrfs_read_folio(struct file *file, struct folio *folio)
1085{
1086	struct btrfs_inode *inode = folio_to_inode(folio);
1087	const u64 start = folio_pos(folio);
1088	const u64 end = start + folio_size(folio) - 1;
1089	struct extent_state *cached_state = NULL;
1090	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1091	struct extent_map *em_cached = NULL;
1092	int ret;
1093
1094	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
1095	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1096	unlock_extent(&inode->io_tree, start, end, &cached_state);
1097
1098	free_extent_map(em_cached);
1099
1100	/*
1101	 * If btrfs_do_readpage() failed we will want to submit the assembled
1102	 * bio to do the cleanup.
1103	 */
1104	submit_one_bio(&bio_ctrl);
1105	return ret;
1106}
1107
1108static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1109				u64 start, u32 len)
1110{
1111	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1112	const u64 folio_start = folio_pos(folio);
1113	unsigned int start_bit;
1114	unsigned int nbits;
1115
1116	ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
1117	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1118	nbits = len >> fs_info->sectorsize_bits;
1119	ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1120	bitmap_set(delalloc_bitmap, start_bit, nbits);
1121}
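/*
 * Example of the bit math above, assuming a 4K sectorsize (sectorsize_bits
 * == 12) on a larger page: a delalloc range starting 16K into the folio with
 * len == 8K gives start_bit = 16K >> 12 = 4 and nbits = 8K >> 12 = 2, so bits
 * 4 and 5 of the per-folio bitmap are set.  find_next_delalloc_bitmap() below
 * undoes the mapping with the same shifts.
 */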
1122
1123static bool find_next_delalloc_bitmap(struct folio *folio,
1124				      unsigned long *delalloc_bitmap, u64 start,
1125				      u64 *found_start, u32 *found_len)
1126{
1127	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1128	const u64 folio_start = folio_pos(folio);
1129	const unsigned int bitmap_size = fs_info->sectors_per_page;
1130	unsigned int start_bit;
1131	unsigned int first_zero;
1132	unsigned int first_set;
1133
1134	ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
1135
1136	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1137	first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1138	if (first_set >= bitmap_size)
1139		return false;
1140
1141	*found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1142	first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1143	*found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1144	return true;
1145}
1146
1147/*
1148 * Do all of the delayed allocation setup.
1149 *
1150 * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
1151 * The @folio should no longer be touched (treat it as already unlocked).
1152 *
1153 * Return 0 if there is still dirty block that needs to be submitted through
1154 * extent_writepage_io().
1155 * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
1156 * submitted, and @folio is still kept locked.
1157 *
1158 * Return <0 if there is any error hit.
1159 * Any allocated ordered extent range covering this folio will be marked
1160 * finished (IOERR), and @folio is still kept locked.
1161 */
1162static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1163						 struct folio *folio,
1164						 struct btrfs_bio_ctrl *bio_ctrl)
1165{
1166	struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1167	struct writeback_control *wbc = bio_ctrl->wbc;
1168	const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1169	const u64 page_start = folio_pos(folio);
1170	const u64 page_end = page_start + folio_size(folio) - 1;
1171	unsigned long delalloc_bitmap = 0;
1172	/*
1173	 * Save the last found delalloc end. As the delalloc end can go beyond
 1174	 * the page boundary, we cannot rely on the subpage bitmap to locate the
1175	 * last delalloc end.
1176	 */
1177	u64 last_delalloc_end = 0;
1178	/*
1179	 * The range end (exclusive) of the last successfully finished delalloc
1180	 * range.
1181	 * Any range covered by ordered extent must either be manually marked
1182	 * finished (error handling), or has IO submitted (and finish the
1183	 * ordered extent normally).
1184	 *
1185	 * This records the end of ordered extent cleanup if we hit an error.
1186	 */
1187	u64 last_finished_delalloc_end = page_start;
1188	u64 delalloc_start = page_start;
1189	u64 delalloc_end = page_end;
1190	u64 delalloc_to_write = 0;
1191	int ret = 0;
1192	int bit;
1193
1194	/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1195	if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1196		ASSERT(fs_info->sectors_per_page > 1);
1197		btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1198	} else {
1199		bio_ctrl->submit_bitmap = 1;
1200	}
1201
1202	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1203		u64 start = page_start + (bit << fs_info->sectorsize_bits);
1204
1205		btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1206	}
1207
1208	/* Lock all (subpage) delalloc ranges inside the folio first. */
1209	while (delalloc_start < page_end) {
1210		delalloc_end = page_end;
1211		if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1212					      &delalloc_start, &delalloc_end)) {
1213			delalloc_start = delalloc_end + 1;
1214			continue;
1215		}
1216		set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1217				    min(delalloc_end, page_end) + 1 - delalloc_start);
1218		last_delalloc_end = delalloc_end;
1219		delalloc_start = delalloc_end + 1;
1220	}
1221	delalloc_start = page_start;
1222
1223	if (!last_delalloc_end)
1224		goto out;
1225
1226	/* Run the delalloc ranges for the above locked ranges. */
1227	while (delalloc_start < page_end) {
1228		u64 found_start;
1229		u32 found_len;
1230		bool found;
1231
1232		if (!is_subpage) {
1233			/*
1234			 * For non-subpage case, the found delalloc range must
1235			 * cover this folio and there must be only one locked
1236			 * delalloc range.
1237			 */
1238			found_start = page_start;
1239			found_len = last_delalloc_end + 1 - found_start;
1240			found = true;
1241		} else {
1242			found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1243					delalloc_start, &found_start, &found_len);
1244		}
1245		if (!found)
1246			break;
1247		/*
 1248		 * If the subpage range covers the last sector, the delalloc range may
 1249		 * end beyond the folio boundary; use the saved delalloc_end
1250		 * instead.
1251		 */
1252		if (found_start + found_len >= page_end)
1253			found_len = last_delalloc_end + 1 - found_start;
1254
1255		if (ret >= 0) {
1256			/*
1257			 * Some delalloc range may be created by previous folios.
1258			 * Thus we still need to clean up this range during error
1259			 * handling.
1260			 */
1261			last_finished_delalloc_end = found_start;
1262			/* No errors hit so far, run the current delalloc range. */
1263			ret = btrfs_run_delalloc_range(inode, folio,
1264						       found_start,
1265						       found_start + found_len - 1,
1266						       wbc);
1267			if (ret >= 0)
1268				last_finished_delalloc_end = found_start + found_len;
1269		} else {
1270			/*
1271			 * We've hit an error during previous delalloc range,
1272			 * have to cleanup the remaining locked ranges.
1273			 */
1274			unlock_extent(&inode->io_tree, found_start,
1275				      found_start + found_len - 1, NULL);
1276			__unlock_for_delalloc(&inode->vfs_inode, folio,
1277					      found_start,
1278					      found_start + found_len - 1);
1279		}
1280
1281		/*
 1282		 * (compression or inline).  These ranges have their own control
 1283		 * over when to unlock the pages.  We should not touch them
1284		 * on when to unlock the pages.  We should not touch them
1285		 * anymore, so clear the range from the submission bitmap.
1286		 */
1287		if (ret > 0) {
1288			unsigned int start_bit = (found_start - page_start) >>
1289						 fs_info->sectorsize_bits;
1290			unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1291						page_start) >> fs_info->sectorsize_bits;
1292			bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1293		}
1294		/*
1295		 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1296		 * thus for the last range, we cannot touch the folio anymore.
1297		 */
1298		if (found_start + found_len >= last_delalloc_end + 1)
1299			break;
1300
1301		delalloc_start = found_start + found_len;
1302	}
1303	/*
1304	 * It's possible we had some ordered extents created before we hit
1305	 * an error, cleanup non-async successfully created delalloc ranges.
1306	 */
1307	if (unlikely(ret < 0)) {
1308		unsigned int bitmap_size = min(
1309				(last_finished_delalloc_end - page_start) >>
1310				fs_info->sectorsize_bits,
1311				fs_info->sectors_per_page);
1312
1313		for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
1314			btrfs_mark_ordered_io_finished(inode, folio,
1315				page_start + (bit << fs_info->sectorsize_bits),
1316				fs_info->sectorsize, false);
1317		return ret;
1318	}
1319out:
1320	if (last_delalloc_end)
1321		delalloc_end = last_delalloc_end;
1322	else
1323		delalloc_end = page_end;
1324	/*
1325	 * delalloc_end is already one less than the total length, so
1326	 * we don't subtract one from PAGE_SIZE
1327	 */
1328	delalloc_to_write +=
1329		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1330
1331	/*
1332	 * If all ranges are submitted asynchronously, we just need to account
1333	 * for them here.
1334	 */
1335	if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1336		wbc->nr_to_write -= delalloc_to_write;
1337		return 1;
1338	}
1339
1340	if (wbc->nr_to_write < delalloc_to_write) {
1341		int thresh = 8192;
1342
1343		if (delalloc_to_write < thresh * 2)
1344			thresh = delalloc_to_write;
1345		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1346					 thresh);
1347	}
1348
1349	return 0;
1350}
1351
1352/*
1353 * Return 0 if we have submitted or queued the sector for submission.
1354 * Return <0 for critical errors.
1355 *
1356 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1357 */
1358static int submit_one_sector(struct btrfs_inode *inode,
1359			     struct folio *folio,
1360			     u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1361			     loff_t i_size)
1362{
1363	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1364	struct extent_map *em;
1365	u64 block_start;
1366	u64 disk_bytenr;
1367	u64 extent_offset;
1368	u64 em_end;
1369	const u32 sectorsize = fs_info->sectorsize;
1370
1371	ASSERT(IS_ALIGNED(filepos, sectorsize));
1372
1373	/* @filepos >= i_size case should be handled by the caller. */
1374	ASSERT(filepos < i_size);
1375
1376	em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1377	if (IS_ERR(em))
1378		return PTR_ERR_OR_ZERO(em);
1379
1380	extent_offset = filepos - em->start;
1381	em_end = extent_map_end(em);
1382	ASSERT(filepos <= em_end);
1383	ASSERT(IS_ALIGNED(em->start, sectorsize));
1384	ASSERT(IS_ALIGNED(em->len, sectorsize));
1385
1386	block_start = extent_map_block_start(em);
1387	disk_bytenr = extent_map_block_start(em) + extent_offset;
1388
1389	ASSERT(!extent_map_is_compressed(em));
1390	ASSERT(block_start != EXTENT_MAP_HOLE);
1391	ASSERT(block_start != EXTENT_MAP_INLINE);
1392
1393	free_extent_map(em);
1394	em = NULL;
1395
1396	/*
1397	 * Although the PageDirty bit is cleared before entering this
1398	 * function, subpage dirty bit is not cleared.
1399	 * So clear subpage dirty bit here so next time we won't submit
1400	 * a folio for a range already written to disk.
1401	 */
1402	btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1403	btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1404	/*
1405	 * Above call should set the whole folio with writeback flag, even
1406	 * just for a single subpage sector.
1407	 * As long as the folio is properly locked and the range is correct,
1408	 * we should always get the folio with writeback flag.
1409	 */
1410	ASSERT(folio_test_writeback(folio));
1411
1412	submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1413			    sectorsize, filepos - folio_pos(folio));
1414	return 0;
1415}
1416
1417/*
1418 * Helper for extent_writepage().  This calls the writepage start hooks,
1419 * and does the loop to map the page into extents and bios.
1420 *
1421 * We return 1 if the IO is started and the page is unlocked,
1422 * 0 if all went well (page still locked)
1423 * < 0 if there were errors (page still locked)
1424 */
1425static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1426						  struct folio *folio,
1427						  u64 start, u32 len,
1428						  struct btrfs_bio_ctrl *bio_ctrl,
1429						  loff_t i_size)
1430{
1431	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1432	unsigned long range_bitmap = 0;
1433	bool submitted_io = false;
1434	bool error = false;
1435	const u64 folio_start = folio_pos(folio);
1436	u64 cur;
1437	int bit;
1438	int ret = 0;
1439
1440	ASSERT(start >= folio_start &&
1441	       start + len <= folio_start + folio_size(folio));
1442
1443	ret = btrfs_writepage_cow_fixup(folio);
1444	if (ret) {
1445		/* Fixup worker will requeue */
1446		folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1447		folio_unlock(folio);
1448		return 1;
1449	}
1450
1451	for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1452		set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1453	bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1454		   fs_info->sectors_per_page);
1455
1456	bio_ctrl->end_io_func = end_bbio_data_write;
1457
1458	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1459		cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1460
1461		if (cur >= i_size) {
1462			btrfs_mark_ordered_io_finished(inode, folio, cur,
1463						       start + len - cur, true);
1464			/*
1465			 * This range is beyond i_size, thus we don't need to
1466			 * bother writing back.
1467			 * But we still need to clear the dirty subpage bit, or
1468			 * the next time the folio gets dirtied, we will try to
1469			 * writeback the sectors with subpage dirty bits,
1470			 * causing writeback without ordered extent.
1471			 */
1472			btrfs_folio_clear_dirty(fs_info, folio, cur,
1473						start + len - cur);
1474			break;
1475		}
1476		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1477		if (unlikely(ret < 0)) {
1478			/*
1479			 * bio_ctrl may contain a bio crossing several folios.
1480			 * Submit it immediately so that the bio has a chance
1481			 * to finish normally, other than marked as error.
1482			 */
1483			submit_one_bio(bio_ctrl);
1484			/*
1485			 * Failed to grab the extent map which should be very rare.
1486			 * Since there is no bio submitted to finish the ordered
1487			 * extent, we have to manually finish this sector.
1488			 */
1489			btrfs_mark_ordered_io_finished(inode, folio, cur,
1490						       fs_info->sectorsize, false);
1491			error = true;
1492			continue;
1493		}
1494		submitted_io = true;
1495	}
1496
1497	/*
 1498	 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
 1499	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (it is only cleared
 1500	 * by folio_start_writeback() if the folio is not dirty).
1501	 *
1502	 * Here we set writeback and clear for the range. If the full folio
1503	 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1504	 *
1505	 * If we hit any error, the corresponding sector will still be dirty
1506	 * thus no need to clear PAGECACHE_TAG_DIRTY.
1507	 */
1508	if (!submitted_io && !error) {
1509		btrfs_folio_set_writeback(fs_info, folio, start, len);
1510		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1511	}
1512	return ret;
1513}
1514
1515/*
1516 * the writepage semantics are similar to regular writepage.  extent
1517 * records are inserted to lock ranges in the tree, and as dirty areas
1518 * are found, they are marked writeback.  Then the lock bits are removed
1519 * and the end_io handler clears the writeback ranges
1520 *
1521 * Return 0 if everything goes well.
1522 * Return <0 for error.
1523 */
1524static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1525{
1526	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
1527	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1528	int ret;
1529	size_t pg_offset;
1530	loff_t i_size = i_size_read(&inode->vfs_inode);
1531	unsigned long end_index = i_size >> PAGE_SHIFT;
1532
1533	trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
1534
1535	WARN_ON(!folio_test_locked(folio));
1536
1537	pg_offset = offset_in_folio(folio, i_size);
1538	if (folio->index > end_index ||
1539	   (folio->index == end_index && !pg_offset)) {
1540		folio_invalidate(folio, 0, folio_size(folio));
1541		folio_unlock(folio);
1542		return 0;
1543	}
1544
1545	if (folio->index == end_index)
1546		folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1547
1548	/*
1549	 * Default to unlock the whole folio.
 1550	 * The proper bitmap can only be initialized by writepage_delalloc().
1551	 */
1552	bio_ctrl->submit_bitmap = (unsigned long)-1;
1553	ret = set_folio_extent_mapped(folio);
1554	if (ret < 0)
1555		goto done;
1556
1557	ret = writepage_delalloc(inode, folio, bio_ctrl);
1558	if (ret == 1)
1559		return 0;
1560	if (ret)
1561		goto done;
1562
1563	ret = extent_writepage_io(inode, folio, folio_pos(folio),
1564				  PAGE_SIZE, bio_ctrl, i_size);
1565	if (ret == 1)
1566		return 0;
1567
1568	bio_ctrl->wbc->nr_to_write--;
1569
1570done:
1571	if (ret < 0)
1572		mapping_set_error(folio->mapping, ret);
1573	/*
 1574	 * Only unlock ranges that are submitted, as there can be some async
1575	 * submitted ranges inside the folio.
1576	 */
1577	btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1578	ASSERT(ret <= 0);
1579	return ret;
1580}
1581
1582void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1583{
1584	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1585		       TASK_UNINTERRUPTIBLE);
1586}
1587
1588/*
1589 * Lock extent buffer status and pages for writeback.
1590 *
1591 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1592 * extent buffer is not dirty)
 1593 * Return %true if the extent buffer is submitted to bio.
1594 */
1595static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1596			  struct writeback_control *wbc)
1597{
1598	struct btrfs_fs_info *fs_info = eb->fs_info;
1599	bool ret = false;
1600
1601	btrfs_tree_lock(eb);
1602	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1603		btrfs_tree_unlock(eb);
1604		if (wbc->sync_mode != WB_SYNC_ALL)
1605			return false;
1606		wait_on_extent_buffer_writeback(eb);
1607		btrfs_tree_lock(eb);
1608	}
1609
1610	/*
1611	 * We need to do this to prevent races in people who check if the eb is
1612	 * under IO since we can end up having no IO bits set for a short period
1613	 * of time.
1614	 */
1615	spin_lock(&eb->refs_lock);
1616	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1617		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1618		spin_unlock(&eb->refs_lock);
1619		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1620		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1621					 -eb->len,
1622					 fs_info->dirty_metadata_batch);
1623		ret = true;
1624	} else {
1625		spin_unlock(&eb->refs_lock);
1626	}
1627	btrfs_tree_unlock(eb);
1628	return ret;
1629}
1630
1631static void set_btree_ioerr(struct extent_buffer *eb)
1632{
1633	struct btrfs_fs_info *fs_info = eb->fs_info;
1634
1635	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1636
1637	/*
1638	 * A read may stumble upon this buffer later, make sure that it gets an
1639	 * error and knows there was an error.
1640	 */
1641	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1642
1643	/*
1644	 * We need to set the mapping with the io error as well because a write
1645	 * error will flip the file system readonly, and then syncfs() will
1646	 * return a 0 because we are readonly if we don't modify the err seq for
1647	 * the superblock.
1648	 */
1649	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1650
1651	/*
1652	 * If writeback for a btree extent that doesn't belong to a log tree
1653	 * failed, set the fs-wide btree error flag (see the switch below).
1654	 * We do this because while the transaction is running and before it's
1655	 * committing (when we call filemap_fdata[write|wait]_range against
1656	 * the btree inode), the VM might call
1657	 * btree_inode->i_mapping->a_ops->writepages() - if it returns an error,
1658	 * or an error happens during writeback, then by the time we're
1659	 * committing the transaction we wouldn't know about it, since the pages
1660	 * may no longer be dirty nor marked for writeback (if a
1661	 * subsequent modification to the extent buffer didn't happen before the
1662	 * transaction commit), which makes filemap_fdata[write|wait]_range
1663	 * unable to find the pages which contain errors at transaction
1664	 * commit time.
1665	 * otherwise we commit a super block with btree roots that point to
1666	 * btree nodes/leafs whose content on disk is invalid - either garbage
1667	 * or the content of some node/leaf from a past generation that got
1668	 * cowed or deleted and is no longer valid.
1669	 *
1670	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1671	 * not be enough - we need to distinguish between log tree extents vs
1672	 * non-log tree extents, and the next filemap_fdatawait_range() call
1673	 * will catch and clear such errors in the mapping - and that call might
1674	 * be from a log sync and not from a transaction commit. Also, checking
1675	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1676	 * not done and would not be reliable - the eb might have been released
1677	 * from memory and reading it back again means that flag would not be
1678	 * set (since it's a runtime flag, not persisted on disk).
1679	 *
1680	 * Using the flags below in the btree inode also gives us the same
1681	 * guarantee that AS_EIO/AS_ENOSPC would: consider the case where
1682	 * writepages() returned success and started writeback for all dirty
1683	 * pages, but that writeback finished with errors before
1684	 * filemap_fdatawait_range() was called - because we were not using
1685	 * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success,
1686	 * as it could not know that writeback errors happened (the pages were
1687	 * no longer tagged for writeback).
1688	 */
1689	switch (eb->log_index) {
1690	case -1:
1691		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1692		break;
1693	case 0:
1694		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1695		break;
1696	case 1:
1697		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1698		break;
1699	default:
1700		BUG(); /* unexpected, logic error */
1701	}
1702}
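
/*
 * A hedged sketch of how the flags set above are consumed later (mirroring
 * the transaction commit and log sync paths; nothing here is new API):
 *
 *	// Transaction commit, after waiting for btree writeback:
 *	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
 *		werr = -EIO;
 *
 *	// Log sync, checking only the log transaction being waited on:
 *	if (test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
 *		werr = -EIO;
 *
 * This is why a plain AS_EIO on the btree mapping is not enough: the log
 * sync path must only see errors for its own extents, not errors from other
 * trees that will be handled by the transaction commit.
 */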
1703
1704/*
1705 * The endio specific version which won't touch any unsafe spinlock in endio
1706 * context.
1707 */
1708static struct extent_buffer *find_extent_buffer_nolock(
1709		const struct btrfs_fs_info *fs_info, u64 start)
1710{
1711	struct extent_buffer *eb;
1712
1713	rcu_read_lock();
1714	eb = radix_tree_lookup(&fs_info->buffer_radix,
1715			       start >> fs_info->sectorsize_bits);
1716	if (eb && atomic_inc_not_zero(&eb->refs)) {
1717		rcu_read_unlock();
1718		return eb;
1719	}
1720	rcu_read_unlock();
1721	return NULL;
1722}
1723
1724static void end_bbio_meta_write(struct btrfs_bio *bbio)
1725{
1726	struct extent_buffer *eb = bbio->private;
1727	struct btrfs_fs_info *fs_info = eb->fs_info;
1728	bool uptodate = !bbio->bio.bi_status;
1729	struct folio_iter fi;
1730	u32 bio_offset = 0;
1731
1732	if (!uptodate)
1733		set_btree_ioerr(eb);
1734
1735	bio_for_each_folio_all(fi, &bbio->bio) {
1736		u64 start = eb->start + bio_offset;
1737		struct folio *folio = fi.folio;
1738		u32 len = fi.length;
1739
1740		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1741		bio_offset += len;
1742	}
1743
1744	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1745	smp_mb__after_atomic();
1746	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1747
1748	bio_put(&bbio->bio);
1749}
1750
1751static void prepare_eb_write(struct extent_buffer *eb)
1752{
1753	u32 nritems;
1754	unsigned long start;
1755	unsigned long end;
1756
1757	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1758
1759	/* Zero the part of the btree block beyond nritems to avoid stale content */
1760	nritems = btrfs_header_nritems(eb);
1761	if (btrfs_header_level(eb) > 0) {
1762		end = btrfs_node_key_ptr_offset(eb, nritems);
1763		memzero_extent_buffer(eb, end, eb->len - end);
1764	} else {
1765		/*
1766		 * Leaf:
1767		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1768		 */
1769		start = btrfs_item_nr_offset(eb, nritems);
1770		end = btrfs_item_nr_offset(eb, 0);
1771		if (nritems == 0)
1772			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1773		else
1774			end += btrfs_item_offset(eb, nritems - 1);
1775		memzero_extent_buffer(eb, start, end - start);
1776	}
1777}
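
/*
 * A worked example for the leaf zeroing above (a sketch for a leaf holding 2
 * items, only to illustrate the offsets): start = btrfs_item_nr_offset(eb, 2),
 * i.e. the first byte past the item headers, and
 * end = btrfs_item_nr_offset(eb, 0) + btrfs_item_offset(eb, 1), i.e. the start
 * of the lowest item data (item data grows down from the end of the leaf).
 * Only the unused gap between the item headers and the item data is zeroed;
 * the header, the items and their data are left untouched.
 */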
1778
1779static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1780					    struct writeback_control *wbc)
1781{
1782	struct btrfs_fs_info *fs_info = eb->fs_info;
1783	struct btrfs_bio *bbio;
1784
1785	prepare_eb_write(eb);
1786
1787	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1788			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1789			       eb->fs_info, end_bbio_meta_write, eb);
1790	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1791	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1792	wbc_init_bio(wbc, &bbio->bio);
1793	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1794	bbio->file_offset = eb->start;
1795	if (fs_info->nodesize < PAGE_SIZE) {
1796		struct folio *folio = eb->folios[0];
1797		bool ret;
1798
1799		folio_lock(folio);
1800		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1801		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1802						       eb->len)) {
1803			folio_clear_dirty_for_io(folio);
1804			wbc->nr_to_write--;
1805		}
1806		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1807				    eb->start - folio_pos(folio));
1808		ASSERT(ret);
1809		wbc_account_cgroup_owner(wbc, folio, eb->len);
1810		folio_unlock(folio);
1811	} else {
1812		int num_folios = num_extent_folios(eb);
1813
1814		for (int i = 0; i < num_folios; i++) {
1815			struct folio *folio = eb->folios[i];
1816			bool ret;
1817
1818			folio_lock(folio);
1819			folio_clear_dirty_for_io(folio);
1820			folio_start_writeback(folio);
1821			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1822			ASSERT(ret);
1823			wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
1824			wbc->nr_to_write -= folio_nr_pages(folio);
1825			folio_unlock(folio);
1826		}
1827	}
1828	btrfs_submit_bbio(bbio, 0);
1829}
1830
1831/*
1832 * Submit one subpage btree page.
1833 *
1834 * The main difference to submit_eb_page() is:
1835 * - Page locking
1836 *   For subpage, we don't rely on page locking at all.
1837 *
1838 * - Flush write bio
1839 *   We only flush bio if we may be unable to fit current extent buffers into
1840 *   current bio.
1841 *
1842 * Return >=0 for the number of submitted extent buffers.
1843 * Return <0 for fatal error.
1844 */
1845static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1846{
1847	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1848	int submitted = 0;
1849	u64 folio_start = folio_pos(folio);
1850	int bit_start = 0;
1851	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1852
1853	/* Lock and write each dirty extent buffer in the range */
1854	while (bit_start < fs_info->sectors_per_page) {
1855		struct btrfs_subpage *subpage = folio_get_private(folio);
1856		struct extent_buffer *eb;
1857		unsigned long flags;
1858		u64 start;
1859
1860		/*
1861		 * Take private lock to ensure the subpage won't be detached
1862		 * in the meantime.
1863		 */
1864		spin_lock(&folio->mapping->i_private_lock);
1865		if (!folio_test_private(folio)) {
1866			spin_unlock(&folio->mapping->i_private_lock);
1867			break;
1868		}
1869		spin_lock_irqsave(&subpage->lock, flags);
1870		if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1871			      subpage->bitmaps)) {
1872			spin_unlock_irqrestore(&subpage->lock, flags);
1873			spin_unlock(&folio->mapping->i_private_lock);
1874			bit_start++;
1875			continue;
1876		}
1877
1878		start = folio_start + bit_start * fs_info->sectorsize;
1879		bit_start += sectors_per_node;
1880
1881		/*
1882		 * Here we just want to grab the eb without touching extra
1883		 * spin locks, so call find_extent_buffer_nolock().
1884		 */
1885		eb = find_extent_buffer_nolock(fs_info, start);
1886		spin_unlock_irqrestore(&subpage->lock, flags);
1887		spin_unlock(&folio->mapping->i_private_lock);
1888
1889		/*
1890		 * The eb has already reached 0 refs thus find_extent_buffer()
1891		 * doesn't return it. We don't need to write back such eb
1892		 * anyway.
1893		 */
1894		if (!eb)
1895			continue;
1896
1897		if (lock_extent_buffer_for_io(eb, wbc)) {
1898			write_one_eb(eb, wbc);
1899			submitted++;
1900		}
1901		free_extent_buffer(eb);
1902	}
1903	return submitted;
1904}
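
/*
 * Illustrative numbers for the bitmap walk above (an assumed configuration,
 * not a requirement): with 64K pages, 4K sectorsize and 16K nodesize we get
 * sectors_per_page == 16 and sectors_per_node == 4.  The dirty bits of the
 * folio then live in subpage->bitmaps at
 * [btrfs_bitmap_nr_dirty * 16, btrfs_bitmap_nr_dirty * 16 + 16), and each
 * dirty hit advances bit_start by 4 so the next lookup starts at the
 * following tree block.
 */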
1905
1906/*
1907 * Submit all folio(s) of one extent buffer.
1908 *
1909 * @folio:	the folio of one extent buffer
1910 * @ctx:	to determine if we need to submit this folio, if the current folio
1911 *		belongs to this eb, we don't need to submit it
1912 *
1913 * The caller should pass each folio in bytenr order, and here we use
1914 * @ctx->eb to determine if we have submitted folios of one extent buffer.
1915 *
1916 * If we have, we just skip until we hit a new folio that doesn't belong to
1917 * the current @ctx->eb.
1918 *
1919 * If not, we submit all the folio(s) of the extent buffer.
1920 *
1921 * Return >0 if we have submitted the extent buffer successfully.
1922 * Return 0 if we don't need to submit the folio, as it's already submitted by
1923 * a previous call.
1924 * Return <0 for fatal error.
1925 */
1926static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1927{
1928	struct writeback_control *wbc = ctx->wbc;
1929	struct address_space *mapping = folio->mapping;
1930	struct extent_buffer *eb;
1931	int ret;
1932
1933	if (!folio_test_private(folio))
1934		return 0;
1935
1936	if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1937		return submit_eb_subpage(folio, wbc);
1938
1939	spin_lock(&mapping->i_private_lock);
1940	if (!folio_test_private(folio)) {
1941		spin_unlock(&mapping->i_private_lock);
1942		return 0;
1943	}
1944
1945	eb = folio_get_private(folio);
1946
1947	/*
1948	 * Shouldn't happen and normally this would be a BUG_ON but no point
1949	 * crashing the machine for something we can survive anyway.
1950	 */
1951	if (WARN_ON(!eb)) {
1952		spin_unlock(&mapping->i_private_lock);
1953		return 0;
1954	}
1955
1956	if (eb == ctx->eb) {
1957		spin_unlock(&mapping->i_private_lock);
1958		return 0;
1959	}
1960	ret = atomic_inc_not_zero(&eb->refs);
1961	spin_unlock(&mapping->i_private_lock);
1962	if (!ret)
1963		return 0;
1964
1965	ctx->eb = eb;
1966
1967	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1968	if (ret) {
1969		if (ret == -EBUSY)
1970			ret = 0;
1971		free_extent_buffer(eb);
1972		return ret;
1973	}
1974
1975	if (!lock_extent_buffer_for_io(eb, wbc)) {
1976		free_extent_buffer(eb);
1977		return 0;
1978	}
1979	/* Implies write in zoned mode. */
1980	if (ctx->zoned_bg) {
1981		/* Mark the last eb in the block group. */
1982		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1983		ctx->zoned_bg->meta_write_pointer += eb->len;
1984	}
1985	write_one_eb(eb, wbc);
1986	free_extent_buffer(eb);
1987	return 1;
1988}
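
/*
 * Example of the ctx->eb skipping above (a sketch assuming 4K pages and a
 * 16K nodesize, so one eb spans four pages): the caller walks folios in
 * bytenr order, so the four pages of the same eb arrive back to back.  The
 * first one grabs the eb, submits it and returns 1, while the remaining
 * three see eb == ctx->eb and return 0 without re-submitting anything.
 */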
1989
1990int btree_write_cache_pages(struct address_space *mapping,
1991				   struct writeback_control *wbc)
1992{
1993	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1994	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1995	int ret = 0;
1996	int done = 0;
1997	int nr_to_write_done = 0;
1998	struct folio_batch fbatch;
1999	unsigned int nr_folios;
2000	pgoff_t index;
2001	pgoff_t end;		/* Inclusive */
2002	int scanned = 0;
2003	xa_mark_t tag;
2004
2005	folio_batch_init(&fbatch);
2006	if (wbc->range_cyclic) {
2007		index = mapping->writeback_index; /* Start from prev offset */
2008		end = -1;
2009		/*
2010		 * When starting from the beginning there is no need to cycle
2011		 * over the range, so mark it as scanned.
2012		 */
2013		scanned = (index == 0);
2014	} else {
2015		index = wbc->range_start >> PAGE_SHIFT;
2016		end = wbc->range_end >> PAGE_SHIFT;
2017		scanned = 1;
2018	}
2019	if (wbc->sync_mode == WB_SYNC_ALL)
2020		tag = PAGECACHE_TAG_TOWRITE;
2021	else
2022		tag = PAGECACHE_TAG_DIRTY;
2023	btrfs_zoned_meta_io_lock(fs_info);
2024retry:
2025	if (wbc->sync_mode == WB_SYNC_ALL)
2026		tag_pages_for_writeback(mapping, index, end);
2027	while (!done && !nr_to_write_done && (index <= end) &&
2028	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2029					    tag, &fbatch))) {
2030		unsigned i;
2031
2032		for (i = 0; i < nr_folios; i++) {
2033			struct folio *folio = fbatch.folios[i];
2034
2035			ret = submit_eb_page(folio, &ctx);
2036			if (ret == 0)
2037				continue;
2038			if (ret < 0) {
2039				done = 1;
2040				break;
2041			}
2042
2043			/*
2044			 * The filesystem may choose to bump up nr_to_write.
2045			 * We have to make sure to honor the new nr_to_write
2046			 * at any time.
2047			 */
2048			nr_to_write_done = wbc->nr_to_write <= 0;
2049		}
2050		folio_batch_release(&fbatch);
2051		cond_resched();
2052	}
2053	if (!scanned && !done) {
2054		/*
2055		 * We hit the last page and there is more work to be done: wrap
2056		 * back to the start of the file
2057		 */
2058		scanned = 1;
2059		index = 0;
2060		goto retry;
2061	}
2062	/*
2063	 * If something went wrong, don't allow any metadata write bio to be
2064	 * submitted.
2065	 *
2066	 * This would prevent use-after-free if we had dirty pages not
2067	 * cleaned up, which can still happen with fuzzed images.
2068	 *
2069	 * - Bad extent tree
2070	 *   Allowing existing tree block to be allocated for other trees.
2071	 *
2072	 * - Log tree operations
2073	 *   Existing tree blocks get allocated to the log tree, which bumps
2074	 *   their generation, then get cleaned in tree re-balance.
2075	 *   Such tree block will not be written back, since it's clean,
2076	 *   thus no WRITTEN flag set.
2077	 *   And after the log writes back, this tree block is not tracked by
2078	 *   any dirty extent_io_tree.
2079	 *
2080	 * - Offending tree block gets re-dirtied from its original owner
2081	 *   Since it has a bumped generation and no WRITTEN flag, it can be
2082	 *   reused without COWing. This tree block will not be tracked
2083	 *   by btrfs_transaction::dirty_pages.
2084	 *
2085	 *   Now such dirty tree block will not be cleaned by any dirty
2086	 *   extent io tree. Thus we don't want to submit such wild eb
2087	 *   if the fs already has error.
2088	 *
2089	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2090	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2091	 */
2092	if (ret > 0)
2093		ret = 0;
2094	if (!ret && BTRFS_FS_ERROR(fs_info))
2095		ret = -EROFS;
2096
2097	if (ctx.zoned_bg)
2098		btrfs_put_block_group(ctx.zoned_bg);
2099	btrfs_zoned_meta_io_unlock(fs_info);
2100	return ret;
2101}
2102
2103/*
2104 * Walk the list of dirty pages of the given address space and write all of them.
2105 *
2106 * @mapping:   address space structure to write
2107 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2108 * @bio_ctrl:  holds context for the write, namely the bio
2109 *
2110 * If a page is already under I/O, write_cache_pages() skips it, even
2111 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2112 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2113 * and msync() need to guarantee that all the data which was dirty at the time
2114 * the call was made get new I/O started against them.  If wbc->sync_mode is
2115 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2116 * existing IO to complete.
2117 */
2118static int extent_write_cache_pages(struct address_space *mapping,
2119			     struct btrfs_bio_ctrl *bio_ctrl)
2120{
2121	struct writeback_control *wbc = bio_ctrl->wbc;
2122	struct inode *inode = mapping->host;
2123	int ret = 0;
2124	int done = 0;
2125	int nr_to_write_done = 0;
2126	struct folio_batch fbatch;
2127	unsigned int nr_folios;
2128	pgoff_t index;
2129	pgoff_t end;		/* Inclusive */
2130	pgoff_t done_index;
2131	int range_whole = 0;
2132	int scanned = 0;
2133	xa_mark_t tag;
2134
2135	/*
2136	 * We have to hold onto the inode so that ordered extents can do their
2137	 * work when the IO finishes.  The alternative to this is failing to add
2138	 * an ordered extent if the igrab() fails there and that is a huge pain
2139	 * to deal with, so instead just hold onto the inode throughout the
2140	 * writepages operation.  If it fails here we are freeing up the inode
2141	 * anyway and we'd rather not waste our time writing out stuff that is
2142	 * going to be truncated anyway.
2143	 */
2144	if (!igrab(inode))
2145		return 0;
2146
2147	folio_batch_init(&fbatch);
2148	if (wbc->range_cyclic) {
2149		index = mapping->writeback_index; /* Start from prev offset */
2150		end = -1;
2151		/*
2152		 * When starting from the beginning there is no need to cycle
2153		 * over the range, so mark it as scanned.
2154		 */
2155		scanned = (index == 0);
2156	} else {
2157		index = wbc->range_start >> PAGE_SHIFT;
2158		end = wbc->range_end >> PAGE_SHIFT;
2159		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2160			range_whole = 1;
2161		scanned = 1;
2162	}
2163
2164	/*
2165	 * We do the tagged writepage as long as the snapshot flush bit is set
2166	 * and we are the first one to do the filemap_flush() on this inode.
2167	 *
2168	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2169	 * not race in and drop the bit.
2170	 */
2171	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2172	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2173			       &BTRFS_I(inode)->runtime_flags))
2174		wbc->tagged_writepages = 1;
2175
2176	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2177		tag = PAGECACHE_TAG_TOWRITE;
2178	else
2179		tag = PAGECACHE_TAG_DIRTY;
2180retry:
2181	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2182		tag_pages_for_writeback(mapping, index, end);
2183	done_index = index;
2184	while (!done && !nr_to_write_done && (index <= end) &&
2185			(nr_folios = filemap_get_folios_tag(mapping, &index,
2186							end, tag, &fbatch))) {
2187		unsigned i;
2188
2189		for (i = 0; i < nr_folios; i++) {
2190			struct folio *folio = fbatch.folios[i];
2191
2192			done_index = folio_next_index(folio);
2193			/*
2194			 * At this point we hold neither the i_pages lock nor
2195			 * the page lock: the page may be truncated or
2196			 * invalidated (changing page->mapping to NULL),
2197			 * or even swizzled back from swapper_space to
2198			 * tmpfs file mapping
2199			 */
2200			if (!folio_trylock(folio)) {
2201				submit_write_bio(bio_ctrl, 0);
2202				folio_lock(folio);
2203			}
2204
2205			if (unlikely(folio->mapping != mapping)) {
2206				folio_unlock(folio);
2207				continue;
2208			}
2209
2210			if (!folio_test_dirty(folio)) {
2211				/* Someone wrote it for us. */
2212				folio_unlock(folio);
2213				continue;
2214			}
2215
2216			/*
2217			 * For subpage case, compression can lead to mixed
2218			 * writeback and dirty flags, e.g:
2219			 * 0     32K    64K    96K    128K
2220			 * |     |//////||/////|   |//|
2221			 *
2222			 * In above case, [32K, 96K) is asynchronously submitted
2223			 * for compression, and [124K, 128K) needs to be written back.
2224			 *
2225			 * If we didn't wait for writeback on page 64K, [124K, 128K)
2226			 * won't be submitted as the page still has writeback flag
2227			 * and will be skipped in the next check.
2228			 *
2229			 * This mixed writeback and dirty case is only possible for
2230			 * subpage case.
2231			 *
2232			 * TODO: Remove this check after migrating compression to
2233			 * regular submission.
2234			 */
2235			if (wbc->sync_mode != WB_SYNC_NONE ||
2236			    btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
2237				if (folio_test_writeback(folio))
2238					submit_write_bio(bio_ctrl, 0);
2239				folio_wait_writeback(folio);
2240			}
2241
2242			if (folio_test_writeback(folio) ||
2243			    !folio_clear_dirty_for_io(folio)) {
2244				folio_unlock(folio);
2245				continue;
2246			}
2247
2248			ret = extent_writepage(folio, bio_ctrl);
2249			if (ret < 0) {
2250				done = 1;
2251				break;
2252			}
2253
2254			/*
2255			 * The filesystem may choose to bump up nr_to_write.
2256			 * We have to make sure to honor the new nr_to_write
2257			 * at any time.
2258			 */
2259			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2260					    wbc->nr_to_write <= 0);
2261		}
2262		folio_batch_release(&fbatch);
2263		cond_resched();
2264	}
2265	if (!scanned && !done) {
2266		/*
2267		 * We hit the last page and there is more work to be done: wrap
2268		 * back to the start of the file
2269		 */
2270		scanned = 1;
2271		index = 0;
2272
2273		/*
2274		 * If we're looping we could run into a page that is locked by a
2275		 * writer and that writer could be waiting on writeback for a
2276		 * page in our current bio, and thus deadlock, so flush the
2277		 * write bio here.
2278		 */
2279		submit_write_bio(bio_ctrl, 0);
2280		goto retry;
2281	}
2282
2283	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2284		mapping->writeback_index = done_index;
2285
2286	btrfs_add_delayed_iput(BTRFS_I(inode));
2287	return ret;
2288}
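
/*
 * A quick note on the sync_mode handling above (general writeback semantics,
 * not btrfs specific): WB_SYNC_ALL callers (fsync()/sync()) use the TOWRITE
 * tag and wait on folios already under writeback, so nothing that was dirty
 * at call time is missed, while WB_SYNC_NONE (background flushing) uses the
 * DIRTY tag and may skip busy folios, relying on a later pass to catch them.
 */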
2289
2290/*
2291 * Submit the pages in the range to bio for call sites whose delalloc range has
2292 * already been run (aka, ordered extent inserted) and all pages are still
2293 * locked.
2294 */
2295void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2296			       u64 start, u64 end, struct writeback_control *wbc,
2297			       bool pages_dirty)
2298{
2299	bool found_error = false;
2300	int ret = 0;
2301	struct address_space *mapping = inode->i_mapping;
2302	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2303	const u32 sectorsize = fs_info->sectorsize;
2304	loff_t i_size = i_size_read(inode);
2305	u64 cur = start;
2306	struct btrfs_bio_ctrl bio_ctrl = {
2307		.wbc = wbc,
2308		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2309	};
2310
2311	if (wbc->no_cgroup_owner)
2312		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2313
2314	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2315
2316	while (cur <= end) {
2317		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2318		u32 cur_len = cur_end + 1 - cur;
2319		struct folio *folio;
2320
2321		folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2322
2323		/*
2324		 * This shouldn't happen, the pages are pinned and locked, this
2325		 * code is just in case, but shouldn't actually be run.
2326		 */
2327		if (IS_ERR(folio)) {
2328			btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2329						       cur, cur_len, false);
2330			mapping_set_error(mapping, PTR_ERR(folio));
2331			cur = cur_end + 1;
2332			continue;
2333		}
2334
2335		ASSERT(folio_test_locked(folio));
2336		if (pages_dirty && folio != locked_folio)
2337			ASSERT(folio_test_dirty(folio));
2338
2339		/*
2340		 * Set the submission bitmap to submit all sectors.
2341		 * extent_writepage_io() will do the truncation correctly.
2342		 */
2343		bio_ctrl.submit_bitmap = (unsigned long)-1;
2344		ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2345					  &bio_ctrl, i_size);
2346		if (ret == 1)
2347			goto next_page;
2348
2349		if (ret)
2350			mapping_set_error(mapping, ret);
2351		btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2352		if (ret < 0)
2353			found_error = true;
2354next_page:
2355		folio_put(folio);
2356		cur = cur_end + 1;
2357	}
2358
2359	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2360}
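
/*
 * Worked example for the loop above (a sketch assuming 64K pages and 4K
 * sectorsize): for start == 16K and end == 48K - 1 there is a single
 * iteration with cur == 16K, cur_end == 48K - 1 (the folio boundary at 64K
 * is further away than @end) and cur_len == 32K, so the whole range is
 * handed to extent_writepage_io() through one folio.
 */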
2361
2362int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2363{
2364	struct inode *inode = mapping->host;
2365	int ret = 0;
2366	struct btrfs_bio_ctrl bio_ctrl = {
2367		.wbc = wbc,
2368		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2369	};
2370
2371	/*
2372	 * Allow only a single thread to do the reloc work in zoned mode to
2373	 * protect the write pointer updates.
2374	 */
2375	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2376	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2377	submit_write_bio(&bio_ctrl, ret);
2378	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2379	return ret;
2380}
2381
2382void btrfs_readahead(struct readahead_control *rac)
2383{
2384	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2385	struct folio *folio;
2386	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
2387	const u64 start = readahead_pos(rac);
2388	const u64 end = start + readahead_length(rac) - 1;
2389	struct extent_state *cached_state = NULL;
2390	struct extent_map *em_cached = NULL;
2391	u64 prev_em_start = (u64)-1;
2392
2393	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
2394
2395	while ((folio = readahead_folio(rac)) != NULL)
2396		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2397
2398	unlock_extent(&inode->io_tree, start, end, &cached_state);
2399
2400	if (em_cached)
2401		free_extent_map(em_cached);
2402	submit_one_bio(&bio_ctrl);
2403}
2404
2405/*
2406 * basic invalidate_folio code, this waits on any locked or writeback
2407 * ranges corresponding to the folio, and then deletes any extent state
2408 * records from the tree
2409 */
2410int extent_invalidate_folio(struct extent_io_tree *tree,
2411			  struct folio *folio, size_t offset)
2412{
2413	struct extent_state *cached_state = NULL;
2414	u64 start = folio_pos(folio);
2415	u64 end = start + folio_size(folio) - 1;
2416	size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2417
2418	/* This function is only called for the btree inode */
2419	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2420
2421	start += ALIGN(offset, blocksize);
2422	if (start > end)
2423		return 0;
2424
2425	lock_extent(tree, start, end, &cached_state);
2426	folio_wait_writeback(folio);
2427
2428	/*
2429	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2430	 * so here we only need to unlock the extent range to free any
2431	 * existing extent state.
2432	 */
2433	unlock_extent(tree, start, end, &cached_state);
2434	return 0;
2435}
2436
2437/*
2438 * a helper for release_folio, this tests for areas of the page that
2439 * are locked or under IO and drops the related state bits if it is safe
2440 * to drop the page.
2441 */
2442static bool try_release_extent_state(struct extent_io_tree *tree,
2443				     struct folio *folio)
2444{
2445	u64 start = folio_pos(folio);
2446	u64 end = start + PAGE_SIZE - 1;
2447	bool ret;
2448
2449	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2450		ret = false;
2451	} else {
2452		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2453				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2454				   EXTENT_QGROUP_RESERVED);
2455		int ret2;
2456
2457		/*
2458		 * At this point we can safely clear everything except the
2459		 * locked bit, the nodatasum bit and the delalloc new bit.
2460		 * The delalloc new bit will be cleared by ordered extent
2461		 * completion.
2462		 */
2463		ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2464
2465		/* If clear_extent_bit() failed for ENOMEM reasons,
2466		 * we can't allow the release to continue.
2467		 */
2468		if (ret2 < 0)
2469			ret = false;
2470		else
2471			ret = true;
2472	}
2473	return ret;
2474}
2475
2476/*
2477 * a helper for release_folio.  As long as there are no locked extents
2478 * in the range corresponding to the page, both state records and extent
2479 * map records are removed
2480 */
2481bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2482{
2483	u64 start = folio_pos(folio);
2484	u64 end = start + PAGE_SIZE - 1;
2485	struct btrfs_inode *inode = folio_to_inode(folio);
2486	struct extent_io_tree *io_tree = &inode->io_tree;
2487
2488	while (start <= end) {
2489		const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2490		const u64 len = end - start + 1;
2491		struct extent_map_tree *extent_tree = &inode->extent_tree;
2492		struct extent_map *em;
2493
2494		write_lock(&extent_tree->lock);
2495		em = lookup_extent_mapping(extent_tree, start, len);
2496		if (!em) {
2497			write_unlock(&extent_tree->lock);
2498			break;
2499		}
2500		if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2501			write_unlock(&extent_tree->lock);
2502			free_extent_map(em);
2503			break;
2504		}
2505		if (test_range_bit_exists(io_tree, em->start,
2506					  extent_map_end(em) - 1, EXTENT_LOCKED))
2507			goto next;
2508		/*
2509		 * If it's not in the list of modified extents, used by a fast
2510		 * fsync, we can remove it. If it's being logged we can safely
2511		 * remove it since fsync took an extra reference on the em.
2512		 */
2513		if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2514			goto remove_em;
2515		/*
2516		 * If it's in the list of modified extents, remove it only if
2517		 * its generation is older than the current one, in which case
2518		 * we don't need it for a fast fsync. Otherwise don't remove it,
2519		 * we could be racing with an ongoing fast fsync that could miss
2520		 * the new extent.
2521		 */
2522		if (em->generation >= cur_gen)
2523			goto next;
2524remove_em:
2525		/*
2526		 * We only remove extent maps that are not in the list of
2527		 * modified extents or that are in the list but with a
2528		 * generation lower than the current generation, so there is no
2529		 * need to set the full fsync flag on the inode (it hurts the
2530		 * fsync performance for workloads with a data size that exceeds
2531		 * or is close to the system's memory).
2532		 */
2533		remove_extent_mapping(inode, em);
2534		/* Once for the inode's extent map tree. */
2535		free_extent_map(em);
2536next:
2537		start = extent_map_end(em);
2538		write_unlock(&extent_tree->lock);
2539
2540		/* Once for us, for the lookup_extent_mapping() reference. */
2541		free_extent_map(em);
2542
2543		if (need_resched()) {
2544			/*
2545			 * If we need to resched but we can't block just exit
2546			 * and leave any remaining extent maps.
2547			 */
2548			if (!gfpflags_allow_blocking(mask))
2549				break;
2550
2551			cond_resched();
2552		}
2553	}
2554	return try_release_extent_state(io_tree, folio);
2555}
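
/*
 * Worked example for the generation check above (a sketch): with a current
 * transaction generation of 100, an extent map on the modified list with
 * generation 99 is safe to drop, as no fast fsync needs it anymore, while
 * one with generation 100 is kept so a concurrent fast fsync does not miss
 * the new extent.
 */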
2556
2557static void __free_extent_buffer(struct extent_buffer *eb)
2558{
2559	kmem_cache_free(extent_buffer_cache, eb);
2560}
2561
2562static int extent_buffer_under_io(const struct extent_buffer *eb)
2563{
2564	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2565		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2566}
2567
2568static bool folio_range_has_eb(struct folio *folio)
2569{
2570	struct btrfs_subpage *subpage;
2571
2572	lockdep_assert_held(&folio->mapping->i_private_lock);
2573
2574	if (folio_test_private(folio)) {
2575		subpage = folio_get_private(folio);
2576		if (atomic_read(&subpage->eb_refs))
2577			return true;
2578	}
2579	return false;
2580}
2581
2582static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2583{
2584	struct btrfs_fs_info *fs_info = eb->fs_info;
2585	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2586
2587	/*
2588	 * For mapped eb, we're going to change the folio private, which should
2589	 * be done under the i_private_lock.
2590	 */
2591	if (mapped)
2592		spin_lock(&folio->mapping->i_private_lock);
2593
2594	if (!folio_test_private(folio)) {
2595		if (mapped)
2596			spin_unlock(&folio->mapping->i_private_lock);
2597		return;
2598	}
2599
2600	if (fs_info->nodesize >= PAGE_SIZE) {
2601		/*
2602		 * We do this since we'll remove the pages after we've
2603		 * removed the eb from the radix tree, so we could race
2604		 * and have this page now attached to the new eb.  So
2605		 * only clear folio if it's still connected to
2606		 * this eb.
2607		 */
2608		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2609			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2610			BUG_ON(folio_test_dirty(folio));
2611			BUG_ON(folio_test_writeback(folio));
2612			/* We need to make sure we haven't be attached to a new eb. */
2613			folio_detach_private(folio);
2614		}
2615		if (mapped)
2616			spin_unlock(&folio->mapping->i_private_lock);
2617		return;
2618	}
2619
2620	/*
2621	 * For subpage, we can have dummy eb with folio private attached.  In
2622	 * this case, we can directly detach the private as such folio is only
2623	 * attached to one dummy eb, no sharing.
2624	 */
2625	if (!mapped) {
2626		btrfs_detach_subpage(fs_info, folio);
2627		return;
2628	}
2629
2630	btrfs_folio_dec_eb_refs(fs_info, folio);
2631
2632	/*
2633	 * We can only detach the folio private if there are no other ebs in the
2634	 * page range and no unfinished IO.
2635	 */
2636	if (!folio_range_has_eb(folio))
2637		btrfs_detach_subpage(fs_info, folio);
2638
2639	spin_unlock(&folio->mapping->i_private_lock);
2640}
2641
2642/* Release all pages attached to the extent buffer */
2643static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2644{
2645	ASSERT(!extent_buffer_under_io(eb));
2646
2647	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2648		struct folio *folio = eb->folios[i];
2649
2650		if (!folio)
2651			continue;
2652
2653		detach_extent_buffer_folio(eb, folio);
2654
2655		/* One for when we allocated the folio. */
2656		folio_put(folio);
2657	}
2658}
2659
2660/*
2661 * Helper for releasing the extent buffer.
2662 */
2663static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2664{
2665	btrfs_release_extent_buffer_pages(eb);
2666	btrfs_leak_debug_del_eb(eb);
2667	__free_extent_buffer(eb);
2668}
2669
2670static struct extent_buffer *
2671__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2672		      unsigned long len)
2673{
2674	struct extent_buffer *eb = NULL;
2675
2676	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2677	eb->start = start;
2678	eb->len = len;
2679	eb->fs_info = fs_info;
2680	init_rwsem(&eb->lock);
2681
2682	btrfs_leak_debug_add_eb(eb);
2683
2684	spin_lock_init(&eb->refs_lock);
2685	atomic_set(&eb->refs, 1);
2686
2687	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2688
2689	return eb;
2690}
2691
2692struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2693{
2694	struct extent_buffer *new;
2695	int num_folios = num_extent_folios(src);
2696	int ret;
2697
2698	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2699	if (new == NULL)
2700		return NULL;
2701
2702	/*
2703	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2704	 * btrfs_release_extent_buffer() has different behavior for
2705	 * UNMAPPED subpage extent buffers.
2706	 */
2707	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2708
2709	ret = alloc_eb_folio_array(new, false);
2710	if (ret) {
2711		btrfs_release_extent_buffer(new);
2712		return NULL;
2713	}
2714
2715	for (int i = 0; i < num_folios; i++) {
2716		struct folio *folio = new->folios[i];
2717
2718		ret = attach_extent_buffer_folio(new, folio, NULL);
2719		if (ret < 0) {
2720			btrfs_release_extent_buffer(new);
2721			return NULL;
2722		}
2723		WARN_ON(folio_test_dirty(folio));
2724	}
2725	copy_extent_buffer_full(new, src);
2726	set_extent_buffer_uptodate(new);
2727
2728	return new;
2729}
2730
2731struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2732						  u64 start, unsigned long len)
2733{
2734	struct extent_buffer *eb;
2735	int num_folios = 0;
2736	int ret;
2737
2738	eb = __alloc_extent_buffer(fs_info, start, len);
2739	if (!eb)
2740		return NULL;
2741
2742	ret = alloc_eb_folio_array(eb, false);
2743	if (ret)
2744		goto err;
2745
2746	num_folios = num_extent_folios(eb);
2747	for (int i = 0; i < num_folios; i++) {
2748		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2749		if (ret < 0)
2750			goto err;
2751	}
2752
2753	set_extent_buffer_uptodate(eb);
2754	btrfs_set_header_nritems(eb, 0);
2755	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2756
2757	return eb;
2758err:
2759	for (int i = 0; i < num_folios; i++) {
2760		if (eb->folios[i]) {
2761			detach_extent_buffer_folio(eb, eb->folios[i]);
2762			folio_put(eb->folios[i]);
2763		}
2764	}
2765	__free_extent_buffer(eb);
2766	return NULL;
2767}
2768
2769struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2770						u64 start)
2771{
2772	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2773}
2774
2775static void check_buffer_tree_ref(struct extent_buffer *eb)
2776{
2777	int refs;
2778	/*
2779	 * The TREE_REF bit is first set when the extent_buffer is added
2780	 * to the radix tree. It is also set again, if it was cleared, when a
2781	 * new reference is created by find_extent_buffer.
2782	 *
2783	 * It is only cleared in two cases: freeing the last non-tree
2784	 * reference to the extent_buffer when its STALE bit is set or
2785	 * calling release_folio when the tree reference is the only reference.
2786	 *
2787	 * In both cases, care is taken to ensure that the extent_buffer's
2788	 * pages are not under io. However, release_folio can be concurrently
2789	 * called with creating new references, which is prone to race
2790	 * conditions between the calls to check_buffer_tree_ref in those
2791	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2792	 *
2793	 * The actual lifetime of the extent_buffer in the radix tree is
2794	 * adequately protected by the refcount, but the TREE_REF bit and
2795	 * its corresponding reference are not. To protect against this
2796	 * class of races, we call check_buffer_tree_ref from the codepaths
2797	 * which trigger io. Note that once io is initiated, TREE_REF can no
2798	 * longer be cleared, so that is the moment at which any such race is
2799	 * best fixed.
2800	 */
2801	refs = atomic_read(&eb->refs);
2802	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2803		return;
2804
2805	spin_lock(&eb->refs_lock);
2806	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2807		atomic_inc(&eb->refs);
2808	spin_unlock(&eb->refs_lock);
2809}
2810
2811static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2812{
2813	int num_folios = num_extent_folios(eb);
2814
2815	check_buffer_tree_ref(eb);
2816
2817	for (int i = 0; i < num_folios; i++)
2818		folio_mark_accessed(eb->folios[i]);
2819}
2820
2821struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2822					 u64 start)
2823{
2824	struct extent_buffer *eb;
2825
2826	eb = find_extent_buffer_nolock(fs_info, start);
2827	if (!eb)
2828		return NULL;
2829	/*
2830	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2831	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2832	 * another task running free_extent_buffer() might have seen that flag
2833	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2834	 * writeback flags not set) and it's still in the tree (flag
2835	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2836	 * decrementing the extent buffer's reference count twice.  So here we
2837	 * could race and increment the eb's reference count, clear its stale
2838	 * flag, mark it as dirty and drop our reference before the other task
2839	 * finishes executing free_extent_buffer, which would later result in
2840	 * an attempt to free an extent buffer that is dirty.
2841	 */
2842	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2843		spin_lock(&eb->refs_lock);
2844		spin_unlock(&eb->refs_lock);
2845	}
2846	mark_extent_buffer_accessed(eb);
2847	return eb;
2848}
2849
2850#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2851struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2852					u64 start)
2853{
2854	struct extent_buffer *eb, *exists = NULL;
2855	int ret;
2856
2857	eb = find_extent_buffer(fs_info, start);
2858	if (eb)
2859		return eb;
2860	eb = alloc_dummy_extent_buffer(fs_info, start);
2861	if (!eb)
2862		return ERR_PTR(-ENOMEM);
2863	eb->fs_info = fs_info;
2864again:
2865	ret = radix_tree_preload(GFP_NOFS);
2866	if (ret) {
2867		exists = ERR_PTR(ret);
2868		goto free_eb;
2869	}
2870	spin_lock(&fs_info->buffer_lock);
2871	ret = radix_tree_insert(&fs_info->buffer_radix,
2872				start >> fs_info->sectorsize_bits, eb);
2873	spin_unlock(&fs_info->buffer_lock);
2874	radix_tree_preload_end();
2875	if (ret == -EEXIST) {
2876		exists = find_extent_buffer(fs_info, start);
2877		if (exists)
2878			goto free_eb;
2879		else
2880			goto again;
2881	}
2882	check_buffer_tree_ref(eb);
2883	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2884
2885	return eb;
2886free_eb:
2887	btrfs_release_extent_buffer(eb);
2888	return exists;
2889}
2890#endif
2891
2892static struct extent_buffer *grab_extent_buffer(
2893		struct btrfs_fs_info *fs_info, struct page *page)
2894{
2895	struct folio *folio = page_folio(page);
2896	struct extent_buffer *exists;
2897
2898	lockdep_assert_held(&page->mapping->i_private_lock);
2899
2900	/*
2901	 * For subpage case, we completely rely on radix tree to ensure we
2902	 * don't try to insert two ebs for the same bytenr.  So here we always
2903	 * return NULL and just continue.
2904	 */
2905	if (fs_info->nodesize < PAGE_SIZE)
2906		return NULL;
2907
2908	/* Page not yet attached to an extent buffer */
2909	if (!folio_test_private(folio))
2910		return NULL;
2911
2912	/*
2913	 * We could have already allocated an eb for this page and attached one,
2914	 * so let's see if we can get a ref on the existing eb. If we can, we
2915	 * know it's good and we can just return that one; otherwise we know we
2916	 * can just overwrite folio private.
2917	 */
2918	exists = folio_get_private(folio);
2919	if (atomic_inc_not_zero(&exists->refs))
2920		return exists;
2921
2922	WARN_ON(PageDirty(page));
2923	folio_detach_private(folio);
2924	return NULL;
2925}
2926
2927static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2928{
2929	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2930		btrfs_err(fs_info, "bad tree block start %llu", start);
2931		return -EINVAL;
2932	}
2933
2934	if (fs_info->nodesize < PAGE_SIZE &&
2935	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2936		btrfs_err(fs_info,
2937		"tree block crosses page boundary, start %llu nodesize %u",
2938			  start, fs_info->nodesize);
2939		return -EINVAL;
2940	}
2941	if (fs_info->nodesize >= PAGE_SIZE &&
2942	    !PAGE_ALIGNED(start)) {
2943		btrfs_err(fs_info,
2944		"tree block is not page aligned, start %llu nodesize %u",
2945			  start, fs_info->nodesize);
2946		return -EINVAL;
2947	}
2948	if (!IS_ALIGNED(start, fs_info->nodesize) &&
2949	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2950		btrfs_warn(fs_info,
2951"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2952			      start, fs_info->nodesize);
2953	}
2954	return 0;
2955}
2956
2957
2958/*
2959 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2960 * Return >0 if there is already another extent buffer for the range,
2961 * and @found_eb_ret would be updated.
2962 * Return -EAGAIN if the filemap has an existing folio but with different size
2963 * than @eb.
2964 * The caller needs to free the existing folios and retry using the same order.
2965 */
2966static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2967				      struct btrfs_subpage *prealloc,
2968				      struct extent_buffer **found_eb_ret)
2969{
2970
2971	struct btrfs_fs_info *fs_info = eb->fs_info;
2972	struct address_space *mapping = fs_info->btree_inode->i_mapping;
2973	const unsigned long index = eb->start >> PAGE_SHIFT;
2974	struct folio *existing_folio = NULL;
2975	int ret;
2976
2977	ASSERT(found_eb_ret);
2978
2979	/* Caller should ensure the folio exists. */
2980	ASSERT(eb->folios[i]);
2981
2982retry:
2983	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2984				GFP_NOFS | __GFP_NOFAIL);
2985	if (!ret)
2986		goto finish;
2987
2988	existing_folio = filemap_lock_folio(mapping, index + i);
2989	/* The folio was in the page cache only for a very short time, just retry. */
2990	if (IS_ERR(existing_folio)) {
2991		existing_folio = NULL;
2992		goto retry;
2993	}
2994
2995	/* For now, we should only have single-page folios for btree inode. */
2996	ASSERT(folio_nr_pages(existing_folio) == 1);
2997
2998	if (folio_size(existing_folio) != eb->folio_size) {
2999		folio_unlock(existing_folio);
3000		folio_put(existing_folio);
3001		return -EAGAIN;
3002	}
3003
3004finish:
3005	spin_lock(&mapping->i_private_lock);
3006	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
3007		/* We're going to reuse the existing page, can drop our folio now. */
3008		__free_page(folio_page(eb->folios[i], 0));
3009		eb->folios[i] = existing_folio;
3010	} else if (existing_folio) {
3011		struct extent_buffer *existing_eb;
3012
3013		existing_eb = grab_extent_buffer(fs_info,
3014						 folio_page(existing_folio, 0));
3015		if (existing_eb) {
3016			/* The extent buffer still exists, we can use it directly. */
3017			*found_eb_ret = existing_eb;
3018			spin_unlock(&mapping->i_private_lock);
3019			folio_unlock(existing_folio);
3020			folio_put(existing_folio);
3021			return 1;
3022		}
3023		/* The extent buffer no longer exists, we can reuse the folio. */
3024		__free_page(folio_page(eb->folios[i], 0));
3025		eb->folios[i] = existing_folio;
3026	}
3027	eb->folio_size = folio_size(eb->folios[i]);
3028	eb->folio_shift = folio_shift(eb->folios[i]);
3029	/* Should not fail, as we have preallocated the memory. */
3030	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3031	ASSERT(!ret);
3032	/*
3033	 * Record that we have an extra eb under allocation, so that
3034	 * detach_extent_buffer_page() won't release the folio private while the
3035	 * eb hasn't been inserted into the radix tree yet.
3036	 *
3037	 * The ref will be decreased when the eb releases the page, in
3038	 * detach_extent_buffer_page().  Thus needs no special handling in the
3039	 * error path.
3040	 */
3041	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3042	spin_unlock(&mapping->i_private_lock);
3043	return 0;
3044}
3045
3046struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3047					  u64 start, u64 owner_root, int level)
3048{
3049	unsigned long len = fs_info->nodesize;
3050	int num_folios;
3051	int attached = 0;
3052	struct extent_buffer *eb;
3053	struct extent_buffer *existing_eb = NULL;
3054	struct btrfs_subpage *prealloc = NULL;
3055	u64 lockdep_owner = owner_root;
3056	bool page_contig = true;
3057	int uptodate = 1;
3058	int ret;
3059
3060	if (check_eb_alignment(fs_info, start))
3061		return ERR_PTR(-EINVAL);
3062
3063#if BITS_PER_LONG == 32
3064	if (start >= MAX_LFS_FILESIZE) {
3065		btrfs_err_rl(fs_info,
3066		"extent buffer %llu is beyond 32bit page cache limit", start);
3067		btrfs_err_32bit_limit(fs_info);
3068		return ERR_PTR(-EOVERFLOW);
3069	}
3070	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3071		btrfs_warn_32bit_limit(fs_info);
3072#endif
3073
3074	eb = find_extent_buffer(fs_info, start);
3075	if (eb)
3076		return eb;
3077
3078	eb = __alloc_extent_buffer(fs_info, start, len);
3079	if (!eb)
3080		return ERR_PTR(-ENOMEM);
3081
3082	/*
3083	 * The reloc trees are just snapshots, so we need them to appear to be
3084	 * just like any other fs tree WRT lockdep.
3085	 */
3086	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3087		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3088
3089	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3090
3091	/*
3092	 * Preallocate folio private for subpage case, so that we won't
3093	 * allocate memory with i_private_lock nor the page lock held.
3094	 *
3095	 * The memory will be freed by attach_extent_buffer_page() or freed
3096	 * manually if we exit earlier.
3097	 */
3098	if (fs_info->nodesize < PAGE_SIZE) {
3099		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3100		if (IS_ERR(prealloc)) {
3101			ret = PTR_ERR(prealloc);
3102			goto out;
3103		}
3104	}
3105
3106reallocate:
3107	/* Allocate all pages first. */
3108	ret = alloc_eb_folio_array(eb, true);
3109	if (ret < 0) {
3110		btrfs_free_subpage(prealloc);
3111		goto out;
3112	}
3113
3114	num_folios = num_extent_folios(eb);
3115	/* Attach all pages to the filemap. */
3116	for (int i = 0; i < num_folios; i++) {
3117		struct folio *folio;
3118
3119		ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3120		if (ret > 0) {
3121			ASSERT(existing_eb);
3122			goto out;
3123		}
3124
3125		/*
3126		 * TODO: Special handling for a corner case where the order of
3127		 * folios mismatch between the new eb and filemap.
3128		 *
3129		 * This happens when:
3130		 *
3131		 * - the new eb is using higher order folio
3132		 *
3133		 * - the filemap is still using 0-order folios for the range
3134		 *   This can happen at the previous eb allocation, and we don't
3135		 *   have higher order folio for the call.
3136		 *
3137		 * - the existing eb has already been freed
3138		 *
3139		 * In this case, we have to free the existing folios first, and
3140		 * re-allocate using the same order.
3141		 * Thankfully this is not going to happen yet, as we're still
3142		 * using 0-order folios.
3143		 */
3144		if (unlikely(ret == -EAGAIN)) {
3145			ASSERT(0);
3146			goto reallocate;
3147		}
3148		attached++;
3149
3150		/*
3151		 * Only after attach_eb_folio_to_filemap() is eb->folios[]
3152		 * reliable, as we may choose to reuse the existing page cache
3153		 * and free the allocated page.
3154		 */
3155		folio = eb->folios[i];
3156		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3157
3158		/*
3159		 * Check if the current page is physically contiguous with previous eb
3160		 * page.
3161		 * At this stage, either we allocated a large folio, thus @i
3162		 * would only be 0, or we fall back to per-page allocation.
3163		 */
3164		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3165			page_contig = false;
3166
3167		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3168			uptodate = 0;
3169
3170		/*
3171		 * We can't unlock the pages just yet since the extent buffer
3172		 * hasn't been properly inserted in the radix tree, this
3173		 * opens a race with btree_release_folio which can free a page
3174		 * while we are still filling in all pages for the buffer and
3175		 * we could crash.
3176		 */
3177	}
3178	if (uptodate)
3179		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3180	/* All pages are physically contiguous, can skip cross page handling. */
3181	if (page_contig)
3182		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3183again:
3184	ret = radix_tree_preload(GFP_NOFS);
3185	if (ret)
3186		goto out;
3187
3188	spin_lock(&fs_info->buffer_lock);
3189	ret = radix_tree_insert(&fs_info->buffer_radix,
3190				start >> fs_info->sectorsize_bits, eb);
3191	spin_unlock(&fs_info->buffer_lock);
3192	radix_tree_preload_end();
3193	if (ret == -EEXIST) {
3194		ret = 0;
3195		existing_eb = find_extent_buffer(fs_info, start);
3196		if (existing_eb)
3197			goto out;
3198		else
3199			goto again;
3200	}
3201	/* add one reference for the tree */
3202	check_buffer_tree_ref(eb);
3203	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3204
3205	/*
3206	 * Now it's safe to unlock the pages because any calls to
3207	 * btree_release_folio will correctly detect that a page belongs to a
3208	 * live buffer and won't free them prematurely.
3209	 */
3210	for (int i = 0; i < num_folios; i++)
3211		unlock_page(folio_page(eb->folios[i], 0));
3212	return eb;
3213
3214out:
3215	WARN_ON(!atomic_dec_and_test(&eb->refs));
3216
3217	/*
3218	 * Any attached folios need to be detached before we unlock them.  This
3219	 * is because when we insert our new folios into the mapping, we attach
3220	 * our eb to each folio.  If we then fail to insert a folio, we'll look
3221	 * up the folio for that index and grab its eb.  We do not want that to
3222	 * grab this eb, as we're getting ready to free it.  So we have to
3223	 * detach it first and then unlock it.
3224	 *
3225	 * We have to drop our reference and NULL it out here because in the
3226	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3227	 * Below when we call btrfs_release_extent_buffer() we will call
3228	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3229	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3230	 * double put our reference and be super sad.
3231	 */
3232	for (int i = 0; i < attached; i++) {
3233		ASSERT(eb->folios[i]);
3234		detach_extent_buffer_folio(eb, eb->folios[i]);
3235		unlock_page(folio_page(eb->folios[i], 0));
3236		folio_put(eb->folios[i]);
3237		eb->folios[i] = NULL;
3238	}
3239	/*
3240	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3241	 * so it can be cleaned up without utilizing page->mapping.
3242	 */
3243	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3244
3245	btrfs_release_extent_buffer(eb);
3246	if (ret < 0)
3247		return ERR_PTR(ret);
3248	ASSERT(existing_eb);
3249	return existing_eb;
3250}
3251
3252static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3253{
3254	struct extent_buffer *eb =
3255			container_of(head, struct extent_buffer, rcu_head);
3256
3257	__free_extent_buffer(eb);
3258}
3259
3260static int release_extent_buffer(struct extent_buffer *eb)
3261	__releases(&eb->refs_lock)
3262{
3263	lockdep_assert_held(&eb->refs_lock);
3264
3265	WARN_ON(atomic_read(&eb->refs) == 0);
3266	if (atomic_dec_and_test(&eb->refs)) {
3267		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3268			struct btrfs_fs_info *fs_info = eb->fs_info;
3269
3270			spin_unlock(&eb->refs_lock);
3271
3272			spin_lock(&fs_info->buffer_lock);
3273			radix_tree_delete(&fs_info->buffer_radix,
3274					  eb->start >> fs_info->sectorsize_bits);
3275			spin_unlock(&fs_info->buffer_lock);
3276		} else {
3277			spin_unlock(&eb->refs_lock);
3278		}
3279
3280		btrfs_leak_debug_del_eb(eb);
3281		/* Should be safe to release our pages at this point */
3282		btrfs_release_extent_buffer_pages(eb);
3283#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3284		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3285			__free_extent_buffer(eb);
3286			return 1;
3287		}
3288#endif
3289		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3290		return 1;
3291	}
3292	spin_unlock(&eb->refs_lock);
3293
3294	return 0;
3295}
3296
3297void free_extent_buffer(struct extent_buffer *eb)
3298{
3299	int refs;
3300	if (!eb)
3301		return;
3302
3303	refs = atomic_read(&eb->refs);
3304	while (1) {
3305		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3306		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3307			refs == 1))
3308			break;
3309		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3310			return;
3311	}
3312
3313	spin_lock(&eb->refs_lock);
3314	if (atomic_read(&eb->refs) == 2 &&
3315	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3316	    !extent_buffer_under_io(eb) &&
3317	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3318		atomic_dec(&eb->refs);
3319
3320	/*
3321	 * I know this is terrible, but it's temporary until we stop tracking
3322	 * the uptodate bits and such for the extent buffers.
3323	 */
3324	release_extent_buffer(eb);
3325}
3326
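/*
 * Mark @eb stale and drop the caller's reference.  If only the tree ref
 * remains and the eb is not under IO, drop that as well so the buffer is
 * freed right away instead of lingering in the radix tree.
 */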
3327void free_extent_buffer_stale(struct extent_buffer *eb)
3328{
3329	if (!eb)
3330		return;
3331
3332	spin_lock(&eb->refs_lock);
3333	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3334
3335	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3336	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3337		atomic_dec(&eb->refs);
3338	release_extent_buffer(eb);
3339}
3340
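/*
 * Clear the dirty bit of a locked btree folio and, if the folio ended up
 * clean, also clear its PAGECACHE_TAG_DIRTY tag so writeback won't pick it
 * up again.
 */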
3341static void btree_clear_folio_dirty(struct folio *folio)
3342{
3343	ASSERT(folio_test_dirty(folio));
3344	ASSERT(folio_test_locked(folio));
3345	folio_clear_dirty_for_io(folio);
3346	xa_lock_irq(&folio->mapping->i_pages);
3347	if (!folio_test_dirty(folio))
3348		__xa_clear_mark(&folio->mapping->i_pages,
3349				folio_index(folio), PAGECACHE_TAG_DIRTY);
3350	xa_unlock_irq(&folio->mapping->i_pages);
3351}
3352
3353static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3354{
3355	struct btrfs_fs_info *fs_info = eb->fs_info;
3356	struct folio *folio = eb->folios[0];
3357	bool last;
3358
3359	/* btree_clear_folio_dirty() needs page locked. */
3360	folio_lock(folio);
3361	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3362	if (last)
3363		btree_clear_folio_dirty(folio);
3364	folio_unlock(folio);
3365	WARN_ON(atomic_read(&eb->refs) == 0);
3366}
3367
3368void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3369			      struct extent_buffer *eb)
3370{
3371	struct btrfs_fs_info *fs_info = eb->fs_info;
3372	int num_folios;
3373
3374	btrfs_assert_tree_write_locked(eb);
3375
3376	if (trans && btrfs_header_generation(eb) != trans->transid)
3377		return;
3378
3379	/*
3380	 * Instead of clearing the dirty flag off of the buffer, mark it as
3381	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3382	 * write-ordering in zoned mode, without the need to later re-dirty
3383	 * the extent_buffer.
3384	 *
3385	 * The actual zeroout of the buffer will happen later in
3386	 * btree_csum_one_bio.
3387	 */
3388	if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3389		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3390		return;
3391	}
3392
3393	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3394		return;
3395
3396	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3397				 fs_info->dirty_metadata_batch);
3398
3399	if (eb->fs_info->nodesize < PAGE_SIZE)
3400		return clear_subpage_extent_buffer_dirty(eb);
3401
3402	num_folios = num_extent_folios(eb);
3403	for (int i = 0; i < num_folios; i++) {
3404		struct folio *folio = eb->folios[i];
3405
3406		if (!folio_test_dirty(folio))
3407			continue;
3408		folio_lock(folio);
3409		btree_clear_folio_dirty(folio);
3410		folio_unlock(folio);
3411	}
3412	WARN_ON(atomic_read(&eb->refs) == 0);
3413}
3414
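/*
 * Mark @eb dirty: set EXTENT_BUFFER_DIRTY and dirty every folio covering the
 * eb.  A newly dirtied buffer is also accounted into
 * fs_info->dirty_metadata_bytes.
 */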
3415void set_extent_buffer_dirty(struct extent_buffer *eb)
3416{
3417	int num_folios;
3418	bool was_dirty;
3419
3420	check_buffer_tree_ref(eb);
3421
3422	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3423
3424	num_folios = num_extent_folios(eb);
3425	WARN_ON(atomic_read(&eb->refs) == 0);
3426	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3427	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3428
3429	if (!was_dirty) {
3430		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3431
3432		/*
3433		 * For the subpage case, we can have other extent buffers in the
3434		 * same page, and clear_subpage_extent_buffer_dirty() has to
3435		 * clear the page dirty bit without the subpage lock held.
3436		 * This can cause a race where the page dirty bit gets cleared
3437		 * right after we set it here.
3438		 *
3439		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3440		 * its page for other reasons, so we can use the page lock to
3441		 * prevent the above race.
3442		 */
3443		if (subpage)
3444			lock_page(folio_page(eb->folios[0], 0));
3445		for (int i = 0; i < num_folios; i++)
3446			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3447					      eb->start, eb->len);
3448		if (subpage)
3449			unlock_page(folio_page(eb->folios[0], 0));
3450		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3451					 eb->len,
3452					 eb->fs_info->dirty_metadata_batch);
3453	}
3454#ifdef CONFIG_BTRFS_DEBUG
3455	for (int i = 0; i < num_folios; i++)
3456		ASSERT(folio_test_dirty(eb->folios[i]));
3457#endif
3458}
3459
3460void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3461{
3462	struct btrfs_fs_info *fs_info = eb->fs_info;
3463	int num_folios = num_extent_folios(eb);
3464
3465	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3466	for (int i = 0; i < num_folios; i++) {
3467		struct folio *folio = eb->folios[i];
3468
3469		if (!folio)
3470			continue;
3471
3472		/*
3473		 * This is special handling for the metadata subpage case, as the
3474		 * regular btrfs_is_subpage() cannot handle cloned/dummy metadata.
3475		 */
3476		if (fs_info->nodesize >= PAGE_SIZE)
3477			folio_clear_uptodate(folio);
3478		else
3479			btrfs_subpage_clear_uptodate(fs_info, folio,
3480						     eb->start, eb->len);
3481	}
3482}
3483
3484void set_extent_buffer_uptodate(struct extent_buffer *eb)
3485{
3486	struct btrfs_fs_info *fs_info = eb->fs_info;
3487	int num_folios = num_extent_folios(eb);
3488
3489	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3490	for (int i = 0; i < num_folios; i++) {
3491		struct folio *folio = eb->folios[i];
3492
3493		/*
3494		 * This is special handling for the metadata subpage case, as the
3495		 * regular btrfs_is_subpage() cannot handle cloned/dummy metadata.
3496		 */
3497		if (fs_info->nodesize >= PAGE_SIZE)
3498			folio_mark_uptodate(folio);
3499		else
3500			btrfs_subpage_set_uptodate(fs_info, folio,
3501						   eb->start, eb->len);
3502	}
3503}
3504
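/* Clear EXTENT_BUFFER_READING and wake up anyone waiting on that bit. */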
3505static void clear_extent_buffer_reading(struct extent_buffer *eb)
3506{
3507	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3508	smp_mb__after_atomic();
3509	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3510}
3511
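/*
 * Endio handler for metadata reads.  Validate the tree block, propagate the
 * result to the eb and its folios, clear the READING bit and drop the extra
 * reference taken at submission time.
 */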
3512static void end_bbio_meta_read(struct btrfs_bio *bbio)
3513{
3514	struct extent_buffer *eb = bbio->private;
3515	struct btrfs_fs_info *fs_info = eb->fs_info;
3516	bool uptodate = !bbio->bio.bi_status;
3517	struct folio_iter fi;
3518	u32 bio_offset = 0;
3519
3520	/*
3521	 * If the extent buffer is marked UPTODATE before the read operation
3522	 * completes, other calls to read_extent_buffer_pages() will return
3523	 * early without waiting for the read to finish, causing data races.
3524	 */
3525	WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3526
3527	eb->read_mirror = bbio->mirror_num;
3528
3529	if (uptodate &&
3530	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3531		uptodate = false;
3532
3533	if (uptodate) {
3534		set_extent_buffer_uptodate(eb);
3535	} else {
3536		clear_extent_buffer_uptodate(eb);
3537		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3538	}
3539
3540	bio_for_each_folio_all(fi, &bbio->bio) {
3541		struct folio *folio = fi.folio;
3542		u64 start = eb->start + bio_offset;
3543		u32 len = fi.length;
3544
3545		if (uptodate)
3546			btrfs_folio_set_uptodate(fs_info, folio, start, len);
3547		else
3548			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3549
3550		bio_offset += len;
3551	}
3552
3553	clear_extent_buffer_reading(eb);
3554	free_extent_buffer(eb);
3555
3556	bio_put(&bbio->bio);
3557}
3558
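/*
 * Read the backing pages of @eb from disk unless it is already uptodate.
 *
 * Only one reader runs at a time (EXTENT_BUFFER_READING); concurrent callers
 * either wait for it to finish (WAIT_COMPLETE) or return immediately.
 * Returns 0 on success or once the read has been submitted, -EIO if a
 * previous write failed or a waited-for read did not end up uptodate.
 */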
3559int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3560			     const struct btrfs_tree_parent_check *check)
3561{
3562	struct btrfs_bio *bbio;
3563	bool ret;
3564
3565	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3566		return 0;
3567
3568	/*
3569	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3570	 * operation, which could potentially still be in flight.  In this case
3571	 * we simply want to return an error.
3572	 */
3573	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3574		return -EIO;
3575
3576	/* Someone else is already reading the buffer, just wait for it. */
3577	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3578		goto done;
3579
3580	/*
3581	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3582	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3583	 * started and finished reading the same eb.  In this case, UPTODATE
3584	 * will now be set, and we shouldn't read it in again.
3585	 */
3586	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3587		clear_extent_buffer_reading(eb);
3588		return 0;
3589	}
3590
3591	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3592	eb->read_mirror = 0;
3593	check_buffer_tree_ref(eb);
3594	atomic_inc(&eb->refs);
3595
3596	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3597			       REQ_OP_READ | REQ_META, eb->fs_info,
3598			       end_bbio_meta_read, eb);
3599	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3600	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3601	bbio->file_offset = eb->start;
3602	memcpy(&bbio->parent_check, check, sizeof(*check));
3603	if (eb->fs_info->nodesize < PAGE_SIZE) {
3604		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3605				    eb->start - folio_pos(eb->folios[0]));
3606		ASSERT(ret);
3607	} else {
3608		int num_folios = num_extent_folios(eb);
3609
3610		for (int i = 0; i < num_folios; i++) {
3611			struct folio *folio = eb->folios[i];
3612
3613			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3614			ASSERT(ret);
3615		}
3616	}
3617	btrfs_submit_bbio(bbio, mirror_num);
3618
3619done:
3620	if (wait == WAIT_COMPLETE) {
3621		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3622		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3623			return -EIO;
3624	}
3625
3626	return 0;
3627}
3628
3629static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3630			    unsigned long len)
3631{
3632	btrfs_warn(eb->fs_info,
3633		"access to eb bytenr %llu len %u out of range start %lu len %lu",
3634		eb->start, eb->len, start, len);
3635	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3636
3637	return true;
3638}
3639
3640/*
3641 * Check if the [start, start + len) range is valid before reading/writing
3642 * the eb.
3643 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3644 *
3645 * The caller should not touch the dst/src memory if this function returns an error.
3646 */
3647static inline int check_eb_range(const struct extent_buffer *eb,
3648				 unsigned long start, unsigned long len)
3649{
3650	unsigned long offset;
3651
3652	/* start, start + len should not go beyond eb->len nor overflow */
3653	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3654		return report_eb_range(eb, start, len);
3655
3656	return false;
3657}
3658
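/*
 * Copy @len bytes at offset @start inside @eb into @dstv, crossing folio
 * boundaries as needed.  An invalid range zeroes the destination instead.
 */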
3659void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3660			unsigned long start, unsigned long len)
3661{
3662	const int unit_size = eb->folio_size;
3663	size_t cur;
3664	size_t offset;
3665	char *dst = (char *)dstv;
3666	unsigned long i = get_eb_folio_index(eb, start);
3667
3668	if (check_eb_range(eb, start, len)) {
3669		/*
3670		 * Invalid range hit, zero the destination so callers won't get
3671		 * random garbage from their uninitialized memory.
3672		 */
3673		memset(dstv, 0, len);
3674		return;
3675	}
3676
3677	if (eb->addr) {
3678		memcpy(dstv, eb->addr + start, len);
3679		return;
3680	}
3681
3682	offset = get_eb_offset_in_folio(eb, start);
3683
3684	while (len > 0) {
3685		char *kaddr;
3686
3687		cur = min(len, unit_size - offset);
3688		kaddr = folio_address(eb->folios[i]);
3689		memcpy(dst, kaddr + offset, cur);
3690
3691		dst += cur;
3692		len -= cur;
3693		offset = 0;
3694		i++;
3695	}
3696}
3697
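/*
 * Same as read_extent_buffer(), but copy into a user buffer with page faults
 * disabled.  Returns 0 on success or -EFAULT if the copy would fault.
 */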
3698int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3699				       void __user *dstv,
3700				       unsigned long start, unsigned long len)
3701{
3702	const int unit_size = eb->folio_size;
3703	size_t cur;
3704	size_t offset;
3705	char __user *dst = (char __user *)dstv;
3706	unsigned long i = get_eb_folio_index(eb, start);
3707	int ret = 0;
3708
3709	WARN_ON(start > eb->len);
3710	WARN_ON(start + len > eb->start + eb->len);
3711
3712	if (eb->addr) {
3713		if (copy_to_user_nofault(dstv, eb->addr + start, len))
3714			ret = -EFAULT;
3715		return ret;
3716	}
3717
3718	offset = get_eb_offset_in_folio(eb, start);
3719
3720	while (len > 0) {
3721		char *kaddr;
3722
3723		cur = min(len, unit_size - offset);
3724		kaddr = folio_address(eb->folios[i]);
3725		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3726			ret = -EFAULT;
3727			break;
3728		}
3729
3730		dst += cur;
3731		len -= cur;
3732		offset = 0;
3733		i++;
3734	}
3735
3736	return ret;
3737}
3738
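/*
 * Compare @len bytes at offset @start inside @eb with @ptrv.  Returns the
 * memcmp() result of the first differing chunk, or -EINVAL for an invalid
 * range.
 */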
3739int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3740			 unsigned long start, unsigned long len)
3741{
3742	const int unit_size = eb->folio_size;
3743	size_t cur;
3744	size_t offset;
3745	char *kaddr;
3746	char *ptr = (char *)ptrv;
3747	unsigned long i = get_eb_folio_index(eb, start);
3748	int ret = 0;
3749
3750	if (check_eb_range(eb, start, len))
3751		return -EINVAL;
3752
3753	if (eb->addr)
3754		return memcmp(ptrv, eb->addr + start, len);
3755
3756	offset = get_eb_offset_in_folio(eb, start);
3757
3758	while (len > 0) {
3759		cur = min(len, unit_size - offset);
3760		kaddr = folio_address(eb->folios[i]);
3761		ret = memcmp(ptr, kaddr + offset, cur);
3762		if (ret)
3763			break;
3764
3765		ptr += cur;
3766		len -= cur;
3767		offset = 0;
3768		i++;
3769	}
3770	return ret;
3771}
3772
3773/*
3774 * Check that the extent buffer is uptodate.
3775 *
3776 * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
3777 * For the subpage case, check if the range covered by the eb has the uptodate bit set.
3778 */
3779static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3780{
3781	struct btrfs_fs_info *fs_info = eb->fs_info;
3782	struct folio *folio = eb->folios[i];
3783
3784	ASSERT(folio);
3785
3786	/*
3787	 * If we are using the commit root we could potentially clear a page
3788	 * Uptodate while we're using the extent buffer that we've previously
3789	 * looked up.  We don't want to complain in this case, as the page was
3790	 * valid before; we just didn't write it out.  Instead we want to catch
3791	 * the case where we didn't actually read the block properly, which
3792	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3793	 */
3794	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3795		return;
3796
3797	if (fs_info->nodesize < PAGE_SIZE) {
3798		folio = eb->folios[0];
3799		ASSERT(i == 0);
3800		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3801							 eb->start, eb->len)))
3802			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3803	} else {
3804		WARN_ON(!folio_test_uptodate(folio));
3805	}
3806}
3807
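/*
 * Copy @len bytes from @srcv into @eb at offset @start.  @use_memmove selects
 * memmove() over memcpy() for ranges that may overlap within the same eb.
 */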
3808static void __write_extent_buffer(const struct extent_buffer *eb,
3809				  const void *srcv, unsigned long start,
3810				  unsigned long len, bool use_memmove)
3811{
3812	const int unit_size = eb->folio_size;
3813	size_t cur;
3814	size_t offset;
3815	char *kaddr;
3816	const char *src = (const char *)srcv;
3817	unsigned long i = get_eb_folio_index(eb, start);
3818	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
3819	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3820
3821	if (check_eb_range(eb, start, len))
3822		return;
3823
3824	if (eb->addr) {
3825		if (use_memmove)
3826			memmove(eb->addr + start, srcv, len);
3827		else
3828			memcpy(eb->addr + start, srcv, len);
3829		return;
3830	}
3831
3832	offset = get_eb_offset_in_folio(eb, start);
3833
3834	while (len > 0) {
3835		if (check_uptodate)
3836			assert_eb_folio_uptodate(eb, i);
3837
3838		cur = min(len, unit_size - offset);
3839		kaddr = folio_address(eb->folios[i]);
3840		if (use_memmove)
3841			memmove(kaddr + offset, src, cur);
3842		else
3843			memcpy(kaddr + offset, src, cur);
3844
3845		src += cur;
3846		len -= cur;
3847		offset = 0;
3848		i++;
3849	}
3850}
3851
3852void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3853			 unsigned long start, unsigned long len)
3854{
3855	return __write_extent_buffer(eb, srcv, start, len, false);
3856}
3857
3858static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3859				 unsigned long start, unsigned long len)
3860{
3861	const int unit_size = eb->folio_size;
3862	unsigned long cur = start;
3863
3864	if (eb->addr) {
3865		memset(eb->addr + start, c, len);
3866		return;
3867	}
3868
3869	while (cur < start + len) {
3870		unsigned long index = get_eb_folio_index(eb, cur);
3871		unsigned int offset = get_eb_offset_in_folio(eb, cur);
3872		unsigned int cur_len = min(start + len - cur, unit_size - offset);
3873
3874		assert_eb_folio_uptodate(eb, index);
3875		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3876
3877		cur += cur_len;
3878	}
3879}
3880
3881void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3882			   unsigned long len)
3883{
3884	if (check_eb_range(eb, start, len))
3885		return;
3886	return memset_extent_buffer(eb, 0, start, len);
3887}
3888
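/* Copy the full content of @src into @dst; both ebs must have the same length. */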
3889void copy_extent_buffer_full(const struct extent_buffer *dst,
3890			     const struct extent_buffer *src)
3891{
3892	const int unit_size = src->folio_size;
3893	unsigned long cur = 0;
3894
3895	ASSERT(dst->len == src->len);
3896
3897	while (cur < src->len) {
3898		unsigned long index = get_eb_folio_index(src, cur);
3899		unsigned long offset = get_eb_offset_in_folio(src, cur);
3900		unsigned long cur_len = min(src->len, unit_size - offset);
3901		void *addr = folio_address(src->folios[index]) + offset;
3902
3903		write_extent_buffer(dst, addr, cur, cur_len);
3904
3905		cur += cur_len;
3906	}
3907}
3908
3909void copy_extent_buffer(const struct extent_buffer *dst,
3910			const struct extent_buffer *src,
3911			unsigned long dst_offset, unsigned long src_offset,
3912			unsigned long len)
3913{
3914	const int unit_size = dst->folio_size;
3915	u64 dst_len = dst->len;
3916	size_t cur;
3917	size_t offset;
3918	char *kaddr;
3919	unsigned long i = get_eb_folio_index(dst, dst_offset);
3920
3921	if (check_eb_range(dst, dst_offset, len) ||
3922	    check_eb_range(src, src_offset, len))
3923		return;
3924
3925	WARN_ON(src->len != dst_len);
3926
3927	offset = get_eb_offset_in_folio(dst, dst_offset);
3928
3929	while (len > 0) {
3930		assert_eb_folio_uptodate(dst, i);
3931
3932		cur = min(len, (unsigned long)(unit_size - offset));
3933
3934		kaddr = folio_address(dst->folios[i]);
3935		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3936
3937		src_offset += cur;
3938		len -= cur;
3939		offset = 0;
3940		i++;
3941	}
3942}
3943
3944/*
3945 * Calculate the folio and offset of the byte containing the given bit number.
3946 *
3947 * @eb:           the extent buffer
3948 * @start:        offset of the bitmap item in the extent buffer
3949 * @nr:           bit number
3950 * @folio_index:  return index of the folio in the extent buffer that contains
3951 *                the given bit number
3952 * @folio_offset: return offset into the folio given by folio_index
3953 *
3954 * This helper hides the ugliness of finding the byte in an extent buffer which
3955 * contains a given bit.
3956 */
3957static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3958				    unsigned long start, unsigned long nr,
3959				    unsigned long *folio_index,
3960				    size_t *folio_offset)
3961{
3962	size_t byte_offset = BIT_BYTE(nr);
3963	size_t offset;
3964
3965	/*
3966	 * The byte we want is the offset of the extent buffer + the offset of
3967	 * the bitmap item in the extent buffer + the offset of the byte in the
3968	 * bitmap item.
3969	 */
3970	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3971
3972	*folio_index = offset >> eb->folio_shift;
3973	*folio_offset = offset_in_eb_folio(eb, offset);
3974}
3975
3976/*
3977 * Determine whether a bit in a bitmap item is set.
3978 *
3979 * @eb:     the extent buffer
3980 * @start:  offset of the bitmap item in the extent buffer
3981 * @nr:     bit number to test
3982 */
3983int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3984			   unsigned long nr)
3985{
3986	unsigned long i;
3987	size_t offset;
3988	u8 *kaddr;
3989
3990	eb_bitmap_offset(eb, start, nr, &i, &offset);
3991	assert_eb_folio_uptodate(eb, i);
3992	kaddr = folio_address(eb->folios[i]);
3993	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3994}
3995
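/*
 * Return the kernel address of the byte at offset @bytenr inside @eb, or NULL
 * if the offset is out of range.
 */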
3996static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3997{
3998	unsigned long index = get_eb_folio_index(eb, bytenr);
3999
4000	if (check_eb_range(eb, bytenr, 1))
4001		return NULL;
4002	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4003}
4004
4005/*
4006 * Set an area of a bitmap to 1.
4007 *
4008 * @eb:     the extent buffer
4009 * @start:  offset of the bitmap item in the extent buffer
4010 * @pos:    bit number of the first bit
4011 * @len:    number of bits to set
4012 */
4013void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4014			      unsigned long pos, unsigned long len)
4015{
4016	unsigned int first_byte = start + BIT_BYTE(pos);
4017	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4018	const bool same_byte = (first_byte == last_byte);
4019	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4020	u8 *kaddr;
4021
4022	if (same_byte)
4023		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4024
4025	/* Handle the first byte. */
4026	kaddr = extent_buffer_get_byte(eb, first_byte);
4027	*kaddr |= mask;
4028	if (same_byte)
4029		return;
4030
4031	/* Handle the byte aligned part. */
4032	ASSERT(first_byte + 1 <= last_byte);
4033	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4034
4035	/* Handle the last byte. */
4036	kaddr = extent_buffer_get_byte(eb, last_byte);
4037	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4038}
4039
4040
4041/*
4042 * Clear an area of a bitmap.
4043 *
4044 * @eb:     the extent buffer
4045 * @start:  offset of the bitmap item in the extent buffer
4046 * @pos:    bit number of the first bit
4047 * @len:    number of bits to clear
4048 */
4049void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4050				unsigned long start, unsigned long pos,
4051				unsigned long len)
4052{
4053	unsigned int first_byte = start + BIT_BYTE(pos);
4054	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4055	const bool same_byte = (first_byte == last_byte);
4056	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4057	u8 *kaddr;
4058
4059	if (same_byte)
4060		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4061
4062	/* Handle the first byte. */
4063	kaddr = extent_buffer_get_byte(eb, first_byte);
4064	*kaddr &= ~mask;
4065	if (same_byte)
4066		return;
4067
4068	/* Handle the byte aligned part. */
4069	ASSERT(first_byte + 1 <= last_byte);
4070	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4071
4072	/* Handle the last byte. */
4073	kaddr = extent_buffer_get_byte(eb, last_byte);
4074	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4075}
4076
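/* Return true if the source and destination ranges of @len bytes overlap. */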
4077static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4078{
4079	unsigned long distance = (src > dst) ? src - dst : dst - src;
4080	return distance < len;
4081}
4082
4083void memcpy_extent_buffer(const struct extent_buffer *dst,
4084			  unsigned long dst_offset, unsigned long src_offset,
4085			  unsigned long len)
4086{
4087	const int unit_size = dst->folio_size;
4088	unsigned long cur_off = 0;
4089
4090	if (check_eb_range(dst, dst_offset, len) ||
4091	    check_eb_range(dst, src_offset, len))
4092		return;
4093
4094	if (dst->addr) {
4095		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4096
4097		if (use_memmove)
4098			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4099		else
4100			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4101		return;
4102	}
4103
4104	while (cur_off < len) {
4105		unsigned long cur_src = cur_off + src_offset;
4106		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4107		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4108		unsigned long cur_len = min(src_offset + len - cur_src,
4109					    unit_size - folio_off);
4110		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4111		const bool use_memmove = areas_overlap(src_offset + cur_off,
4112						       dst_offset + cur_off, cur_len);
4113
4114		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4115				      use_memmove);
4116		cur_off += cur_len;
4117	}
4118}
4119
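/*
 * Like memcpy_extent_buffer() but safe for overlapping ranges: when the
 * destination is above the source, copy backwards, chunk by chunk, from the
 * end of the range.
 */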
4120void memmove_extent_buffer(const struct extent_buffer *dst,
4121			   unsigned long dst_offset, unsigned long src_offset,
4122			   unsigned long len)
4123{
4124	unsigned long dst_end = dst_offset + len - 1;
4125	unsigned long src_end = src_offset + len - 1;
4126
4127	if (check_eb_range(dst, dst_offset, len) ||
4128	    check_eb_range(dst, src_offset, len))
4129		return;
4130
4131	if (dst_offset < src_offset) {
4132		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4133		return;
4134	}
4135
4136	if (dst->addr) {
4137		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4138		return;
4139	}
4140
4141	while (len > 0) {
4142		unsigned long src_i;
4143		size_t cur;
4144		size_t dst_off_in_folio;
4145		size_t src_off_in_folio;
4146		void *src_addr;
4147		bool use_memmove;
4148
4149		src_i = get_eb_folio_index(dst, src_end);
4150
4151		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4152		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4153
4154		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4155		cur = min(cur, dst_off_in_folio + 1);
4156
4157		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4158					 cur + 1;
4159		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4160					    cur);
4161
4162		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4163				      use_memmove);
4164
4165		dst_end -= cur;
4166		src_end -= cur;
4167		len -= cur;
4168	}
4169}
4170
4171#define GANG_LOOKUP_SIZE	16
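/*
 * Find the first extent buffer starting at or after @bytenr that still begins
 * inside the page backing @folio.  Returns NULL if there is none.  Caller
 * must hold fs_info->buffer_lock.
 */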
4172static struct extent_buffer *get_next_extent_buffer(
4173		const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4174{
4175	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4176	struct extent_buffer *found = NULL;
4177	u64 folio_start = folio_pos(folio);
4178	u64 cur = folio_start;
4179
4180	ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4181	lockdep_assert_held(&fs_info->buffer_lock);
4182
4183	while (cur < folio_start + PAGE_SIZE) {
4184		int ret;
4185		int i;
4186
4187		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4188				(void **)gang, cur >> fs_info->sectorsize_bits,
4189				min_t(unsigned int, GANG_LOOKUP_SIZE,
4190				      PAGE_SIZE / fs_info->nodesize));
4191		if (ret == 0)
4192			goto out;
4193		for (i = 0; i < ret; i++) {
4194			/* Already beyond page end */
4195			if (gang[i]->start >= folio_start + PAGE_SIZE)
4196				goto out;
4197			/* Found one */
4198			if (gang[i]->start >= bytenr) {
4199				found = gang[i];
4200				goto out;
4201			}
4202		}
4203		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4204	}
4205out:
4206	return found;
4207}
4208
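/*
 * Subpage (nodesize < PAGE_SIZE) variant: try to release every extent buffer
 * attached to @folio.  Returns 1 if folio private ended up cleared (all ebs
 * released), 0 otherwise.
 */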
4209static int try_release_subpage_extent_buffer(struct folio *folio)
4210{
4211	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4212	u64 cur = folio_pos(folio);
4213	const u64 end = cur + PAGE_SIZE;
4214	int ret;
4215
4216	while (cur < end) {
4217		struct extent_buffer *eb = NULL;
4218
4219		/*
4220		 * Unlike try_release_extent_buffer() which uses folio private
4221		 * to grab the buffer, for the subpage case we rely on the radix
4222		 * tree, thus we need to ensure radix tree consistency.
4223		 *
4224		 * We also want an atomic snapshot of the radix tree, thus go
4225		 * with spinlock rather than RCU.
4226		 */
4227		spin_lock(&fs_info->buffer_lock);
4228		eb = get_next_extent_buffer(fs_info, folio, cur);
4229		if (!eb) {
4230			/* No more eb in the page range after or at cur */
4231			spin_unlock(&fs_info->buffer_lock);
4232			break;
4233		}
4234		cur = eb->start + eb->len;
4235
4236		/*
4237		 * The same as try_release_extent_buffer(), to ensure the eb
4238		 * won't disappear out from under us.
4239		 */
4240		spin_lock(&eb->refs_lock);
4241		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4242			spin_unlock(&eb->refs_lock);
4243			spin_unlock(&fs_info->buffer_lock);
4244			break;
4245		}
4246		spin_unlock(&fs_info->buffer_lock);
4247
4248		/*
4249		 * If tree ref isn't set then we know the ref on this eb is a
4250		 * real ref, so just return; this eb will likely be freed soon
4251		 * anyway.
4252		 */
4253		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4254			spin_unlock(&eb->refs_lock);
4255			break;
4256		}
4257
4258		/*
4259		 * Here we don't care about the return value; we will always
4260		 * check the folio private at the end.  And
4261		 * release_extent_buffer() will release the refs_lock.
4262		 */
4263		release_extent_buffer(eb);
4264	}
4265	/*
4266	 * Finally, check whether folio private has been cleared: if we have
4267	 * released all ebs in the page, folio private should be cleared by now.
4268	 */
4269	spin_lock(&folio->mapping->i_private_lock);
4270	if (!folio_test_private(folio))
4271		ret = 1;
4272	else
4273		ret = 0;
4274	spin_unlock(&folio->mapping->i_private_lock);
4275	return ret;
4276
4277}
4278
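/*
 * Attempt to release the extent buffer attached to @folio.  Returns 1 if the
 * folio can be released (no eb attached, or the eb only held a droppable tree
 * ref), 0 if the eb is still in use.
 */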
4279int try_release_extent_buffer(struct folio *folio)
4280{
4281	struct extent_buffer *eb;
4282
4283	if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4284		return try_release_subpage_extent_buffer(folio);
4285
4286	/*
4287	 * We need to make sure nobody is changing folio private, as we rely on
4288	 * folio private as the pointer to extent buffer.
4289	 */
4290	spin_lock(&folio->mapping->i_private_lock);
4291	if (!folio_test_private(folio)) {
4292		spin_unlock(&folio->mapping->i_private_lock);
4293		return 1;
4294	}
4295
4296	eb = folio_get_private(folio);
4297	BUG_ON(!eb);
4298
4299	/*
4300	 * This is a little awful but should be ok; we need to make sure that
4301	 * the eb doesn't disappear out from under us while we're looking at
4302	 * this page.
4303	 */
4304	spin_lock(&eb->refs_lock);
4305	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4306		spin_unlock(&eb->refs_lock);
4307		spin_unlock(&folio->mapping->i_private_lock);
4308		return 0;
4309	}
4310	spin_unlock(&folio->mapping->i_private_lock);
4311
4312	/*
4313	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4314	 * so just return; this page will likely be freed soon anyway.
4315	 */
4316	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4317		spin_unlock(&eb->refs_lock);
4318		return 0;
4319	}
4320
4321	return release_extent_buffer(eb);
4322}
4323
4324/*
4325 * Attempt to readahead a child block.
4326 *
4327 * @fs_info:	the fs_info
4328 * @bytenr:	bytenr to read
4329 * @owner_root: objectid of the root that owns this eb
4330 * @gen:	generation for the uptodate check, can be 0
4331 * @level:	level for the eb
4332 *
4333 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4334 * normal uptodate check of the eb, without checking the generation.  If we have
4335 * to read the block we will not block on anything.
4336 */
4337void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4338				u64 bytenr, u64 owner_root, u64 gen, int level)
4339{
4340	struct btrfs_tree_parent_check check = {
4341		.level = level,
4342		.transid = gen
4343	};
4344	struct extent_buffer *eb;
4345	int ret;
4346
4347	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4348	if (IS_ERR(eb))
4349		return;
4350
4351	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4352		free_extent_buffer(eb);
4353		return;
4354	}
4355
4356	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4357	if (ret < 0)
4358		free_extent_buffer_stale(eb);
4359	else
4360		free_extent_buffer(eb);
4361}
4362
4363/*
4364 * Readahead a node's child block.
4365 *
4366 * @node:	parent node we're reading from
4367 * @slot:	slot in the parent node for the child we want to read
4368 *
4369 * A helper for btrfs_readahead_tree_block(), we simply read the bytenr pointed
4370 * to by the given slot in the node provided.
4371 */
4372void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4373{
4374	btrfs_readahead_tree_block(node->fs_info,
4375				   btrfs_node_blockptr(node, slot),
4376				   btrfs_header_owner(node),
4377				   btrfs_node_ptr_generation(node, slot),
4378				   btrfs_header_level(node) - 1);
4379}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/sched/mm.h>
  10#include <linux/spinlock.h>
  11#include <linux/blkdev.h>
  12#include <linux/swap.h>
  13#include <linux/writeback.h>
  14#include <linux/pagevec.h>
  15#include <linux/prefetch.h>
  16#include <linux/fsverity.h>
  17#include "extent_io.h"
  18#include "extent-io-tree.h"
  19#include "extent_map.h"
  20#include "ctree.h"
  21#include "btrfs_inode.h"
  22#include "bio.h"
  23#include "locking.h"
  24#include "backref.h"
  25#include "disk-io.h"
  26#include "subpage.h"
  27#include "zoned.h"
  28#include "block-group.h"
  29#include "compression.h"
  30#include "fs.h"
  31#include "accessors.h"
  32#include "file-item.h"
  33#include "file.h"
  34#include "dev-replace.h"
  35#include "super.h"
  36#include "transaction.h"
  37
  38static struct kmem_cache *extent_buffer_cache;
  39
  40#ifdef CONFIG_BTRFS_DEBUG
  41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
  42{
  43	struct btrfs_fs_info *fs_info = eb->fs_info;
  44	unsigned long flags;
  45
  46	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  47	list_add(&eb->leak_list, &fs_info->allocated_ebs);
  48	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  49}
  50
  51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
  52{
  53	struct btrfs_fs_info *fs_info = eb->fs_info;
  54	unsigned long flags;
  55
  56	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  57	list_del(&eb->leak_list);
  58	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  59}
  60
  61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  62{
  63	struct extent_buffer *eb;
  64	unsigned long flags;
  65
  66	/*
  67	 * If we didn't get into open_ctree our allocated_ebs will not be
  68	 * initialized, so just skip this.
  69	 */
  70	if (!fs_info->allocated_ebs.next)
  71		return;
  72
  73	WARN_ON(!list_empty(&fs_info->allocated_ebs));
  74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  75	while (!list_empty(&fs_info->allocated_ebs)) {
  76		eb = list_first_entry(&fs_info->allocated_ebs,
  77				      struct extent_buffer, leak_list);
  78		pr_err(
  79	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
  80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  81		       btrfs_header_owner(eb));
  82		list_del(&eb->leak_list);
  83		WARN_ON_ONCE(1);
  84		kmem_cache_free(extent_buffer_cache, eb);
  85	}
  86	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  87}
  88#else
  89#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
  90#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
  91#endif
  92
  93/*
  94 * Structure to record info about the bio being assembled, and other info like
  95 * how many bytes are there before stripe/ordered extent boundary.
  96 */
  97struct btrfs_bio_ctrl {
  98	struct btrfs_bio *bbio;
  99	enum btrfs_compression_type compress_type;
 100	u32 len_to_oe_boundary;
 101	blk_opf_t opf;
 102	btrfs_bio_end_io_t end_io_func;
 103	struct writeback_control *wbc;
 
 
 
 
 
 
 
 104};
 105
 106static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 107{
 108	struct btrfs_bio *bbio = bio_ctrl->bbio;
 109
 110	if (!bbio)
 111		return;
 112
 113	/* Caller should ensure the bio has at least some range added */
 114	ASSERT(bbio->bio.bi_iter.bi_size);
 115
 116	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
 117	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
 118		btrfs_submit_compressed_read(bbio);
 119	else
 120		btrfs_submit_bio(bbio, 0);
 121
 122	/* The bbio is owned by the end_io handler now */
 123	bio_ctrl->bbio = NULL;
 124}
 125
 126/*
 127 * Submit or fail the current bio in the bio_ctrl structure.
 128 */
 129static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
 130{
 131	struct btrfs_bio *bbio = bio_ctrl->bbio;
 132
 133	if (!bbio)
 134		return;
 135
 136	if (ret) {
 137		ASSERT(ret < 0);
 138		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
 139		/* The bio is owned by the end_io handler now */
 140		bio_ctrl->bbio = NULL;
 141	} else {
 142		submit_one_bio(bio_ctrl);
 143	}
 144}
 145
 146int __init extent_buffer_init_cachep(void)
 147{
 148	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 149						sizeof(struct extent_buffer), 0, 0,
 150						NULL);
 151	if (!extent_buffer_cache)
 152		return -ENOMEM;
 153
 154	return 0;
 155}
 156
 157void __cold extent_buffer_free_cachep(void)
 158{
 159	/*
 160	 * Make sure all delayed rcu free are flushed before we
 161	 * destroy caches.
 162	 */
 163	rcu_barrier();
 164	kmem_cache_destroy(extent_buffer_cache);
 165}
 166
 167void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 
 
 168{
 169	unsigned long index = start >> PAGE_SHIFT;
 170	unsigned long end_index = end >> PAGE_SHIFT;
 171	struct page *page;
 172
 173	while (index <= end_index) {
 174		page = find_get_page(inode->i_mapping, index);
 175		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 176		clear_page_dirty_for_io(page);
 177		put_page(page);
 178		index++;
 179	}
 180}
 181
 182static void process_one_page(struct btrfs_fs_info *fs_info,
 183			     struct page *page, struct page *locked_page,
 184			     unsigned long page_ops, u64 start, u64 end)
 185{
 186	struct folio *folio = page_folio(page);
 187	u32 len;
 188
 189	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
 190	len = end + 1 - start;
 191
 192	if (page_ops & PAGE_SET_ORDERED)
 193		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
 194	if (page_ops & PAGE_START_WRITEBACK) {
 195		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 196		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
 197	}
 198	if (page_ops & PAGE_END_WRITEBACK)
 199		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
 200
 201	if (page != locked_page && (page_ops & PAGE_UNLOCK))
 202		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 203}
 204
 205static void __process_pages_contig(struct address_space *mapping,
 206				   struct page *locked_page, u64 start, u64 end,
 207				   unsigned long page_ops)
 208{
 209	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
 210	pgoff_t start_index = start >> PAGE_SHIFT;
 211	pgoff_t end_index = end >> PAGE_SHIFT;
 212	pgoff_t index = start_index;
 213	struct folio_batch fbatch;
 214	int i;
 215
 216	folio_batch_init(&fbatch);
 217	while (index <= end_index) {
 218		int found_folios;
 219
 220		found_folios = filemap_get_folios_contig(mapping, &index,
 221				end_index, &fbatch);
 222		for (i = 0; i < found_folios; i++) {
 223			struct folio *folio = fbatch.folios[i];
 224
 225			process_one_page(fs_info, &folio->page, locked_page,
 226					 page_ops, start, end);
 227		}
 228		folio_batch_release(&fbatch);
 229		cond_resched();
 230	}
 231}
 232
 233static noinline void __unlock_for_delalloc(struct inode *inode,
 234					   struct page *locked_page,
 235					   u64 start, u64 end)
 236{
 237	unsigned long index = start >> PAGE_SHIFT;
 238	unsigned long end_index = end >> PAGE_SHIFT;
 239
 240	ASSERT(locked_page);
 241	if (index == locked_page->index && end_index == index)
 242		return;
 243
 244	__process_pages_contig(inode->i_mapping, locked_page, start, end,
 245			       PAGE_UNLOCK);
 246}
 247
 248static noinline int lock_delalloc_pages(struct inode *inode,
 249					struct page *locked_page,
 250					u64 start,
 251					u64 end)
 252{
 253	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 254	struct address_space *mapping = inode->i_mapping;
 255	pgoff_t start_index = start >> PAGE_SHIFT;
 256	pgoff_t end_index = end >> PAGE_SHIFT;
 257	pgoff_t index = start_index;
 258	u64 processed_end = start;
 259	struct folio_batch fbatch;
 260
 261	if (index == locked_page->index && index == end_index)
 262		return 0;
 263
 264	folio_batch_init(&fbatch);
 265	while (index <= end_index) {
 266		unsigned int found_folios, i;
 267
 268		found_folios = filemap_get_folios_contig(mapping, &index,
 269				end_index, &fbatch);
 270		if (found_folios == 0)
 271			goto out;
 272
 273		for (i = 0; i < found_folios; i++) {
 274			struct folio *folio = fbatch.folios[i];
 275			struct page *page = folio_page(folio, 0);
 276			u32 len = end + 1 - start;
 277
 278			if (page == locked_page)
 279				continue;
 280
 281			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
 282							  len))
 283				goto out;
 284
 285			if (!PageDirty(page) || page->mapping != mapping) {
 286				btrfs_folio_end_writer_lock(fs_info, folio, start,
 287							    len);
 288				goto out;
 289			}
 
 
 
 
 290
 291			processed_end = page_offset(page) + PAGE_SIZE - 1;
 292		}
 293		folio_batch_release(&fbatch);
 294		cond_resched();
 295	}
 296
 297	return 0;
 298out:
 299	folio_batch_release(&fbatch);
 300	if (processed_end > start)
 301		__unlock_for_delalloc(inode, locked_page, start, processed_end);
 
 302	return -EAGAIN;
 303}
 304
 305/*
 306 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 307 * more than @max_bytes.
 308 *
 309 * @start:	The original start bytenr to search.
 310 *		Will store the extent range start bytenr.
 311 * @end:	The original end bytenr of the search range
 312 *		Will store the extent range end bytenr.
 313 *
 314 * Return true if we find a delalloc range which starts inside the original
 315 * range, and @start/@end will store the delalloc range start/end.
 316 *
 317 * Return false if we can't find any delalloc range which starts inside the
 318 * original range, and @start/@end will be the non-delalloc range start/end.
 319 */
 320EXPORT_FOR_TESTS
 321noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 322				    struct page *locked_page, u64 *start,
 323				    u64 *end)
 324{
 325	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 326	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 327	const u64 orig_start = *start;
 328	const u64 orig_end = *end;
 329	/* The sanity tests may not set a valid fs_info. */
 330	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
 331	u64 delalloc_start;
 332	u64 delalloc_end;
 333	bool found;
 334	struct extent_state *cached_state = NULL;
 335	int ret;
 336	int loops = 0;
 337
 338	/* Caller should pass a valid @end to indicate the search range end */
 339	ASSERT(orig_end > orig_start);
 340
 341	/* The range should at least cover part of the page */
 342	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
 343		 orig_end <= page_offset(locked_page)));
 344again:
 345	/* step one, find a bunch of delalloc bytes starting at start */
 346	delalloc_start = *start;
 347	delalloc_end = 0;
 348	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 349					  max_bytes, &cached_state);
 350	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
 351		*start = delalloc_start;
 352
 353		/* @delalloc_end can be -1, never go beyond @orig_end */
 354		*end = min(delalloc_end, orig_end);
 355		free_extent_state(cached_state);
 356		return false;
 357	}
 358
 359	/*
 360	 * start comes from the offset of locked_page.  We have to lock
 361	 * pages in order, so we can't process delalloc bytes before
 362	 * locked_page
 363	 */
 364	if (delalloc_start < *start)
 365		delalloc_start = *start;
 366
 367	/*
 368	 * make sure to limit the number of pages we try to lock down
 369	 */
 370	if (delalloc_end + 1 - delalloc_start > max_bytes)
 371		delalloc_end = delalloc_start + max_bytes - 1;
 372
 373	/* step two, lock all the pages after the page that has start */
 374	ret = lock_delalloc_pages(inode, locked_page,
 375				  delalloc_start, delalloc_end);
 376	ASSERT(!ret || ret == -EAGAIN);
 377	if (ret == -EAGAIN) {
 378		/* some of the pages are gone, lets avoid looping by
 379		 * shortening the size of the delalloc range we're searching
 380		 */
 381		free_extent_state(cached_state);
 382		cached_state = NULL;
 383		if (!loops) {
 384			max_bytes = PAGE_SIZE;
 385			loops = 1;
 386			goto again;
 387		} else {
 388			found = false;
 389			goto out_failed;
 390		}
 391	}
 392
 393	/* step three, lock the state bits for the whole range */
 394	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 395
 396	/* then test to make sure it is all still delalloc */
 397	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 398			     EXTENT_DELALLOC, cached_state);
 
 
 399	if (!ret) {
 400		unlock_extent(tree, delalloc_start, delalloc_end,
 401			      &cached_state);
 402		__unlock_for_delalloc(inode, locked_page,
 403			      delalloc_start, delalloc_end);
 404		cond_resched();
 405		goto again;
 406	}
 407	free_extent_state(cached_state);
 408	*start = delalloc_start;
 409	*end = delalloc_end;
 410out_failed:
 411	return found;
 412}
 413
 414void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 415				  struct page *locked_page,
 
 416				  u32 clear_bits, unsigned long page_ops)
 417{
 418	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
 419
 420	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
 421			       start, end, page_ops);
 422}
 423
 424static bool btrfs_verify_page(struct page *page, u64 start)
 425{
 426	if (!fsverity_active(page->mapping->host) ||
 427	    PageUptodate(page) ||
 428	    start >= i_size_read(page->mapping->host))
 
 
 429		return true;
 430	return fsverity_verify_page(page);
 431}
 432
 433static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
 434{
 435	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
 436	struct folio *folio = page_folio(page);
 437
 438	ASSERT(page_offset(page) <= start &&
 439	       start + len <= page_offset(page) + PAGE_SIZE);
 440
 441	if (uptodate && btrfs_verify_page(page, start))
 442		btrfs_folio_set_uptodate(fs_info, folio, start, len);
 443	else
 444		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
 445
 446	if (!btrfs_is_subpage(fs_info, page->mapping))
 447		unlock_page(page);
 448	else
 449		btrfs_subpage_end_reader(fs_info, folio, start, len);
 450}
 451
 452/*
 453 * After a write IO is done, we need to:
 454 *
 455 * - clear the uptodate bits on error
 456 * - clear the writeback bits in the extent tree for the range
 457 * - filio_end_writeback()  if there is no more pending io for the folio
 458 *
 459 * Scheduling is not allowed, so the extent state tree is expected
 460 * to have one and only one object corresponding to this IO.
 461 */
 462static void end_bbio_data_write(struct btrfs_bio *bbio)
 463{
 464	struct btrfs_fs_info *fs_info = bbio->fs_info;
 465	struct bio *bio = &bbio->bio;
 466	int error = blk_status_to_errno(bio->bi_status);
 467	struct folio_iter fi;
 468	const u32 sectorsize = fs_info->sectorsize;
 469
 470	ASSERT(!bio_flagged(bio, BIO_CLONED));
 471	bio_for_each_folio_all(fi, bio) {
 472		struct folio *folio = fi.folio;
 473		u64 start = folio_pos(folio) + fi.offset;
 474		u32 len = fi.length;
 475
 476		/* Only order 0 (single page) folios are allowed for data. */
 477		ASSERT(folio_order(folio) == 0);
 478
 479		/* Our read/write should always be sector aligned. */
 480		if (!IS_ALIGNED(fi.offset, sectorsize))
 481			btrfs_err(fs_info,
 482		"partial page write in btrfs with offset %zu and length %zu",
 483				  fi.offset, fi.length);
 484		else if (!IS_ALIGNED(fi.length, sectorsize))
 485			btrfs_info(fs_info,
 486		"incomplete page write with offset %zu and length %zu",
 487				   fi.offset, fi.length);
 488
 489		btrfs_finish_ordered_extent(bbio->ordered,
 490				folio_page(folio, 0), start, len, !error);
 491		if (error)
 492			mapping_set_error(folio->mapping, error);
 493		btrfs_folio_clear_writeback(fs_info, folio, start, len);
 494	}
 495
 496	bio_put(bio);
 497}
 498
 499/*
 500 * Record previously processed extent range
 501 *
 502 * For endio_readpage_release_extent() to handle a full extent range, reducing
 503 * the extent io operations.
 504 */
 505struct processed_extent {
 506	struct btrfs_inode *inode;
 507	/* Start of the range in @inode */
 508	u64 start;
 509	/* End of the range in @inode */
 510	u64 end;
 511	bool uptodate;
 512};
 513
 514/*
 515 * Try to release processed extent range
 516 *
 517 * May not release the extent range right now if the current range is
 518 * contiguous to processed extent.
 519 *
 520 * Will release processed extent when any of @inode, @uptodate, the range is
 521 * no longer contiguous to the processed range.
 522 *
 523 * Passing @inode == NULL will force processed extent to be released.
 524 */
 525static void endio_readpage_release_extent(struct processed_extent *processed,
 526			      struct btrfs_inode *inode, u64 start, u64 end,
 527			      bool uptodate)
 528{
 529	struct extent_state *cached = NULL;
 530	struct extent_io_tree *tree;
 531
 532	/* The first extent, initialize @processed */
 533	if (!processed->inode)
 534		goto update;
 535
 536	/*
 537	 * Contiguous to processed extent, just uptodate the end.
 538	 *
 539	 * Several things to notice:
 540	 *
 541	 * - bio can be merged as long as on-disk bytenr is contiguous
 542	 *   This means we can have page belonging to other inodes, thus need to
 543	 *   check if the inode still matches.
 544	 * - bvec can contain range beyond current page for multi-page bvec
 545	 *   Thus we need to do processed->end + 1 >= start check
 546	 */
 547	if (processed->inode == inode && processed->uptodate == uptodate &&
 548	    processed->end + 1 >= start && end >= processed->end) {
 549		processed->end = end;
 550		return;
 551	}
 552
 553	tree = &processed->inode->io_tree;
 554	/*
 555	 * Now we don't have range contiguous to the processed range, release
 556	 * the processed range now.
 557	 */
 558	unlock_extent(tree, processed->start, processed->end, &cached);
 559
 560update:
 561	/* Update processed to current range */
 562	processed->inode = inode;
 563	processed->start = start;
 564	processed->end = end;
 565	processed->uptodate = uptodate;
 566}
 567
 568static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 569{
 570	struct folio *folio = page_folio(page);
 571
 572	ASSERT(folio_test_locked(folio));
 573	if (!btrfs_is_subpage(fs_info, folio->mapping))
 574		return;
 575
 576	ASSERT(folio_test_private(folio));
 577	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
 578}
 579
 580/*
 581 * After a data read IO is done, we need to:
 582 *
 583 * - clear the uptodate bits on error
 584 * - set the uptodate bits if things worked
 585 * - set the folio up to date if all extents in the tree are uptodate
 586 * - clear the lock bit in the extent tree
 587 * - unlock the folio if there are no other extents locked for it
 588 *
 589 * Scheduling is not allowed, so the extent state tree is expected
 590 * to have one and only one object corresponding to this IO.
 591 */
 592static void end_bbio_data_read(struct btrfs_bio *bbio)
 593{
 594	struct btrfs_fs_info *fs_info = bbio->fs_info;
 595	struct bio *bio = &bbio->bio;
 596	struct processed_extent processed = { 0 };
 597	struct folio_iter fi;
 598	const u32 sectorsize = fs_info->sectorsize;
 599
 600	ASSERT(!bio_flagged(bio, BIO_CLONED));
 601	bio_for_each_folio_all(fi, &bbio->bio) {
 602		bool uptodate = !bio->bi_status;
 603		struct folio *folio = fi.folio;
 604		struct inode *inode = folio->mapping->host;
 605		u64 start;
 606		u64 end;
 607		u32 len;
 608
 609		/* For now only order 0 folios are supported for data. */
 610		ASSERT(folio_order(folio) == 0);
 611		btrfs_debug(fs_info,
 612			"%s: bi_sector=%llu, err=%d, mirror=%u",
 613			__func__, bio->bi_iter.bi_sector, bio->bi_status,
 614			bbio->mirror_num);
 615
 616		/*
 617		 * We always issue full-sector reads, but if some block in a
 618		 * folio fails to read, blk_update_request() will advance
 619		 * bv_offset and adjust bv_len to compensate.  Print a warning
 620		 * for unaligned offsets, and an error if they don't add up to
 621		 * a full sector.
 622		 */
 623		if (!IS_ALIGNED(fi.offset, sectorsize))
 624			btrfs_err(fs_info,
 625		"partial page read in btrfs with offset %zu and length %zu",
 626				  fi.offset, fi.length);
 627		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
 628			btrfs_info(fs_info,
 629		"incomplete page read with offset %zu and length %zu",
 630				   fi.offset, fi.length);
 631
 632		start = folio_pos(folio) + fi.offset;
 633		end = start + fi.length - 1;
 634		len = fi.length;
 635
 636		if (likely(uptodate)) {
 637			loff_t i_size = i_size_read(inode);
 638			pgoff_t end_index = i_size >> folio_shift(folio);
 639
 640			/*
 641			 * Zero out the remaining part if this range straddles
 642			 * i_size.
 643			 *
 644			 * Here we should only zero the range inside the folio,
 645			 * not touch anything else.
 646			 *
 647			 * NOTE: i_size is exclusive while end is inclusive.
 648			 */
 649			if (folio_index(folio) == end_index && i_size <= end) {
 650				u32 zero_start = max(offset_in_folio(folio, i_size),
 651						     offset_in_folio(folio, start));
 652				u32 zero_len = offset_in_folio(folio, end) + 1 -
 653					       zero_start;
 654
 655				folio_zero_range(folio, zero_start, zero_len);
 656			}
 657		}
 658
 659		/* Update page status and unlock. */
 660		end_page_read(folio_page(folio, 0), uptodate, start, len);
 661		endio_readpage_release_extent(&processed, BTRFS_I(inode),
 662					      start, end, uptodate);
 663	}
 664	/* Release the last extent */
 665	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
 666	bio_put(bio);
 667}
 668
 669/*
 670 * Populate every free slot in a provided array with pages.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 671 *
 672 * @nr_pages:   number of pages to allocate
 673 * @page_array: the array to fill with pages; any existing non-null entries in
 674 * 		the array will be skipped
 675 * @extra_gfp:	the extra GFP flags for the allocation.
 676 *
 677 * Return: 0        if all pages were able to be allocated;
 678 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 679 *                  the array slots zeroed
 680 */
 681int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 682			   gfp_t extra_gfp)
 683{
 684	const gfp_t gfp = GFP_NOFS | extra_gfp;
 685	unsigned int allocated;
 686
 687	for (allocated = 0; allocated < nr_pages;) {
 688		unsigned int last = allocated;
 689
 690		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
 691		if (unlikely(allocated == last)) {
 692			/* No progress, fail and do cleanup. */
 693			for (int i = 0; i < allocated; i++) {
 694				__free_page(page_array[i]);
 695				page_array[i] = NULL;
 696			}
 697			return -ENOMEM;
 698		}
 699	}
 700	return 0;
 701}
 702
 703/*
 704 * Populate needed folios for the extent buffer.
 705 *
 706 * For now, the folios populated are always in order 0 (aka, single page).
 707 */
 708static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
 709{
 710	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
 711	int num_pages = num_extent_pages(eb);
 712	int ret;
 713
 714	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
 715	if (ret < 0)
 716		return ret;
 717
 718	for (int i = 0; i < num_pages; i++)
 719		eb->folios[i] = page_folio(page_array[i]);
 720	eb->folio_size = PAGE_SIZE;
 721	eb->folio_shift = PAGE_SHIFT;
 722	return 0;
 723}
 724
 725static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
 726				struct page *page, u64 disk_bytenr,
 727				unsigned int pg_offset)
 728{
 729	struct bio *bio = &bio_ctrl->bbio->bio;
 730	struct bio_vec *bvec = bio_last_bvec_all(bio);
 731	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
 732
 733	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
 734		/*
 735		 * For compression, all IO should have its logical bytenr set
 736		 * to the starting bytenr of the compressed extent.
 737		 */
 738		return bio->bi_iter.bi_sector == sector;
 739	}
 740
 741	/*
 742	 * The contig check requires the following conditions to be met:
 743	 *
 744	 * 1) The pages belong to the same inode
 745	 *    This is implied by the call chain.
 746	 *
 747	 * 2) The range has adjacent logical bytenr
 748	 *
 749	 * 3) The range has adjacent file offset
 750	 *    This is required for the usage of btrfs_bio->file_offset.
 751	 */
 752	return bio_end_sector(bio) == sector &&
 753		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
 754		page_offset(page) + pg_offset;
 755}
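/*
 * Illustrative example (not from the original file): with 4K pages, a page
 * at file offset 12K written to disk_bytenr 1M followed by a page at file
 * offset 16K aimed at 1M + 4K passes both checks above and is added to the
 * same bio.  If either the logical bytenr or the file offset does not line
 * up with the end of the last bvec, the caller submits the current bio and
 * starts a new one.
 */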
 756
 757static void alloc_new_bio(struct btrfs_inode *inode,
 758			  struct btrfs_bio_ctrl *bio_ctrl,
 759			  u64 disk_bytenr, u64 file_offset)
 760{
 761	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 762	struct btrfs_bio *bbio;
 763
 764	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
 765			       bio_ctrl->end_io_func, NULL);
 766	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 767	bbio->inode = inode;
 768	bbio->file_offset = file_offset;
 769	bio_ctrl->bbio = bbio;
 770	bio_ctrl->len_to_oe_boundary = U32_MAX;
 771
 772	/* Limit data write bios to the ordered boundary. */
 773	if (bio_ctrl->wbc) {
 774		struct btrfs_ordered_extent *ordered;
 775
 776		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 777		if (ordered) {
 778			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
 779					ordered->file_offset +
 780					ordered->disk_num_bytes - file_offset);
 781			bbio->ordered = ordered;
 782		}
 783
 784		/*
 785		 * Pick the last added device to support cgroup writeback.  For
 786		 * multi-device file systems this means blk-cgroup policies have
 787		 * to always be set on the last added/replaced device.
 788		 * This is a bit odd but has been like that for a long time.
 789		 */
 790		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
 791		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
 792	}
 793}
 794
 795/*
 796 * @disk_bytenr: logical bytenr where the write will be
 797 * @page:	page to add to the bio
 798 * @size:	portion of page that we want to write to
 799 * @pg_offset:	offset of the new bio or to check whether we are adding
 800 *              a contiguous page to the previous one
 801 *
 802 * This will either add the page to the existing @bio_ctrl->bbio, or allocate a
 803 * new one in @bio_ctrl->bbio.
 804 * The mirror number for this IO should already be initialized in
 805 * @bio_ctrl->mirror_num.
 806 */
 807static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
 808			       u64 disk_bytenr, struct page *page,
 809			       size_t size, unsigned long pg_offset)
 810{
 811	struct btrfs_inode *inode = page_to_inode(page);
 812
 813	ASSERT(pg_offset + size <= PAGE_SIZE);
 814	ASSERT(bio_ctrl->end_io_func);
 815
 816	if (bio_ctrl->bbio &&
 817	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
 818		submit_one_bio(bio_ctrl);
 819
 820	do {
 821		u32 len = size;
 822
 823		/* Allocate new bio if needed */
 824		if (!bio_ctrl->bbio) {
 825			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
 826				      page_offset(page) + pg_offset);
 827		}
 828
 829		/* Cap to the current ordered extent boundary if there is one. */
 830		if (len > bio_ctrl->len_to_oe_boundary) {
 831			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
 832			ASSERT(is_data_inode(&inode->vfs_inode));
 833			len = bio_ctrl->len_to_oe_boundary;
 834		}
 835
 836		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
 837			/* bio full: move on to a new one */
 838			submit_one_bio(bio_ctrl);
 839			continue;
 840		}
 841
 842		if (bio_ctrl->wbc)
 843			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
 844
 845		size -= len;
 846		pg_offset += len;
 847		disk_bytenr += len;
 848
 849		/*
 850		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
 851		 * sector aligned.  alloc_new_bio() then sets it to the end of
 852		 * our ordered extent for writes into zoned devices.
 853		 *
 854		 * When len_to_oe_boundary is tracking an ordered extent, we
 855		 * trust the ordered extent code to align things properly, and
 856		 * the check above to cap our write to the ordered extent
 857		 * boundary is correct.
 858		 *
 859		 * When len_to_oe_boundary is U32_MAX, the cap above would
 860		 * result in a 4095 byte IO for the last page right before
 861		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
 862		 * the checks required to make sure we don't overflow the bio,
 863		 * and we should just ignore len_to_oe_boundary completely
 864		 * unless we're using it to track an ordered extent.
 865		 *
 866		 * It's pretty hard to make a bio sized U32_MAX, but it can
 867		 * happen when the page cache is able to feed us contiguous
 868		 * pages for large extents.
 869		 */
 870		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
 871			bio_ctrl->len_to_oe_boundary -= len;
 872
 873		/* Ordered extent boundary: move on to a new bio. */
 874		if (bio_ctrl->len_to_oe_boundary == 0)
 875			submit_one_bio(bio_ctrl);
 876	} while (size);
 877}
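/*
 * Illustrative caller sketch for submit_extent_page(), modelled on
 * btrfs_do_readpage() below; demo_read_one_page() is a hypothetical name and
 * the block is kept out of the build.
 */
#if 0
static void demo_read_one_page(struct btrfs_bio_ctrl *bio_ctrl,
			       struct page *page, u64 disk_bytenr)
{
	/* The end_io callback must be set before the first submission. */
	bio_ctrl->end_io_func = end_bbio_data_read;

	/* Queue the whole page; a new bbio is allocated on demand. */
	submit_extent_page(bio_ctrl, disk_bytenr, page, PAGE_SIZE, 0);

	/* Flush whatever bio was assembled. */
	submit_one_bio(bio_ctrl);
}
#endif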
 878
 879static int attach_extent_buffer_folio(struct extent_buffer *eb,
 880				      struct folio *folio,
 881				      struct btrfs_subpage *prealloc)
 882{
 883	struct btrfs_fs_info *fs_info = eb->fs_info;
 884	int ret = 0;
 885
 886	/*
 887	 * If the page is mapped to btree inode, we should hold the private
 888	 * lock to prevent race.
 889	 * For cloned or dummy extent buffers, their pages are not mapped and
 890	 * will not race with any other ebs.
 891	 */
 892	if (folio->mapping)
 893		lockdep_assert_held(&folio->mapping->i_private_lock);
 894
 895	if (fs_info->nodesize >= PAGE_SIZE) {
 896		if (!folio_test_private(folio))
 897			folio_attach_private(folio, eb);
 898		else
 899			WARN_ON(folio_get_private(folio) != eb);
 900		return 0;
 901	}
 902
 903	/* Already mapped, just free prealloc */
 904	if (folio_test_private(folio)) {
 905		btrfs_free_subpage(prealloc);
 906		return 0;
 907	}
 908
 909	if (prealloc)
 910		/* Has preallocated memory for subpage */
 911		folio_attach_private(folio, prealloc);
 912	else
 913		/* Do new allocation to attach subpage */
 914		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
 915	return ret;
 916}
 917
 918int set_page_extent_mapped(struct page *page)
 919{
 920	return set_folio_extent_mapped(page_folio(page));
 921}
 922
 923int set_folio_extent_mapped(struct folio *folio)
 924{
 925	struct btrfs_fs_info *fs_info;
 926
 927	ASSERT(folio->mapping);
 928
 929	if (folio_test_private(folio))
 930		return 0;
 931
 932	fs_info = folio_to_fs_info(folio);
 933
 934	if (btrfs_is_subpage(fs_info, folio->mapping))
 935		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 936
 937	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
 938	return 0;
 939}
 940
 941void clear_page_extent_mapped(struct page *page)
 942{
 943	struct folio *folio = page_folio(page);
 944	struct btrfs_fs_info *fs_info;
 945
 946	ASSERT(page->mapping);
 947
 948	if (!folio_test_private(folio))
 949		return;
 950
 951	fs_info = page_to_fs_info(page);
 952	if (btrfs_is_subpage(fs_info, page->mapping))
 953		return btrfs_detach_subpage(fs_info, folio);
 954
 955	folio_detach_private(folio);
 956}
 957
 958static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
 959		 u64 start, u64 len, struct extent_map **em_cached)
 960{
 961	struct extent_map *em;
 962
 963	ASSERT(em_cached);
 964
 965	if (*em_cached) {
 966		em = *em_cached;
 967		if (extent_map_in_tree(em) && start >= em->start &&
 968		    start < extent_map_end(em)) {
 969			refcount_inc(&em->refs);
 970			return em;
 971		}
 972
 973		free_extent_map(em);
 974		*em_cached = NULL;
 975	}
 976
 977	em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
 978	if (!IS_ERR(em)) {
 979		BUG_ON(*em_cached);
 980		refcount_inc(&em->refs);
 981		*em_cached = em;
 982	}
 983	return em;
 984}
 985/*
 986 * Basic readpage implementation.  Locked extent state structs are inserted
 987 * into the tree and removed when the IO is done (by the end_io
 988 * handlers).
 989 * XXX JDM: This needs looking at to ensure proper page locking
 990 * Return 0 on success, otherwise return an error.
 991 */
 992static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 993		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
 994{
 995	struct inode *inode = page->mapping->host;
 996	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 997	u64 start = page_offset(page);
 998	const u64 end = start + PAGE_SIZE - 1;
 999	u64 cur = start;
1000	u64 extent_offset;
1001	u64 last_byte = i_size_read(inode);
1002	u64 block_start;
1003	struct extent_map *em;
1004	int ret = 0;
1005	size_t pg_offset = 0;
1006	size_t iosize;
1007	size_t blocksize = fs_info->sectorsize;
1008	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1009
1010	ret = set_page_extent_mapped(page);
1011	if (ret < 0) {
1012		unlock_extent(tree, start, end, NULL);
1013		unlock_page(page);
1014		return ret;
1015	}
1016
1017	if (page->index == last_byte >> PAGE_SHIFT) {
1018		size_t zero_offset = offset_in_page(last_byte);
1019
1020		if (zero_offset) {
1021			iosize = PAGE_SIZE - zero_offset;
1022			memzero_page(page, zero_offset, iosize);
1023		}
1024	}
1025	bio_ctrl->end_io_func = end_bbio_data_read;
1026	begin_page_read(fs_info, page);
1027	while (cur <= end) {
1028		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1029		bool force_bio_submit = false;
1030		u64 disk_bytenr;
1031
1032		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1033		if (cur >= last_byte) {
1034			iosize = PAGE_SIZE - pg_offset;
1035			memzero_page(page, pg_offset, iosize);
1036			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1037			end_page_read(page, true, cur, iosize);
1038			break;
1039		}
1040		em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
1041		if (IS_ERR(em)) {
1042			unlock_extent(tree, cur, end, NULL);
1043			end_page_read(page, false, cur, end + 1 - cur);
1044			return PTR_ERR(em);
1045		}
1046		extent_offset = cur - em->start;
1047		BUG_ON(extent_map_end(em) <= cur);
1048		BUG_ON(end < cur);
1049
1050		compress_type = extent_map_compression(em);
1051
1052		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1053		iosize = ALIGN(iosize, blocksize);
1054		if (compress_type != BTRFS_COMPRESS_NONE)
1055			disk_bytenr = em->block_start;
1056		else
1057			disk_bytenr = em->block_start + extent_offset;
1058		block_start = em->block_start;
1059		if (em->flags & EXTENT_FLAG_PREALLOC)
1060			block_start = EXTENT_MAP_HOLE;
1061
1062		/*
1063		 * If we have a file range that points to a compressed extent
1064		 * and it's followed by a consecutive file range that points
1065		 * to the same compressed extent (possibly with a different
1066		 * offset and/or length, so it either points to the whole extent
1067		 * or only part of it), we must make sure we do not submit a
1068		 * single bio to populate the pages for the 2 ranges because
1069		 * this makes the compressed extent read zero out the pages
1070		 * belonging to the 2nd range. Imagine the following scenario:
1071		 *
1072		 *  File layout
1073		 *  [0 - 8K]                     [8K - 24K]
1074		 *    |                               |
1075		 *    |                               |
1076		 * points to extent X,         points to extent X,
1077		 * offset 4K, length of 8K     offset 0, length 16K
1078		 *
1079		 * [extent X, compressed length = 4K uncompressed length = 16K]
1080		 *
1081		 * If the bio to read the compressed extent covers both ranges,
1082		 * it will decompress extent X into the pages belonging to the
1083		 * first range and then it will stop, zeroing out the remaining
1084		 * pages that belong to the other range that points to extent X.
1085		 * So here we make sure we submit 2 bios, one for the first
1086		 * range and another one for the third range. Both will target
1087		 * the same physical extent from disk, but we can't currently
1088		 * make the compressed bio endio callback populate the pages
1089		 * for both ranges because each compressed bio is tightly
1090		 * coupled with a single extent map, and each range can have
1091		 * an extent map with a different offset value relative to the
1092		 * uncompressed data of our extent and different lengths. This
1093		 * is a corner case so we prioritize correctness over
1094		 * non-optimal behavior (submitting 2 bios for the same extent).
1095		 */
1096		if (compress_type != BTRFS_COMPRESS_NONE &&
1097		    prev_em_start && *prev_em_start != (u64)-1 &&
1098		    *prev_em_start != em->start)
1099			force_bio_submit = true;
1100
1101		if (prev_em_start)
1102			*prev_em_start = em->start;
1103
1104		free_extent_map(em);
1105		em = NULL;
1106
1107		/* we've found a hole, just zero and go on */
1108		if (block_start == EXTENT_MAP_HOLE) {
1109			memzero_page(page, pg_offset, iosize);
1110
1111			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1112			end_page_read(page, true, cur, iosize);
1113			cur = cur + iosize;
1114			pg_offset += iosize;
1115			continue;
1116		}
1117		/* the get_extent function already copied into the page */
1118		if (block_start == EXTENT_MAP_INLINE) {
1119			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1120			end_page_read(page, true, cur, iosize);
1121			cur = cur + iosize;
1122			pg_offset += iosize;
1123			continue;
1124		}
1125
1126		if (bio_ctrl->compress_type != compress_type) {
1127			submit_one_bio(bio_ctrl);
1128			bio_ctrl->compress_type = compress_type;
1129		}
1130
1131		if (force_bio_submit)
1132			submit_one_bio(bio_ctrl);
1133		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1134				   pg_offset);
1135		cur = cur + iosize;
1136		pg_offset += iosize;
1137	}
1138
1139	return 0;
1140}
1141
1142int btrfs_read_folio(struct file *file, struct folio *folio)
1143{
1144	struct page *page = &folio->page;
1145	struct btrfs_inode *inode = page_to_inode(page);
1146	u64 start = page_offset(page);
1147	u64 end = start + PAGE_SIZE - 1;
1148	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1149	struct extent_map *em_cached = NULL;
1150	int ret;
1151
1152	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1153
1154	ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
1155	free_extent_map(em_cached);
1156
1157	/*
1158	 * If btrfs_do_readpage() failed we will want to submit the assembled
1159	 * bio to do the cleanup.
1160	 */
1161	submit_one_bio(&bio_ctrl);
1162	return ret;
1163}
1164
1165static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1166					u64 start, u64 end,
1167					struct extent_map **em_cached,
1168					struct btrfs_bio_ctrl *bio_ctrl,
1169					u64 *prev_em_start)
1170{
1171	struct btrfs_inode *inode = page_to_inode(pages[0]);
1172	int index;
1173
1174	ASSERT(em_cached);
1175
1176	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1177
1178	for (index = 0; index < nr_pages; index++) {
1179		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1180				  prev_em_start);
1181		put_page(pages[index]);
1182	}
1183}
1184
1185/*
1186 * helper for __extent_writepage, doing all of the delayed allocation setup.
1187 *
1188 * This returns 1 if the btrfs_run_delalloc_range() function did all the work required
1189 * to write the page (copy into inline extent).  In this case the IO has
1190 * been started and the page is already unlocked.
1191 *
1192 * This returns 0 if all went well (page still locked)
1193 * This returns < 0 if there were errors (page still locked)
1194 */
1195static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1196		struct page *page, struct writeback_control *wbc)
1197{
1198	const u64 page_start = page_offset(page);
1199	const u64 page_end = page_start + PAGE_SIZE - 1;
1200	u64 delalloc_start = page_start;
1201	u64 delalloc_end = page_end;
1202	u64 delalloc_to_write = 0;
1203	int ret = 0;
1204
1205	while (delalloc_start < page_end) {
1206		delalloc_end = page_end;
1207		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1208					      &delalloc_start, &delalloc_end)) {
1209			delalloc_start = delalloc_end + 1;
1210			continue;
1211		}
1212
1213		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1214					       delalloc_end, wbc);
1215		if (ret < 0)
1216			return ret;
1217
1218		delalloc_start = delalloc_end + 1;
1219	}
1220
1221	/*
1222	 * delalloc_end is already one less than the total length, so
1223	 * we don't subtract one from PAGE_SIZE
1224	 */
1225	delalloc_to_write +=
1226		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1227
1228	/*
1229	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1230	 * the pages, we just need to account for them here.
1231	 */
1232	if (ret == 1) {
1233		wbc->nr_to_write -= delalloc_to_write;
1234		return 1;
1235	}
1236
1237	if (wbc->nr_to_write < delalloc_to_write) {
1238		int thresh = 8192;
1239
1240		if (delalloc_to_write < thresh * 2)
1241			thresh = delalloc_to_write;
1242		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1243					 thresh);
1244	}
1245
1246	return 0;
1247}
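/*
 * Illustrative numbers for the nr_to_write clamping above (not from the
 * original file): with delalloc_to_write = 20000 pages and an incoming
 * wbc->nr_to_write of 1024, the threshold stays at 8192 because
 * 20000 >= 2 * 8192, so nr_to_write is raised to min(20000, 8192) = 8192 and
 * the rest of the delalloc range is left to a later writeback pass.
 */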
1248
1249/*
1250 * Find the first byte we need to write.
1251 *
1252 * For subpage, one page can contain several sectors, and
1253 * __extent_writepage_io() will just grab all extent maps in the page
1254 * range and try to submit all non-inline/non-compressed extents.
1255 *
1256 * This is a big problem for subpage, we shouldn't re-submit already written
1257 * data at all.
1258 * This function will look up the subpage dirty bitmap to find which range we really
1259 * need to submit.
1260 *
1261 * Return the next dirty range in [@start, @end).
1262 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1263 */
1264static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1265				 struct page *page, u64 *start, u64 *end)
1266{
1267	struct folio *folio = page_folio(page);
1268	struct btrfs_subpage *subpage = folio_get_private(folio);
1269	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1270	u64 orig_start = *start;
1271	/* Declare as unsigned long so we can use bitmap ops */
1272	unsigned long flags;
1273	int range_start_bit;
1274	int range_end_bit;
1275
1276	/*
1277	 * For regular sector size == page size case, since one page only
1278	 * contains one sector, we return the page offset directly.
1279	 */
1280	if (!btrfs_is_subpage(fs_info, page->mapping)) {
1281		*start = page_offset(page);
1282		*end = page_offset(page) + PAGE_SIZE;
1283		return;
1284	}
1285
1286	range_start_bit = spi->dirty_offset +
1287			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1288
1289	/* We should have the page locked, but just in case */
1290	spin_lock_irqsave(&subpage->lock, flags);
1291	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1292			       spi->dirty_offset + spi->bitmap_nr_bits);
1293	spin_unlock_irqrestore(&subpage->lock, flags);
1294
1295	range_start_bit -= spi->dirty_offset;
1296	range_end_bit -= spi->dirty_offset;
1297
1298	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1299	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1300}
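/*
 * Illustrative example for the subpage lookup above (assumes 4K sectors in a
 * 64K page; not from the original file): if only sectors 2 and 3 are dirty
 * and the search starts at the page offset, bitmap_next_set_region() yields
 * bits [2, 4), so *start = page_offset(page) + 8K and
 * *end = page_offset(page) + 16K, and the clean sectors around that range
 * are never resubmitted.
 */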
1301
1302/*
1303 * helper for __extent_writepage.  This calls the writepage start hooks,
1304 * and does the loop to map the page into extents and bios.
1305 *
1306 * We return 1 if the IO is started and the page is unlocked,
1307 * 0 if all went well (page still locked)
1308 * < 0 if there were errors (page still locked)
1309 */
1310static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1311				 struct page *page,
1312				 struct btrfs_bio_ctrl *bio_ctrl,
1313				 loff_t i_size,
1314				 int *nr_ret)
1315{
1316	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1317	u64 cur = page_offset(page);
1318	u64 end = cur + PAGE_SIZE - 1;
1319	u64 extent_offset;
1320	u64 block_start;
1321	struct extent_map *em;
1322	int ret = 0;
1323	int nr = 0;
1324
1325	ret = btrfs_writepage_cow_fixup(page);
1326	if (ret) {
1327		/* Fixup worker will requeue */
1328		redirty_page_for_writepage(bio_ctrl->wbc, page);
1329		unlock_page(page);
1330		return 1;
1331	}
1332
1333	bio_ctrl->end_io_func = end_bbio_data_write;
1334	while (cur <= end) {
1335		u32 len = end - cur + 1;
1336		u64 disk_bytenr;
1337		u64 em_end;
1338		u64 dirty_range_start = cur;
1339		u64 dirty_range_end;
1340		u32 iosize;
1341
1342		if (cur >= i_size) {
1343			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1344						       true);
1345			/*
1346			 * This range is beyond i_size, thus we don't need to
1347			 * bother writing back.
1348			 * But we still need to clear the dirty subpage bit, or
1349			 * the next time the page gets dirtied, we will try to
1350			 * writeback the sectors with subpage dirty bits,
1351			 * causing writeback without ordered extent.
1352			 */
1353			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1354			break;
1355		}
1356
1357		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1358				     &dirty_range_end);
1359		if (cur < dirty_range_start) {
1360			cur = dirty_range_start;
1361			continue;
1362		}
1363
1364		em = btrfs_get_extent(inode, NULL, cur, len);
1365		if (IS_ERR(em)) {
1366			ret = PTR_ERR_OR_ZERO(em);
1367			goto out_error;
1368		}
1369
1370		extent_offset = cur - em->start;
1371		em_end = extent_map_end(em);
1372		ASSERT(cur <= em_end);
1373		ASSERT(cur < end);
1374		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1375		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1376
1377		block_start = em->block_start;
1378		disk_bytenr = em->block_start + extent_offset;
1379
1380		ASSERT(!extent_map_is_compressed(em));
1381		ASSERT(block_start != EXTENT_MAP_HOLE);
1382		ASSERT(block_start != EXTENT_MAP_INLINE);
1383
1384		/*
1385		 * Note that em_end from extent_map_end() and dirty_range_end from
1386		 * find_next_dirty_byte() are all exclusive
1387		 */
1388		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1389		free_extent_map(em);
1390		em = NULL;
1391
1392		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1393		if (!PageWriteback(page)) {
1394			btrfs_err(inode->root->fs_info,
1395				   "page %lu not writeback, cur %llu end %llu",
1396			       page->index, cur, end);
1397		}
1398
1399		/*
1400		 * Although the PageDirty bit is cleared before entering this
1401		 * function, subpage dirty bit is not cleared.
1402		 * So clear subpage dirty bit here so next time we won't submit
1403		 * page for range already written to disk.
1404		 */
1405		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1406
1407		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1408				   cur - page_offset(page));
1409		cur += iosize;
1410		nr++;
1411	}
1412
1413	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1414	*nr_ret = nr;
1415	return 0;
1416
1417out_error:
1418	/*
1419	 * If we finish without problem, we should not only clear page dirty,
1420	 * but also empty subpage dirty bits
1421	 */
1422	*nr_ret = nr;
1423	return ret;
1424}
1425
1426/*
1427 * the writepage semantics are similar to regular writepage.  extent
1428 * records are inserted to lock ranges in the tree, and as dirty areas
1429 * are found, they are marked writeback.  Then the lock bits are removed
1430 * and the end_io handler clears the writeback ranges
1431 *
1432 * Return 0 if everything goes well.
1433 * Return <0 for error.
1434 */
1435static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1436{
1437	struct folio *folio = page_folio(page);
1438	struct inode *inode = page->mapping->host;
1439	const u64 page_start = page_offset(page);
1440	int ret;
1441	int nr = 0;
1442	size_t pg_offset;
1443	loff_t i_size = i_size_read(inode);
1444	unsigned long end_index = i_size >> PAGE_SHIFT;
1445
1446	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1447
1448	WARN_ON(!PageLocked(page));
1449
1450	pg_offset = offset_in_page(i_size);
1451	if (page->index > end_index ||
1452	   (page->index == end_index && !pg_offset)) {
1453		folio_invalidate(folio, 0, folio_size(folio));
1454		folio_unlock(folio);
1455		return 0;
1456	}
1457
1458	if (page->index == end_index)
1459		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1460
1461	ret = set_page_extent_mapped(page);
1462	if (ret < 0)
1463		goto done;
1464
1465	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1466	if (ret == 1)
1467		return 0;
1468	if (ret)
1469		goto done;
1470
1471	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1472	if (ret == 1)
1473		return 0;
1474
1475	bio_ctrl->wbc->nr_to_write--;
1476
1477done:
1478	if (nr == 0) {
1479		/* make sure the mapping tag for page dirty gets cleared */
1480		set_page_writeback(page);
1481		end_page_writeback(page);
1482	}
1483	if (ret) {
1484		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1485					       PAGE_SIZE, !ret);
1486		mapping_set_error(page->mapping, ret);
1487	}
1488	unlock_page(page);
1489	ASSERT(ret <= 0);
1490	return ret;
1491}
1492
1493void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1494{
1495	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1496		       TASK_UNINTERRUPTIBLE);
1497}
1498
1499/*
1500 * Lock extent buffer status and pages for writeback.
1501 *
1502 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1503 * extent buffer is not dirty)
1504 * Return %true if the extent buffer is submitted to bio.
1505 */
1506static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1507			  struct writeback_control *wbc)
1508{
1509	struct btrfs_fs_info *fs_info = eb->fs_info;
1510	bool ret = false;
1511
1512	btrfs_tree_lock(eb);
1513	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1514		btrfs_tree_unlock(eb);
1515		if (wbc->sync_mode != WB_SYNC_ALL)
1516			return false;
1517		wait_on_extent_buffer_writeback(eb);
1518		btrfs_tree_lock(eb);
1519	}
1520
1521	/*
1522	 * We need to do this to prevent races with anyone checking whether the eb
1523	 * is under IO, since we can end up having no IO bits set for a short
1524	 * period of time.
1525	 */
1526	spin_lock(&eb->refs_lock);
1527	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1528		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1529		spin_unlock(&eb->refs_lock);
1530		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1531		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1532					 -eb->len,
1533					 fs_info->dirty_metadata_batch);
1534		ret = true;
1535	} else {
1536		spin_unlock(&eb->refs_lock);
1537	}
1538	btrfs_tree_unlock(eb);
1539	return ret;
1540}
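/*
 * Illustrative caller sketch for lock_extent_buffer_for_io(); this mirrors
 * what submit_eb_subpage() and submit_eb_page() below do.  The helper name
 * is hypothetical, the block is kept out of the build, and write_one_eb() is
 * defined further below in this file.
 */
#if 0
static void demo_write_one_dirty_eb(struct extent_buffer *eb,
				    struct writeback_control *wbc)
{
	/* Only submit the eb if it was dirty and is now marked for writeback. */
	if (lock_extent_buffer_for_io(eb, wbc))
		write_one_eb(eb, wbc);
	free_extent_buffer(eb);
}
#endif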
1541
1542static void set_btree_ioerr(struct extent_buffer *eb)
1543{
1544	struct btrfs_fs_info *fs_info = eb->fs_info;
1545
1546	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1547
1548	/*
1549	 * A read may stumble upon this buffer later, make sure that it gets an
1550	 * error and knows there was an error.
1551	 */
1552	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1553
1554	/*
1555	 * We need to set the mapping with the io error as well because a write
1556	 * error will flip the file system readonly, and then syncfs() will
1557	 * return a 0 because we are readonly if we don't modify the err seq for
1558	 * the superblock.
1559	 */
1560	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1561
1562	/*
1563	 * If writeback for a btree extent that doesn't belong to a log tree
1564	 * failed, increment the counter transaction->eb_write_errors.
1565	 * We do this because while the transaction is running and before it's
1566	 * committing (when we call filemap_fdata[write|wait]_range against
1567	 * the btree inode), we might have
1568	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1569	 * returns an error or an error happens during writeback, when we're
1570	 * committing the transaction we wouldn't know about it, since the pages
1571	 * can be no longer dirty nor marked anymore for writeback (if a
1572	 * subsequent modification to the extent buffer didn't happen before the
1573	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1574	 * able to find the pages tagged with SetPageError at transaction
1575	 * commit time. So if this happens we must abort the transaction,
1576	 * otherwise we commit a super block with btree roots that point to
1577	 * btree nodes/leafs whose content on disk is invalid - either garbage
1578	 * or the content of some node/leaf from a past generation that got
1579	 * cowed or deleted and is no longer valid.
1580	 *
1581	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1582	 * not be enough - we need to distinguish between log tree extents vs
1583	 * non-log tree extents, and the next filemap_fdatawait_range() call
1584	 * will catch and clear such errors in the mapping - and that call might
1585	 * be from a log sync and not from a transaction commit. Also, checking
1586	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1587	 * not done and would not be reliable - the eb might have been released
1588	 * from memory and reading it back again means that flag would not be
1589	 * set (since it's a runtime flag, not persisted on disk).
1590	 *
1591	 * Using the flags below in the btree inode also makes us achieve the
1592	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1593	 * writeback for all dirty pages and before filemap_fdatawait_range()
1594	 * is called, the writeback for all dirty pages had already finished
1595	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1596	 * filemap_fdatawait_range() would return success, as it could not know
1597	 * that writeback errors happened (the pages were no longer tagged for
1598	 * writeback).
1599	 */
1600	switch (eb->log_index) {
1601	case -1:
1602		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1603		break;
1604	case 0:
1605		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1606		break;
1607	case 1:
1608		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1609		break;
1610	default:
1611		BUG(); /* unexpected, logic error */
1612	}
1613}
1614
1615/*
1616 * The endio specific version which won't touch any unsafe spinlock in endio
1617 * context.
1618 */
1619static struct extent_buffer *find_extent_buffer_nolock(
1620		struct btrfs_fs_info *fs_info, u64 start)
1621{
1622	struct extent_buffer *eb;
1623
1624	rcu_read_lock();
1625	eb = radix_tree_lookup(&fs_info->buffer_radix,
1626			       start >> fs_info->sectorsize_bits);
1627	if (eb && atomic_inc_not_zero(&eb->refs)) {
1628		rcu_read_unlock();
1629		return eb;
1630	}
1631	rcu_read_unlock();
1632	return NULL;
1633}
1634
1635static void end_bbio_meta_write(struct btrfs_bio *bbio)
1636{
1637	struct extent_buffer *eb = bbio->private;
1638	struct btrfs_fs_info *fs_info = eb->fs_info;
1639	bool uptodate = !bbio->bio.bi_status;
1640	struct folio_iter fi;
1641	u32 bio_offset = 0;
1642
1643	if (!uptodate)
1644		set_btree_ioerr(eb);
1645
1646	bio_for_each_folio_all(fi, &bbio->bio) {
1647		u64 start = eb->start + bio_offset;
1648		struct folio *folio = fi.folio;
1649		u32 len = fi.length;
1650
1651		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1652		bio_offset += len;
1653	}
1654
1655	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1656	smp_mb__after_atomic();
1657	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1658
1659	bio_put(&bbio->bio);
1660}
1661
1662static void prepare_eb_write(struct extent_buffer *eb)
1663{
1664	u32 nritems;
1665	unsigned long start;
1666	unsigned long end;
1667
1668	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1669
1670	/* Set btree blocks beyond nritems with 0 to avoid stale content */
1671	nritems = btrfs_header_nritems(eb);
1672	if (btrfs_header_level(eb) > 0) {
1673		end = btrfs_node_key_ptr_offset(eb, nritems);
1674		memzero_extent_buffer(eb, end, eb->len - end);
1675	} else {
1676		/*
1677		 * Leaf:
1678		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1679		 */
1680		start = btrfs_item_nr_offset(eb, nritems);
1681		end = btrfs_item_nr_offset(eb, 0);
1682		if (nritems == 0)
1683			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1684		else
1685			end += btrfs_item_offset(eb, nritems - 1);
1686		memzero_extent_buffer(eb, start, end - start);
1687	}
1688}
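/*
 * Illustrative note for the leaf case above (not from the original file):
 * with nritems = 2 the zeroed span runs from btrfs_item_nr_offset(eb, 2),
 * just past the last item header, up to btrfs_item_nr_offset(eb, 0) +
 * btrfs_item_offset(eb, 1), the lowest-addressed byte of item data.  That is
 * exactly the unused gap between the item headers filled from the front of
 * the leaf and the item data packed from its tail.
 */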
1689
1690static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1691					    struct writeback_control *wbc)
1692{
1693	struct btrfs_fs_info *fs_info = eb->fs_info;
1694	struct btrfs_bio *bbio;
1695
1696	prepare_eb_write(eb);
1697
1698	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1699			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1700			       eb->fs_info, end_bbio_meta_write, eb);
1701	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1702	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1703	wbc_init_bio(wbc, &bbio->bio);
1704	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1705	bbio->file_offset = eb->start;
1706	if (fs_info->nodesize < PAGE_SIZE) {
1707		struct folio *folio = eb->folios[0];
1708		bool ret;
1709
1710		folio_lock(folio);
1711		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1712		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1713						       eb->len)) {
1714			folio_clear_dirty_for_io(folio);
1715			wbc->nr_to_write--;
1716		}
1717		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1718				    eb->start - folio_pos(folio));
1719		ASSERT(ret);
1720		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1721		folio_unlock(folio);
1722	} else {
1723		int num_folios = num_extent_folios(eb);
1724
1725		for (int i = 0; i < num_folios; i++) {
1726			struct folio *folio = eb->folios[i];
1727			bool ret;
1728
1729			folio_lock(folio);
1730			folio_clear_dirty_for_io(folio);
1731			folio_start_writeback(folio);
1732			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1733			ASSERT(ret);
1734			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1735						 eb->folio_size);
1736			wbc->nr_to_write -= folio_nr_pages(folio);
1737			folio_unlock(folio);
1738		}
1739	}
1740	btrfs_submit_bio(bbio, 0);
1741}
1742
1743/*
1744 * Submit one subpage btree page.
1745 *
1746 * The main differences from submit_eb_page() are:
1747 * - Page locking
1748 *   For subpage, we don't rely on page locking at all.
1749 *
1750 * - Flush write bio
1751 *   We only flush the bio if we may be unable to fit the current extent buffers
1752 *   into the current bio.
1753 *
1754 * Return >=0 for the number of submitted extent buffers.
1755 * Return <0 for fatal error.
1756 */
1757static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1758{
1759	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
1760	struct folio *folio = page_folio(page);
1761	int submitted = 0;
1762	u64 page_start = page_offset(page);
1763	int bit_start = 0;
1764	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1765
1766	/* Lock and write each dirty extent buffer in the range */
1767	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1768		struct btrfs_subpage *subpage = folio_get_private(folio);
1769		struct extent_buffer *eb;
1770		unsigned long flags;
1771		u64 start;
1772
1773		/*
1774		 * Take private lock to ensure the subpage won't be detached
1775		 * in the meantime.
1776		 */
1777		spin_lock(&page->mapping->i_private_lock);
1778		if (!folio_test_private(folio)) {
1779			spin_unlock(&page->mapping->i_private_lock);
1780			break;
1781		}
1782		spin_lock_irqsave(&subpage->lock, flags);
1783		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1784			      subpage->bitmaps)) {
1785			spin_unlock_irqrestore(&subpage->lock, flags);
1786			spin_unlock(&page->mapping->i_private_lock);
1787			bit_start++;
1788			continue;
1789		}
1790
1791		start = page_start + bit_start * fs_info->sectorsize;
1792		bit_start += sectors_per_node;
1793
1794		/*
1795		 * Here we just want to grab the eb without touching extra
1796		 * spin locks, so call find_extent_buffer_nolock().
1797		 */
1798		eb = find_extent_buffer_nolock(fs_info, start);
1799		spin_unlock_irqrestore(&subpage->lock, flags);
1800		spin_unlock(&page->mapping->i_private_lock);
1801
1802		/*
1803		 * The eb has already reached 0 refs thus find_extent_buffer()
1804		 * doesn't return it. We don't need to write back such eb
1805		 * anyway.
1806		 */
1807		if (!eb)
1808			continue;
1809
1810		if (lock_extent_buffer_for_io(eb, wbc)) {
1811			write_one_eb(eb, wbc);
1812			submitted++;
1813		}
1814		free_extent_buffer(eb);
1815	}
1816	return submitted;
1817}
1818
1819/*
1820 * Submit all page(s) of one extent buffer.
1821 *
1822 * @page:	the page of one extent buffer
1823 * @eb_context:	to determine if we need to submit this page, if current page
1824 *		belongs to this eb, we don't need to submit
1825 *
1826 * The caller should pass each page in their bytenr order, and here we use
1827 * @eb_context to determine if we have submitted pages of one extent buffer.
1828 *
1829 * If we have, we just skip until we hit a new page that doesn't belong to
1830 * current @eb_context.
1831 *
1832 * If not, we submit all the page(s) of the extent buffer.
1833 *
1834 * Return >0 if we have submitted the extent buffer successfully.
1835 * Return 0 if we don't need to submit the page, as it's already submitted by
1836 * previous call.
1837 * Return <0 for fatal error.
1838 */
1839static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1840{
1841	struct writeback_control *wbc = ctx->wbc;
1842	struct address_space *mapping = page->mapping;
1843	struct folio *folio = page_folio(page);
1844	struct extent_buffer *eb;
1845	int ret;
1846
1847	if (!folio_test_private(folio))
1848		return 0;
1849
1850	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
1851		return submit_eb_subpage(page, wbc);
1852
1853	spin_lock(&mapping->i_private_lock);
1854	if (!folio_test_private(folio)) {
1855		spin_unlock(&mapping->i_private_lock);
1856		return 0;
1857	}
1858
1859	eb = folio_get_private(folio);
1860
1861	/*
1862	 * Shouldn't happen and normally this would be a BUG_ON but no point
1863	 * crashing the machine for something we can survive anyway.
1864	 */
1865	if (WARN_ON(!eb)) {
1866		spin_unlock(&mapping->i_private_lock);
1867		return 0;
1868	}
1869
1870	if (eb == ctx->eb) {
1871		spin_unlock(&mapping->i_private_lock);
1872		return 0;
1873	}
1874	ret = atomic_inc_not_zero(&eb->refs);
1875	spin_unlock(&mapping->i_private_lock);
1876	if (!ret)
1877		return 0;
1878
1879	ctx->eb = eb;
1880
1881	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1882	if (ret) {
1883		if (ret == -EBUSY)
1884			ret = 0;
1885		free_extent_buffer(eb);
1886		return ret;
1887	}
1888
1889	if (!lock_extent_buffer_for_io(eb, wbc)) {
1890		free_extent_buffer(eb);
1891		return 0;
1892	}
1893	/* Implies write in zoned mode. */
1894	if (ctx->zoned_bg) {
1895		/* Mark the last eb in the block group. */
1896		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1897		ctx->zoned_bg->meta_write_pointer += eb->len;
1898	}
1899	write_one_eb(eb, wbc);
1900	free_extent_buffer(eb);
1901	return 1;
1902}
1903
1904int btree_write_cache_pages(struct address_space *mapping,
1905				   struct writeback_control *wbc)
1906{
1907	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1908	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1909	int ret = 0;
1910	int done = 0;
1911	int nr_to_write_done = 0;
1912	struct folio_batch fbatch;
1913	unsigned int nr_folios;
1914	pgoff_t index;
1915	pgoff_t end;		/* Inclusive */
1916	int scanned = 0;
1917	xa_mark_t tag;
1918
1919	folio_batch_init(&fbatch);
1920	if (wbc->range_cyclic) {
1921		index = mapping->writeback_index; /* Start from prev offset */
1922		end = -1;
1923		/*
1924		 * Start from the beginning does not need to cycle over the
1925		 * range, mark it as scanned.
1926		 */
1927		scanned = (index == 0);
1928	} else {
1929		index = wbc->range_start >> PAGE_SHIFT;
1930		end = wbc->range_end >> PAGE_SHIFT;
1931		scanned = 1;
1932	}
1933	if (wbc->sync_mode == WB_SYNC_ALL)
1934		tag = PAGECACHE_TAG_TOWRITE;
1935	else
1936		tag = PAGECACHE_TAG_DIRTY;
1937	btrfs_zoned_meta_io_lock(fs_info);
1938retry:
1939	if (wbc->sync_mode == WB_SYNC_ALL)
1940		tag_pages_for_writeback(mapping, index, end);
1941	while (!done && !nr_to_write_done && (index <= end) &&
1942	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1943					    tag, &fbatch))) {
1944		unsigned i;
1945
1946		for (i = 0; i < nr_folios; i++) {
1947			struct folio *folio = fbatch.folios[i];
1948
1949			ret = submit_eb_page(&folio->page, &ctx);
1950			if (ret == 0)
1951				continue;
1952			if (ret < 0) {
1953				done = 1;
1954				break;
1955			}
1956
1957			/*
1958			 * The filesystem may choose to bump up nr_to_write.
1959			 * We have to make sure to honor the new nr_to_write
1960			 * at any time.
1961			 */
1962			nr_to_write_done = wbc->nr_to_write <= 0;
1963		}
1964		folio_batch_release(&fbatch);
1965		cond_resched();
1966	}
1967	if (!scanned && !done) {
1968		/*
1969		 * We hit the last page and there is more work to be done: wrap
1970		 * back to the start of the file
1971		 */
1972		scanned = 1;
1973		index = 0;
1974		goto retry;
1975	}
1976	/*
1977	 * If something went wrong, don't allow any metadata write bio to be
1978	 * submitted.
1979	 *
1980	 * This would prevent use-after-free if we had dirty pages not
1981	 * cleaned up, which can still happen with fuzzed images.
1982	 *
1983	 * - Bad extent tree
1984	 *   Allowing existing tree block to be allocated for other trees.
1985	 *
1986	 * - Log tree operations
1987	 *   Existing tree blocks get allocated to the log tree, which bumps their
1988	 *   generation, then they get cleaned in tree re-balance.
1989	 *   Such tree block will not be written back, since it's clean,
1990	 *   thus no WRITTEN flag set.
1991	 *   And after log writes back, this tree block is not traced by
1992	 *   any dirty extent_io_tree.
1993	 *
1994	 * - Offending tree block gets re-dirtied from its original owner
1995	 *   Since it has bumped generation, no WRITTEN flag, it can be
1996	 *   reused without COWing. This tree block will not be traced
1997	 *   by btrfs_transaction::dirty_pages.
1998	 *
1999	 *   Now such dirty tree block will not be cleaned by any dirty
2000	 *   extent io tree. Thus we don't want to submit such wild eb
2001	 *   if the fs already has error.
2002	 *
2003	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2004	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2005	 */
2006	if (ret > 0)
2007		ret = 0;
2008	if (!ret && BTRFS_FS_ERROR(fs_info))
2009		ret = -EROFS;
2010
2011	if (ctx.zoned_bg)
2012		btrfs_put_block_group(ctx.zoned_bg);
2013	btrfs_zoned_meta_io_unlock(fs_info);
2014	return ret;
2015}
2016
2017/*
2018 * Walk the list of dirty pages of the given address space and write all of them.
2019 *
2020 * @mapping:   address space structure to write
2021 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2022 * @bio_ctrl:  holds context for the write, namely the bio
2023 *
2024 * If a page is already under I/O, write_cache_pages() skips it, even
2025 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2026 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2027 * and msync() need to guarantee that all the data which was dirty at the time
2028 * the call was made get new I/O started against them.  If wbc->sync_mode is
2029 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2030 * existing IO to complete.
2031 */
2032static int extent_write_cache_pages(struct address_space *mapping,
2033			     struct btrfs_bio_ctrl *bio_ctrl)
2034{
2035	struct writeback_control *wbc = bio_ctrl->wbc;
2036	struct inode *inode = mapping->host;
2037	int ret = 0;
2038	int done = 0;
2039	int nr_to_write_done = 0;
2040	struct folio_batch fbatch;
2041	unsigned int nr_folios;
2042	pgoff_t index;
2043	pgoff_t end;		/* Inclusive */
2044	pgoff_t done_index;
2045	int range_whole = 0;
2046	int scanned = 0;
2047	xa_mark_t tag;
2048
2049	/*
2050	 * We have to hold onto the inode so that ordered extents can do their
2051	 * work when the IO finishes.  The alternative to this is failing to add
2052	 * an ordered extent if the igrab() fails there and that is a huge pain
2053	 * to deal with, so instead just hold onto the inode throughout the
2054	 * writepages operation.  If it fails here we are freeing up the inode
2055	 * anyway and we'd rather not waste our time writing out stuff that is
2056	 * going to be truncated anyway.
2057	 */
2058	if (!igrab(inode))
2059		return 0;
2060
2061	folio_batch_init(&fbatch);
2062	if (wbc->range_cyclic) {
2063		index = mapping->writeback_index; /* Start from prev offset */
2064		end = -1;
2065		/*
2066		 * Start from the beginning does not need to cycle over the
2067		 * range, mark it as scanned.
2068		 */
2069		scanned = (index == 0);
2070	} else {
2071		index = wbc->range_start >> PAGE_SHIFT;
2072		end = wbc->range_end >> PAGE_SHIFT;
2073		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2074			range_whole = 1;
2075		scanned = 1;
2076	}
2077
2078	/*
2079	 * We do the tagged writepage as long as the snapshot flush bit is set
2080	 * and we are the first one who do the filemap_flush() on this inode.
2081	 *
2082	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2083	 * not race in and drop the bit.
2084	 */
2085	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2086	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2087			       &BTRFS_I(inode)->runtime_flags))
2088		wbc->tagged_writepages = 1;
2089
2090	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2091		tag = PAGECACHE_TAG_TOWRITE;
2092	else
2093		tag = PAGECACHE_TAG_DIRTY;
2094retry:
2095	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2096		tag_pages_for_writeback(mapping, index, end);
2097	done_index = index;
2098	while (!done && !nr_to_write_done && (index <= end) &&
2099			(nr_folios = filemap_get_folios_tag(mapping, &index,
2100							end, tag, &fbatch))) {
2101		unsigned i;
2102
2103		for (i = 0; i < nr_folios; i++) {
2104			struct folio *folio = fbatch.folios[i];
2105
2106			done_index = folio_next_index(folio);
2107			/*
2108			 * At this point we hold neither the i_pages lock nor
2109			 * the page lock: the page may be truncated or
2110			 * invalidated (changing page->mapping to NULL),
2111			 * or even swizzled back from swapper_space to
2112			 * tmpfs file mapping
2113			 */
2114			if (!folio_trylock(folio)) {
2115				submit_write_bio(bio_ctrl, 0);
2116				folio_lock(folio);
2117			}
2118
2119			if (unlikely(folio->mapping != mapping)) {
2120				folio_unlock(folio);
2121				continue;
2122			}
2123
2124			if (!folio_test_dirty(folio)) {
2125				/* Someone wrote it for us. */
2126				folio_unlock(folio);
2127				continue;
2128			}
2129
2130			if (wbc->sync_mode != WB_SYNC_NONE) {
2131				if (folio_test_writeback(folio))
2132					submit_write_bio(bio_ctrl, 0);
2133				folio_wait_writeback(folio);
2134			}
2135
2136			if (folio_test_writeback(folio) ||
2137			    !folio_clear_dirty_for_io(folio)) {
2138				folio_unlock(folio);
2139				continue;
2140			}
2141
2142			ret = __extent_writepage(&folio->page, bio_ctrl);
2143			if (ret < 0) {
2144				done = 1;
2145				break;
2146			}
2147
2148			/*
2149			 * The filesystem may choose to bump up nr_to_write.
2150			 * We have to make sure to honor the new nr_to_write
2151			 * at any time.
2152			 */
2153			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2154					    wbc->nr_to_write <= 0);
2155		}
2156		folio_batch_release(&fbatch);
2157		cond_resched();
2158	}
2159	if (!scanned && !done) {
2160		/*
2161		 * We hit the last page and there is more work to be done: wrap
2162		 * back to the start of the file
2163		 */
2164		scanned = 1;
2165		index = 0;
2166
2167		/*
2168		 * If we're looping we could run into a page that is locked by a
2169		 * writer and that writer could be waiting on writeback for a
2170		 * page in our current bio, and thus deadlock, so flush the
2171		 * write bio here.
2172		 */
2173		submit_write_bio(bio_ctrl, 0);
2174		goto retry;
2175	}
2176
2177	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2178		mapping->writeback_index = done_index;
2179
2180	btrfs_add_delayed_iput(BTRFS_I(inode));
2181	return ret;
2182}
2183
2184/*
2185 * Submit the pages in the range to bio for call sites whose delalloc range has
2186 * already been run (i.e. the ordered extent is inserted) and all pages are still
2187 * locked.
2188 */
2189void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2190			       u64 start, u64 end, struct writeback_control *wbc,
2191			       bool pages_dirty)
2192{
2193	bool found_error = false;
2194	int ret = 0;
2195	struct address_space *mapping = inode->i_mapping;
2196	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2197	const u32 sectorsize = fs_info->sectorsize;
2198	loff_t i_size = i_size_read(inode);
2199	u64 cur = start;
2200	struct btrfs_bio_ctrl bio_ctrl = {
2201		.wbc = wbc,
2202		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2203	};
2204
2205	if (wbc->no_cgroup_owner)
2206		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2207
2208	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2209
2210	while (cur <= end) {
2211		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2212		u32 cur_len = cur_end + 1 - cur;
2213		struct page *page;
2214		int nr = 0;
2215
2216		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2217		ASSERT(PageLocked(page));
2218		if (pages_dirty && page != locked_page) {
2219			ASSERT(PageDirty(page));
2220			clear_page_dirty_for_io(page);
2221		}
2222
2223		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2224					    i_size, &nr);
2225		if (ret == 1)
2226			goto next_page;
2227
2228		/* Make sure the mapping tag for page dirty gets cleared. */
2229		if (nr == 0) {
2230			set_page_writeback(page);
2231			end_page_writeback(page);
2232		}
2233		if (ret) {
2234			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2235						       cur, cur_len, !ret);
2236			mapping_set_error(page->mapping, ret);
2237		}
2238		btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2239		if (ret < 0)
2240			found_error = true;
2241next_page:
2242		put_page(page);
2243		cur = cur_end + 1;
2244	}
2245
2246	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2247}
2248
2249int extent_writepages(struct address_space *mapping,
2250		      struct writeback_control *wbc)
2251{
2252	struct inode *inode = mapping->host;
2253	int ret = 0;
2254	struct btrfs_bio_ctrl bio_ctrl = {
2255		.wbc = wbc,
2256		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2257	};
2258
2259	/*
2260	 * Allow only a single thread to do the reloc work in zoned mode to
2261	 * protect the write pointer updates.
2262	 */
2263	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2264	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2265	submit_write_bio(&bio_ctrl, ret);
2266	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2267	return ret;
2268}
2269
2270void extent_readahead(struct readahead_control *rac)
2271{
2272	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2273	struct page *pagepool[16];
2274	struct extent_map *em_cached = NULL;
2275	u64 prev_em_start = (u64)-1;
2276	int nr;
2277
2278	while ((nr = readahead_page_batch(rac, pagepool))) {
2279		u64 contig_start = readahead_pos(rac);
2280		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2281
2282		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2283				&em_cached, &bio_ctrl, &prev_em_start);
2284	}
2285
2286	if (em_cached)
2287		free_extent_map(em_cached);
2288	submit_one_bio(&bio_ctrl);
2289}
2290
2291/*
2292 * basic invalidate_folio code, this waits on any locked or writeback
2293 * ranges corresponding to the folio, and then deletes any extent state
2294 * records from the tree
2295 */
2296int extent_invalidate_folio(struct extent_io_tree *tree,
2297			  struct folio *folio, size_t offset)
2298{
2299	struct extent_state *cached_state = NULL;
2300	u64 start = folio_pos(folio);
2301	u64 end = start + folio_size(folio) - 1;
2302	size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2303
2304	/* This function is only called for the btree inode */
2305	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2306
2307	start += ALIGN(offset, blocksize);
2308	if (start > end)
2309		return 0;
2310
2311	lock_extent(tree, start, end, &cached_state);
2312	folio_wait_writeback(folio);
2313
2314	/*
2315	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2316	 * so here we only need to unlock the extent range to free any
2317	 * existing extent state.
2318	 */
2319	unlock_extent(tree, start, end, &cached_state);
2320	return 0;
2321}
2322
2323/*
2324 * a helper for release_folio, this tests for areas of the page that
2325 * are locked or under IO and drops the related state bits if it is safe
2326 * to drop the page.
2327 */
2328static int try_release_extent_state(struct extent_io_tree *tree,
2329				    struct page *page, gfp_t mask)
2330{
2331	u64 start = page_offset(page);
2332	u64 end = start + PAGE_SIZE - 1;
2333	int ret = 1;
2334
2335	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2336		ret = 0;
2337	} else {
2338		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2339				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2340				   EXTENT_QGROUP_RESERVED);
2341
2342		/*
2343		 * At this point we can safely clear everything except the
2344		 * locked bit, the nodatasum bit and the delalloc new bit.
2345		 * The delalloc new bit will be cleared by ordered extent
2346		 * completion.
2347		 */
2348		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2349
2350		/* If clear_extent_bit() failed for ENOMEM reasons,
2351		 * we can't allow the release to continue.
2352		 */
2353		if (ret < 0)
2354			ret = 0;
2355		else
2356			ret = 1;
2357	}
2358	return ret;
2359}
2360
2361/*
2362 * a helper for release_folio.  As long as there are no locked extents
2363 * in the range corresponding to the page, both state records and extent
2364 * map records are removed
2365 */
2366int try_release_extent_mapping(struct page *page, gfp_t mask)
2367{
2368	struct extent_map *em;
2369	u64 start = page_offset(page);
2370	u64 end = start + PAGE_SIZE - 1;
2371	struct btrfs_inode *btrfs_inode = page_to_inode(page);
2372	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2373	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2374
2375	if (gfpflags_allow_blocking(mask) &&
2376	    page->mapping->host->i_size > SZ_16M) {
2377		u64 len;
2378		while (start <= end) {
2379			struct btrfs_fs_info *fs_info;
2380			u64 cur_gen;
2381
2382			len = end - start + 1;
2383			write_lock(&map->lock);
2384			em = lookup_extent_mapping(map, start, len);
2385			if (!em) {
2386				write_unlock(&map->lock);
2387				break;
2388			}
2389			if ((em->flags & EXTENT_FLAG_PINNED) ||
2390			    em->start != start) {
2391				write_unlock(&map->lock);
2392				free_extent_map(em);
2393				break;
2394			}
2395			if (test_range_bit_exists(tree, em->start,
2396						  extent_map_end(em) - 1,
2397						  EXTENT_LOCKED))
2398				goto next;
2399			/*
2400			 * If it's not in the list of modified extents, used
2401			 * by a fast fsync, we can remove it. If it's being
2402			 * logged we can safely remove it since fsync took an
2403			 * extra reference on the em.
2404			 */
2405			if (list_empty(&em->list) ||
2406			    (em->flags & EXTENT_FLAG_LOGGING))
2407				goto remove_em;
2408			/*
2409			 * If it's in the list of modified extents, remove it
2410			 * only if its generation is older than the current one,
2411			 * in which case we don't need it for a fast fsync.
2412			 * Otherwise don't remove it, we could be racing with an
2413			 * ongoing fast fsync that could miss the new extent.
2414			 */
2415			fs_info = btrfs_inode->root->fs_info;
2416			spin_lock(&fs_info->trans_lock);
2417			cur_gen = fs_info->generation;
2418			spin_unlock(&fs_info->trans_lock);
2419			if (em->generation >= cur_gen)
2420				goto next;
2421remove_em:
2422			/*
2423			 * We only remove extent maps that are not in the list of
2424			 * modified extents or that are in the list but with a
2425			 * generation lower than the current generation, so there
2426			 * is no need to set the full fsync flag on the inode (it
2427			 * hurts the fsync performance for workloads with a data
2428			 * size that exceeds or is close to the system's memory).
2429			 */
2430			remove_extent_mapping(map, em);
2431			/* once for the rb tree */
2432			free_extent_map(em);
2433next:
2434			start = extent_map_end(em);
2435			write_unlock(&map->lock);
2436
2437			/* once for us */
2438			free_extent_map(em);
2439
2440			cond_resched(); /* Allow large-extent preemption. */
2441		}
2442	}
2443	return try_release_extent_state(tree, page, mask);
2444}
2445
2446struct btrfs_fiemap_entry {
2447	u64 offset;
2448	u64 phys;
2449	u64 len;
2450	u32 flags;
2451};
2452
2453/*
2454 * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the
2455 * file range from the inode's io tree, release the subvolume tree search path,
2456 * flush the fiemap cache, and then relock the file range and re-search the
2457 * subvolume tree. The value here is something negative that can't be confused
2458 * with a valid errno value, and different from 1 because that's also a return
2459 * value from fiemap_fill_next_extent() and is often used to mean some btree
2460 * search did not find a key, so make it some distinct negative value.
2461 */
2462#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2463
2464/*
2465 * Used to:
2466 *
2467 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2468 *   merge extents that are contiguous and can be grouped as a single one;
2469 *
2470 * - Store extents ready to be written to the fiemap buffer in an intermediary
2471 *   buffer. This intermediary buffer is to ensure that in case the fiemap
2472 *   buffer is memory mapped to the fiemap target file, we don't deadlock
2473 *   during btrfs_page_mkwrite(). This is because during fiemap we are locking
2474 *   an extent range in order to prevent races with delalloc flushing and
2475 *   ordered extent completion, which is needed in order to reliably detect
2476 *   delalloc in holes and prealloc extents. And this can lead to a deadlock
2477 *   if the fiemap buffer is memory mapped to the file we are running fiemap
2478 *   against (a silly, useless in practice scenario, but possible) because
2479 *   btrfs_page_mkwrite() will try to lock the same extent range.
2480 */
2481struct fiemap_cache {
2482	/* An array of ready fiemap entries. */
2483	struct btrfs_fiemap_entry *entries;
2484	/* Number of entries in the entries array. */
2485	int entries_size;
2486	/* Index of the next entry in the entries array to write to. */
2487	int entries_pos;
2488	/*
2489	 * Once the entries array is full, this indicates the offset of the
2490	 * next file extent item we must search for in the inode's subvolume
2491	 * tree after unlocking the extent range in the inode's io tree and
2492	 * releasing the search path.
2493	 */
2494	u64 next_search_offset;
2495	/*
2496	 * This matches struct fiemap_extent_info::fi_mapped_extents. We use it
2497	 * to count emitted extents ourselves and stop, instead of relying on
2498	 * fiemap_fill_next_extent(), because we buffer ready fiemap entries in
2499	 * the @entries array, and we want to stop as soon as we hit the max
2500	 * amount of extents to map, not just to save time but also to make the
2501	 * logic at extent_fiemap() simpler.
2502	 */
2503	unsigned int extents_mapped;
2504	/* Fields for the currently cached extent (not yet submitted). */
2505	u64 offset;
2506	u64 phys;
2507	u64 len;
2508	u32 flags;
2509	bool cached;
2510};
2511
2512static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
2513			      struct fiemap_cache *cache)
2514{
2515	for (int i = 0; i < cache->entries_pos; i++) {
2516		struct btrfs_fiemap_entry *entry = &cache->entries[i];
2517		int ret;
2518
2519		ret = fiemap_fill_next_extent(fieinfo, entry->offset,
2520					      entry->phys, entry->len,
2521					      entry->flags);
2522		/*
2523		 * Ignore 1 (reached max entries) because we keep track of that
2524		 * ourselves in emit_fiemap_extent().
2525		 */
2526		if (ret < 0)
2527			return ret;
2528	}
2529	cache->entries_pos = 0;
2530
2531	return 0;
2532}
2533
2534/*
2535 * Helper to submit a fiemap extent.
2536 *
2537 * Will try to merge the current fiemap extent specified by @offset, @phys,
2538 * @len and @flags with the cached one.
2539 * Only when the merge fails is the cached one submitted as a fiemap
2540 * extent.
2541 *
2542 * Return value is the same as fiemap_fill_next_extent().
2543 */
2544static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2545				struct fiemap_cache *cache,
2546				u64 offset, u64 phys, u64 len, u32 flags)
2547{
2548	struct btrfs_fiemap_entry *entry;
2549	u64 cache_end;
2550
2551	/* Set at the end of extent_fiemap(). */
2552	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2553
2554	if (!cache->cached)
2555		goto assign;
2556
2557	/*
2558	 * When iterating the extents of the inode, at extent_fiemap(), we may
2559	 * find an extent that starts at an offset behind the end offset of the
2560	 * previous extent we processed. This happens if fiemap is called
2561	 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2562	 * after we had to unlock the file range, release the search path, emit
2563	 * the fiemap extents stored in the buffer (cache->entries array) and
2564	 * then lock the remainder of the range and re-search the btree.
2565	 *
2566	 * For example we are in leaf X processing its last item, which is the
2567	 * file extent item for file range [512K, 1M[, and after
2568	 * btrfs_next_leaf() releases the path, there's an ordered extent that
2569	 * completes for the file range [768K, 2M[, and that results in trimming
2570	 * the file extent item so that it now corresponds to the file range
2571	 * [512K, 768K[ and a new file extent item is inserted for the file
2572	 * range [768K, 2M[, which may end up as the last item of leaf X or as
2573	 * the first item of the next leaf - in either case btrfs_next_leaf()
2574	 * will leave us with a path pointing to the new extent item, for the
2575	 * file range [768K, 2M[, since that's the first key that follows the
2576	 * last one we processed. So in order not to report overlapping extents
2577	 * to user space, we trim the length of the previously cached extent and
2578	 * emit it.
2579	 *
2580	 * Upon calling btrfs_next_leaf() we may also find an extent with an
2581	 * offset smaller than or equal to cache->offset, and this happens
2582	 * when we had a hole or prealloc extent with several delalloc ranges in
2583	 * it, but after btrfs_next_leaf() released the path, delalloc was
2584	 * flushed and the resulting ordered extents were completed, so we can
2585	 * now have found a file extent item for an offset that is smaller than
2586	 * or equal to what we have in cache->offset. We deal with this as
2587	 * described below.
2588	 */
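	/*
	 * Illustrative example of the three overlap cases handled below, with
	 * made-up offsets and the cached extent being [128K, 256K):
	 *
	 * 1) The new extent also starts at 128K: it is more up to date, so it
	 *    simply replaces the cached one.
	 * 2) The new extent starts at 192K: trim the cached one to
	 *    [128K, 192K), emit it and cache the new extent.
	 * 3) The new extent is [64K, 192K): ignore it, it ends inside what is
	 *    already cached. The new extent is [64K, 512K): emit the cached
	 *    one and cache only the non-overlapping tail [256K, 512K).
	 */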
2589	cache_end = cache->offset + cache->len;
2590	if (cache_end > offset) {
2591		if (offset == cache->offset) {
2592			/*
2593			 * We cached a delalloc range (found in the io tree) for
2594			 * a hole or prealloc extent and we have now found a
2595			 * file extent item for the same offset. What we have
2596			 * now is more recent and up to date, so discard what
2597			 * we had in the cache and use what we have just found.
2598			 */
2599			goto assign;
2600		} else if (offset > cache->offset) {
2601			/*
2602			 * The extent range we previously found ends after the
2603			 * offset of the file extent item we found and that
2604			 * offset falls somewhere in the middle of that previous
2605			 * extent range. So adjust the range we previously found
2606			 * to end at the offset of the file extent item we have
2607			 * just found, since this extent is more up to date.
2608			 * Emit that adjusted range and cache the file extent
2609			 * item we have just found. This corresponds to the case
2610			 * where a previously found file extent item was split
2611			 * due to an ordered extent completing.
2612			 */
2613			cache->len = offset - cache->offset;
2614			goto emit;
2615		} else {
2616			const u64 range_end = offset + len;
2617
2618			/*
2619			 * The offset of the file extent item we have just found
2620			 * is behind the cached offset. This means we were
2621			 * processing a hole or prealloc extent for which we
2622			 * have found delalloc ranges (in the io tree), so what
2623			 * we have in the cache is the last delalloc range we
2624			 * found while the file extent item we found can be
2625			 * either for a whole delalloc range we previously
2626			 * emitted or only a part of that range.
2627			 *
2628			 * We have two cases here:
2629			 *
2630			 * 1) The file extent item's range ends at or behind the
2631			 *    cached extent's end. In this case just ignore the
2632			 *    current file extent item because we don't want to
2633			 *    overlap with previous ranges that may have been
2634			 *    emitted already;
2635			 *
2636			 * 2) The file extent item starts behind the currently
2637			 *    cached extent but its end offset goes beyond the
2638			 *    end offset of the cached extent. We don't want to
2639			 *    overlap with a previous range that may have been
2640			 *    emitted already, so we emit the currently cached
2641			 *    extent and then partially store the current file
2642			 *    extent item's range in the cache, for the subrange
2643			 *    going from the cached extent's end to the end of the
2644			 *    file extent item.
2645			 */
2646			if (range_end <= cache_end)
2647				return 0;
2648
2649			if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2650				phys += cache_end - offset;
2651
2652			offset = cache_end;
2653			len = range_end - cache_end;
2654			goto emit;
2655		}
2656	}
2657
2658	/*
2659	 * Only merge fiemap extents if:
2660	 * 1) Their logical addresses are contiguous
2661	 *
2662	 * 2) Their physical addresses are contiguous
2663	 *    So truly compressed (physical size smaller than logical size)
2664	 *    extents won't get merged with each other
2665	 *
2666	 * 3) They share the same flags
2667	 */
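	/*
	 * For example (hypothetical values): a cached extent [0, 64K) at
	 * physical 1M and a new extent [64K, 128K) at physical 1M + 64K with
	 * identical flags collapse into a single [0, 128K) entry, while a
	 * compressed extent, whose physical size is smaller than its logical
	 * size, breaks the physical contiguity check and stays separate.
	 */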
2668	if (cache->offset + cache->len == offset &&
2669	    cache->phys + cache->len == phys &&
2670	    cache->flags == flags) {
2671		cache->len += len;
2672		return 0;
2673	}
2674
2675emit:
2676	/* Not mergeable, need to submit cached one */
2677
2678	if (cache->entries_pos == cache->entries_size) {
2679		/*
2680		 * We will need to re-search from the end offset of the last
2681		 * stored extent and not from the current offset, because after
2682		 * unlocking the range and releasing the path, if there's a hole
2683		 * between that end offset and this current offset, a new extent
2684		 * may have been inserted due to a new write, so we don't want
2685		 * to miss it.
2686		 */
2687		entry = &cache->entries[cache->entries_size - 1];
2688		cache->next_search_offset = entry->offset + entry->len;
2689		cache->cached = false;
2690
2691		return BTRFS_FIEMAP_FLUSH_CACHE;
2692	}
2693
2694	entry = &cache->entries[cache->entries_pos];
2695	entry->offset = cache->offset;
2696	entry->phys = cache->phys;
2697	entry->len = cache->len;
2698	entry->flags = cache->flags;
2699	cache->entries_pos++;
2700	cache->extents_mapped++;
2701
2702	if (cache->extents_mapped == fieinfo->fi_extents_max) {
2703		cache->cached = false;
2704		return 1;
2705	}
2706assign:
2707	cache->cached = true;
2708	cache->offset = offset;
2709	cache->phys = phys;
2710	cache->len = len;
2711	cache->flags = flags;
2712
2713	return 0;
2714}
2715
2716/*
2717 * Emit the last fiemap cache entry.
2718 *
2719 * The last fiemap cache entry may still be pending in the following case:
2720 * 0		      4k		    8k
2721 * |<- Fiemap range ->|
2722 * |<------------  First extent ----------->|
2723 *
2724 * In this case, the first extent range will be cached but not emitted.
2725 * So we must emit it before ending extent_fiemap().
2726 */
2727static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2728				  struct fiemap_cache *cache)
2729{
2730	int ret;
2731
2732	if (!cache->cached)
2733		return 0;
2734
2735	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2736				      cache->len, cache->flags);
2737	cache->cached = false;
2738	if (ret > 0)
2739		ret = 0;
2740	return ret;
2741}
2742
2743static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2744{
2745	struct extent_buffer *clone = path->nodes[0];
2746	struct btrfs_key key;
2747	int slot;
2748	int ret;
2749
2750	path->slots[0]++;
2751	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2752		return 0;
2753
2754	/*
2755	 * Add a temporary extra ref to an already cloned extent buffer to
2756	 * prevent btrfs_next_leaf() freeing it, we want to reuse it to avoid
2757	 * the cost of allocating a new one.
2758	 */
2759	ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
2760	atomic_inc(&clone->refs);
2761
2762	ret = btrfs_next_leaf(inode->root, path);
2763	if (ret != 0)
2764		goto out;
2765
2766	/*
2767	 * Don't bother with cloning if there are no more file extent items for
2768	 * our inode.
2769	 */
2770	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2771	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
2772		ret = 1;
2773		goto out;
2774	}
2775
2776	/*
2777	 * Important to preserve the start field, for the optimizations when
2778	 * checking if extents are shared (see extent_fiemap()).
2779	 *
2780	 * We must set ->start before calling copy_extent_buffer_full().  If we
2781	 * are on sub-pagesize blocksize, we use ->start to determine the offset
2782	 * into the folio where our eb exists, and if we update ->start after
2783	 * the fact then any subsequent reads of the eb may read from a
2784	 * different offset in the folio than where we originally copied into.
2785	 */
2786	clone->start = path->nodes[0]->start;
2787	/* See the comment at fiemap_search_slot() about why we clone. */
2788	copy_extent_buffer_full(clone, path->nodes[0]);
2789
2790	slot = path->slots[0];
2791	btrfs_release_path(path);
2792	path->nodes[0] = clone;
2793	path->slots[0] = slot;
2794out:
2795	if (ret)
2796		free_extent_buffer(clone);
2797
2798	return ret;
2799}
2800
2801/*
2802 * Search for the first file extent item that starts at a given file offset or
2803 * the one that starts immediately before that offset.
2804 * Returns: 0 on success, < 0 on error, 1 if not found.
2805 */
2806static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2807			      u64 file_offset)
2808{
2809	const u64 ino = btrfs_ino(inode);
2810	struct btrfs_root *root = inode->root;
2811	struct extent_buffer *clone;
2812	struct btrfs_key key;
2813	int slot;
2814	int ret;
2815
2816	key.objectid = ino;
2817	key.type = BTRFS_EXTENT_DATA_KEY;
2818	key.offset = file_offset;
2819
2820	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2821	if (ret < 0)
2822		return ret;
2823
2824	if (ret > 0 && path->slots[0] > 0) {
2825		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2826		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2827			path->slots[0]--;
2828	}
2829
2830	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2831		ret = btrfs_next_leaf(root, path);
2832		if (ret != 0)
2833			return ret;
2834
2835		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2836		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2837			return 1;
2838	}
2839
2840	/*
2841	 * We clone the leaf and use it during fiemap. This is because while
2842	 * using the leaf we do expensive things like checking if an extent is
2843	 * shared, which can take a long time. In order to prevent blocking
2844	 * other tasks for too long, we use a clone of the leaf. We have locked
2845	 * the file range in the inode's io tree, so we know none of our file
2846	 * extent items can change. This way we avoid blocking other tasks that
2847	 * want to insert items for other inodes in the same leaf or b+tree
2848	 * rebalance operations (triggered for example when someone is trying
2849	 * to push items into this leaf when trying to insert an item in a
2850	 * neighbour leaf).
2851	 * We also need the private clone because holding a read lock on an
2852	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2853	 * when we check if extents are shared, as backref walking may need to
2854	 * lock the same leaf we are processing.
2855	 */
2856	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2857	if (!clone)
2858		return -ENOMEM;
2859
2860	slot = path->slots[0];
2861	btrfs_release_path(path);
2862	path->nodes[0] = clone;
2863	path->slots[0] = slot;
2864
2865	return 0;
2866}
2867
2868/*
2869 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2870 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2871 * extent. The end offset (@end) is inclusive.
2872 */
2873static int fiemap_process_hole(struct btrfs_inode *inode,
2874			       struct fiemap_extent_info *fieinfo,
2875			       struct fiemap_cache *cache,
2876			       struct extent_state **delalloc_cached_state,
2877			       struct btrfs_backref_share_check_ctx *backref_ctx,
2878			       u64 disk_bytenr, u64 extent_offset,
2879			       u64 extent_gen,
2880			       u64 start, u64 end)
2881{
2882	const u64 i_size = i_size_read(&inode->vfs_inode);
2883	u64 cur_offset = start;
2884	u64 last_delalloc_end = 0;
2885	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2886	bool checked_extent_shared = false;
2887	int ret;
2888
2889	/*
2890	 * There can be no delalloc past i_size, so don't waste time looking for
2891	 * it beyond i_size.
2892	 */
2893	while (cur_offset < end && cur_offset < i_size) {
2894		u64 delalloc_start;
2895		u64 delalloc_end;
2896		u64 prealloc_start;
2897		u64 prealloc_len = 0;
2898		bool delalloc;
2899
2900		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2901							delalloc_cached_state,
2902							&delalloc_start,
2903							&delalloc_end);
2904		if (!delalloc)
2905			break;
2906
2907		/*
2908		 * If this is a prealloc extent we have to report every section
2909		 * of it that has no delalloc.
2910		 */
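		/*
		 * For example (made-up layout): a prealloc extent covering
		 * [0, 1M) with a single delalloc range at [256K, 512K) is
		 * reported as three fiemap entries: prealloc [0, 256K),
		 * delalloc [256K, 512K) and the trailing prealloc [512K, 1M),
		 * which is emitted after this loop ends.
		 */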
2911		if (disk_bytenr != 0) {
2912			if (last_delalloc_end == 0) {
2913				prealloc_start = start;
2914				prealloc_len = delalloc_start - start;
2915			} else {
2916				prealloc_start = last_delalloc_end + 1;
2917				prealloc_len = delalloc_start - prealloc_start;
2918			}
2919		}
2920
2921		if (prealloc_len > 0) {
2922			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2923				ret = btrfs_is_data_extent_shared(inode,
2924								  disk_bytenr,
2925								  extent_gen,
2926								  backref_ctx);
2927				if (ret < 0)
2928					return ret;
2929				else if (ret > 0)
2930					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2931
2932				checked_extent_shared = true;
2933			}
2934			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2935						 disk_bytenr + extent_offset,
2936						 prealloc_len, prealloc_flags);
2937			if (ret)
2938				return ret;
2939			extent_offset += prealloc_len;
2940		}
2941
2942		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2943					 delalloc_end + 1 - delalloc_start,
2944					 FIEMAP_EXTENT_DELALLOC |
2945					 FIEMAP_EXTENT_UNKNOWN);
2946		if (ret)
2947			return ret;
2948
2949		last_delalloc_end = delalloc_end;
2950		cur_offset = delalloc_end + 1;
2951		extent_offset += cur_offset - delalloc_start;
2952		cond_resched();
2953	}
2954
2955	/*
2956	 * Either we found no delalloc for the whole prealloc extent or we have
2957	 * a prealloc extent that spans i_size or starts at or after i_size.
2958	 */
2959	if (disk_bytenr != 0 && last_delalloc_end < end) {
2960		u64 prealloc_start;
2961		u64 prealloc_len;
2962
2963		if (last_delalloc_end == 0) {
2964			prealloc_start = start;
2965			prealloc_len = end + 1 - start;
2966		} else {
2967			prealloc_start = last_delalloc_end + 1;
2968			prealloc_len = end + 1 - prealloc_start;
2969		}
2970
2971		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2972			ret = btrfs_is_data_extent_shared(inode,
2973							  disk_bytenr,
2974							  extent_gen,
2975							  backref_ctx);
2976			if (ret < 0)
2977				return ret;
2978			else if (ret > 0)
2979				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2980		}
2981		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2982					 disk_bytenr + extent_offset,
2983					 prealloc_len, prealloc_flags);
2984		if (ret)
2985			return ret;
2986	}
2987
2988	return 0;
2989}
2990
2991static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2992					  struct btrfs_path *path,
2993					  u64 *last_extent_end_ret)
2994{
2995	const u64 ino = btrfs_ino(inode);
2996	struct btrfs_root *root = inode->root;
2997	struct extent_buffer *leaf;
2998	struct btrfs_file_extent_item *ei;
2999	struct btrfs_key key;
3000	u64 disk_bytenr;
3001	int ret;
3002
3003	/*
3004	 * Lookup the last file extent. We're not using i_size here because
3005	 * there might be preallocation past i_size.
3006	 */
3007	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
3008	/* There can't be a file extent item at offset (u64)-1 */
3009	ASSERT(ret != 0);
3010	if (ret < 0)
3011		return ret;
3012
3013	/*
3014	 * For a non-existing key, btrfs_search_slot() always leaves us at a
3015	 * slot > 0, except if the btree is empty, which is impossible because
3016	 * at least it has the inode item for this inode and all the items for
3017	 * the root inode 256.
3018	 */
3019	ASSERT(path->slots[0] > 0);
3020	path->slots[0]--;
3021	leaf = path->nodes[0];
3022	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3023	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
3024		/* No file extent items in the subvolume tree. */
3025		*last_extent_end_ret = 0;
3026		return 0;
3027	}
3028
3029	/*
3030	 * For an inline extent, the disk_bytenr is where the inline data starts,
3031	 * so first check if we have an inline extent item before checking if we
3032	 * have an implicit hole (disk_bytenr == 0).
3033	 */
3034	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
3035	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
3036		*last_extent_end_ret = btrfs_file_extent_end(path);
3037		return 0;
3038	}
3039
3040	/*
3041	 * Find the last file extent item that is not a hole (when NO_HOLES is
3042	 * not enabled). This should take at most 2 iterations in the worst
3043	 * case: we have one hole file extent item at slot 0 of a leaf and
3044	 * another hole file extent item as the last item in the previous leaf.
3045	 * This is because we merge file extent items that represent holes.
3046	 */
3047	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3048	while (disk_bytenr == 0) {
3049		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
3050		if (ret < 0) {
3051			return ret;
3052		} else if (ret > 0) {
3053			/* No file extent items that are not holes. */
3054			*last_extent_end_ret = 0;
3055			return 0;
3056		}
3057		leaf = path->nodes[0];
3058		ei = btrfs_item_ptr(leaf, path->slots[0],
3059				    struct btrfs_file_extent_item);
3060		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3061	}
3062
3063	*last_extent_end_ret = btrfs_file_extent_end(path);
3064	return 0;
3065}
3066
3067int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3068		  u64 start, u64 len)
3069{
3070	const u64 ino = btrfs_ino(inode);
3071	struct extent_state *cached_state = NULL;
3072	struct extent_state *delalloc_cached_state = NULL;
3073	struct btrfs_path *path;
3074	struct fiemap_cache cache = { 0 };
3075	struct btrfs_backref_share_check_ctx *backref_ctx;
3076	u64 last_extent_end;
3077	u64 prev_extent_end;
3078	u64 range_start;
3079	u64 range_end;
3080	const u64 sectorsize = inode->root->fs_info->sectorsize;
3081	bool stopped = false;
3082	int ret;
3083
3084	cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
3085	cache.entries = kmalloc_array(cache.entries_size,
3086				      sizeof(struct btrfs_fiemap_entry),
3087				      GFP_KERNEL);
3088	backref_ctx = btrfs_alloc_backref_share_check_ctx();
3089	path = btrfs_alloc_path();
3090	if (!cache.entries || !backref_ctx || !path) {
3091		ret = -ENOMEM;
3092		goto out;
3093	}
3094
3095restart:
3096	range_start = round_down(start, sectorsize);
3097	range_end = round_up(start + len, sectorsize);
3098	prev_extent_end = range_start;
3099
3100	lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3101
3102	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3103	if (ret < 0)
3104		goto out_unlock;
3105	btrfs_release_path(path);
3106
3107	path->reada = READA_FORWARD;
3108	ret = fiemap_search_slot(inode, path, range_start);
3109	if (ret < 0) {
3110		goto out_unlock;
3111	} else if (ret > 0) {
3112		/*
3113		 * No file extent item found, but we may have delalloc between
3114		 * the current offset and i_size. So check for that.
3115		 */
3116		ret = 0;
3117		goto check_eof_delalloc;
3118	}
3119
3120	while (prev_extent_end < range_end) {
3121		struct extent_buffer *leaf = path->nodes[0];
3122		struct btrfs_file_extent_item *ei;
3123		struct btrfs_key key;
3124		u64 extent_end;
3125		u64 extent_len;
3126		u64 extent_offset = 0;
3127		u64 extent_gen;
3128		u64 disk_bytenr = 0;
3129		u64 flags = 0;
3130		int extent_type;
3131		u8 compression;
3132
3133		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3134		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3135			break;
3136
3137		extent_end = btrfs_file_extent_end(path);
3138
3139		/*
3140		 * The first iteration can leave us at an extent item that ends
3141		 * before our range's start. Move to the next item.
3142		 */
3143		if (extent_end <= range_start)
3144			goto next_item;
3145
3146		backref_ctx->curr_leaf_bytenr = leaf->start;
3147
3148		/* We have an implicit hole (NO_HOLES feature enabled). */
3149		if (prev_extent_end < key.offset) {
3150			const u64 hole_end = min(key.offset, range_end) - 1;
3151
3152			ret = fiemap_process_hole(inode, fieinfo, &cache,
3153						  &delalloc_cached_state,
3154						  backref_ctx, 0, 0, 0,
3155						  prev_extent_end, hole_end);
3156			if (ret < 0) {
3157				goto out_unlock;
3158			} else if (ret > 0) {
3159				/* fiemap_fill_next_extent() told us to stop. */
3160				stopped = true;
3161				break;
3162			}
3163
3164			/* We've reached the end of the fiemap range, stop. */
3165			if (key.offset >= range_end) {
3166				stopped = true;
3167				break;
3168			}
3169		}
3170
3171		extent_len = extent_end - key.offset;
3172		ei = btrfs_item_ptr(leaf, path->slots[0],
3173				    struct btrfs_file_extent_item);
3174		compression = btrfs_file_extent_compression(leaf, ei);
3175		extent_type = btrfs_file_extent_type(leaf, ei);
3176		extent_gen = btrfs_file_extent_generation(leaf, ei);
3177
3178		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3179			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3180			if (compression == BTRFS_COMPRESS_NONE)
3181				extent_offset = btrfs_file_extent_offset(leaf, ei);
3182		}
3183
3184		if (compression != BTRFS_COMPRESS_NONE)
3185			flags |= FIEMAP_EXTENT_ENCODED;
3186
3187		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3188			flags |= FIEMAP_EXTENT_DATA_INLINE;
3189			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3190			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3191						 extent_len, flags);
3192		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3193			ret = fiemap_process_hole(inode, fieinfo, &cache,
3194						  &delalloc_cached_state,
3195						  backref_ctx,
3196						  disk_bytenr, extent_offset,
3197						  extent_gen, key.offset,
3198						  extent_end - 1);
3199		} else if (disk_bytenr == 0) {
3200			/* We have an explicit hole. */
3201			ret = fiemap_process_hole(inode, fieinfo, &cache,
3202						  &delalloc_cached_state,
3203						  backref_ctx, 0, 0, 0,
3204						  key.offset, extent_end - 1);
3205		} else {
3206			/* We have a regular extent. */
3207			if (fieinfo->fi_extents_max) {
3208				ret = btrfs_is_data_extent_shared(inode,
3209								  disk_bytenr,
3210								  extent_gen,
3211								  backref_ctx);
3212				if (ret < 0)
3213					goto out_unlock;
3214				else if (ret > 0)
3215					flags |= FIEMAP_EXTENT_SHARED;
3216			}
3217
3218			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3219						 disk_bytenr + extent_offset,
3220						 extent_len, flags);
3221		}
3222
3223		if (ret < 0) {
3224			goto out_unlock;
3225		} else if (ret > 0) {
3226			/* emit_fiemap_extent() told us to stop. */
3227			stopped = true;
3228			break;
3229		}
3230
3231		prev_extent_end = extent_end;
3232next_item:
3233		if (fatal_signal_pending(current)) {
3234			ret = -EINTR;
3235			goto out_unlock;
3236		}
3237
3238		ret = fiemap_next_leaf_item(inode, path);
3239		if (ret < 0) {
3240			goto out_unlock;
3241		} else if (ret > 0) {
3242			/* No more file extent items for this inode. */
3243			break;
3244		}
3245		cond_resched();
3246	}
3247
3248check_eof_delalloc:
3249	if (!stopped && prev_extent_end < range_end) {
3250		ret = fiemap_process_hole(inode, fieinfo, &cache,
3251					  &delalloc_cached_state, backref_ctx,
3252					  0, 0, 0, prev_extent_end, range_end - 1);
3253		if (ret < 0)
3254			goto out_unlock;
3255		prev_extent_end = range_end;
3256	}
3257
3258	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3259		const u64 i_size = i_size_read(&inode->vfs_inode);
3260
3261		if (prev_extent_end < i_size) {
3262			u64 delalloc_start;
3263			u64 delalloc_end;
3264			bool delalloc;
3265
3266			delalloc = btrfs_find_delalloc_in_range(inode,
3267								prev_extent_end,
3268								i_size - 1,
3269								&delalloc_cached_state,
3270								&delalloc_start,
3271								&delalloc_end);
3272			if (!delalloc)
3273				cache.flags |= FIEMAP_EXTENT_LAST;
3274		} else {
3275			cache.flags |= FIEMAP_EXTENT_LAST;
3276		}
3277	}
3278
3279out_unlock:
3280	unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3281
3282	if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
3283		btrfs_release_path(path);
3284		ret = flush_fiemap_cache(fieinfo, &cache);
3285		if (ret)
3286			goto out;
3287		len -= cache.next_search_offset - start;
3288		start = cache.next_search_offset;
3289		goto restart;
3290	} else if (ret < 0) {
3291		goto out;
3292	}
3293
3294	/*
3295	 * Must free the path before emitting to the fiemap buffer because we
3296	 * may have a non-cloned leaf, and if the fiemap buffer is memory mapped
3297	 * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
3298	 * waiting for an ordered extent that needs to modify that leaf in order
3299	 * to complete, therefore leading to a deadlock.
3300	 */
3301	btrfs_free_path(path);
3302	path = NULL;
3303
3304	ret = flush_fiemap_cache(fieinfo, &cache);
3305	if (ret)
3306		goto out;
3307
3308	ret = emit_last_fiemap_cache(fieinfo, &cache);
3309out:
3310	free_extent_state(delalloc_cached_state);
3311	kfree(cache.entries);
3312	btrfs_free_backref_share_ctx(backref_ctx);
3313	btrfs_free_path(path);
3314	return ret;
3315}
3316
3317static void __free_extent_buffer(struct extent_buffer *eb)
3318{
3319	kmem_cache_free(extent_buffer_cache, eb);
3320}
3321
3322static int extent_buffer_under_io(const struct extent_buffer *eb)
3323{
3324	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3325		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3326}
3327
3328static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3329{
3330	struct btrfs_subpage *subpage;
3331
3332	lockdep_assert_held(&folio->mapping->i_private_lock);
3333
3334	if (folio_test_private(folio)) {
3335		subpage = folio_get_private(folio);
3336		if (atomic_read(&subpage->eb_refs))
3337			return true;
3338		/*
3339		 * Even if there are no eb refs here, we may still have an
3340		 * end_page_read() call relying on page::private.
3341		 */
3342		if (atomic_read(&subpage->readers))
3343			return true;
3344	}
3345	return false;
3346}
3347
3348static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3349{
3350	struct btrfs_fs_info *fs_info = eb->fs_info;
3351	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3352
3353	/*
3354	 * For mapped eb, we're going to change the folio private, which should
3355	 * be done under the i_private_lock.
3356	 */
3357	if (mapped)
3358		spin_lock(&folio->mapping->i_private_lock);
3359
3360	if (!folio_test_private(folio)) {
3361		if (mapped)
3362			spin_unlock(&folio->mapping->i_private_lock);
3363		return;
3364	}
3365
3366	if (fs_info->nodesize >= PAGE_SIZE) {
3367		/*
3368		 * We do this since we'll remove the pages after we've
3369		 * removed the eb from the radix tree, so we could race
3370		 * and have this page now attached to the new eb.  So
3371		 * only clear the folio private if it's still connected to
3372		 * this eb.
3373		 */
3374		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3375			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3376			BUG_ON(folio_test_dirty(folio));
3377			BUG_ON(folio_test_writeback(folio));
3378			/* We need to make sure we haven't been attached to a new eb. */
3379			folio_detach_private(folio);
3380		}
3381		if (mapped)
3382			spin_unlock(&folio->mapping->i_private_lock);
3383		return;
3384	}
3385
3386	/*
3387	 * For subpage, we can have a dummy eb with folio private attached.  In
3388	 * this case, we can directly detach the private, as such a folio is only
3389	 * attached to one dummy eb, with no sharing.
3390	 */
3391	if (!mapped) {
3392		btrfs_detach_subpage(fs_info, folio);
3393		return;
3394	}
3395
3396	btrfs_folio_dec_eb_refs(fs_info, folio);
3397
3398	/*
3399	 * We can only detach the folio private if there are no other ebs in the
3400	 * page range and no unfinished IO.
3401	 */
3402	if (!folio_range_has_eb(fs_info, folio))
3403		btrfs_detach_subpage(fs_info, folio);
3404
3405	spin_unlock(&folio->mapping->i_private_lock);
3406}
3407
3408/* Release all pages attached to the extent buffer */
3409static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3410{
3411	ASSERT(!extent_buffer_under_io(eb));
3412
3413	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3414		struct folio *folio = eb->folios[i];
3415
3416		if (!folio)
3417			continue;
3418
3419		detach_extent_buffer_folio(eb, folio);
3420
3421		/* One for when we allocated the folio. */
3422		folio_put(folio);
3423	}
3424}
3425
3426/*
3427 * Helper for releasing the extent buffer.
3428 */
3429static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3430{
3431	btrfs_release_extent_buffer_pages(eb);
3432	btrfs_leak_debug_del_eb(eb);
3433	__free_extent_buffer(eb);
3434}
3435
3436static struct extent_buffer *
3437__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3438		      unsigned long len)
3439{
3440	struct extent_buffer *eb = NULL;
3441
3442	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3443	eb->start = start;
3444	eb->len = len;
3445	eb->fs_info = fs_info;
3446	init_rwsem(&eb->lock);
3447
3448	btrfs_leak_debug_add_eb(eb);
3449
3450	spin_lock_init(&eb->refs_lock);
3451	atomic_set(&eb->refs, 1);
3452
3453	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3454
3455	return eb;
3456}
3457
3458struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3459{
3460	struct extent_buffer *new;
3461	int num_folios = num_extent_folios(src);
3462	int ret;
3463
3464	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3465	if (new == NULL)
3466		return NULL;
3467
3468	/*
3469	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3470	 * btrfs_release_extent_buffer() has different behavior for
3471	 * UNMAPPED subpage extent buffers.
3472	 */
3473	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3474
3475	ret = alloc_eb_folio_array(new, 0);
3476	if (ret) {
3477		btrfs_release_extent_buffer(new);
3478		return NULL;
3479	}
3480
3481	for (int i = 0; i < num_folios; i++) {
3482		struct folio *folio = new->folios[i];
3483		int ret;
3484
3485		ret = attach_extent_buffer_folio(new, folio, NULL);
3486		if (ret < 0) {
3487			btrfs_release_extent_buffer(new);
3488			return NULL;
3489		}
3490		WARN_ON(folio_test_dirty(folio));
3491	}
3492	copy_extent_buffer_full(new, src);
3493	set_extent_buffer_uptodate(new);
3494
3495	return new;
3496}
3497
3498struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3499						  u64 start, unsigned long len)
3500{
3501	struct extent_buffer *eb;
3502	int num_folios = 0;
3503	int ret;
3504
3505	eb = __alloc_extent_buffer(fs_info, start, len);
3506	if (!eb)
3507		return NULL;
3508
3509	ret = alloc_eb_folio_array(eb, 0);
3510	if (ret)
3511		goto err;
3512
3513	num_folios = num_extent_folios(eb);
3514	for (int i = 0; i < num_folios; i++) {
3515		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3516		if (ret < 0)
3517			goto err;
3518	}
3519
3520	set_extent_buffer_uptodate(eb);
3521	btrfs_set_header_nritems(eb, 0);
3522	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3523
3524	return eb;
3525err:
3526	for (int i = 0; i < num_folios; i++) {
3527		if (eb->folios[i]) {
3528			detach_extent_buffer_folio(eb, eb->folios[i]);
3529			__folio_put(eb->folios[i]);
3530		}
3531	}
3532	__free_extent_buffer(eb);
3533	return NULL;
3534}
3535
3536struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3537						u64 start)
3538{
3539	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3540}
3541
3542static void check_buffer_tree_ref(struct extent_buffer *eb)
3543{
3544	int refs;
3545	/*
3546	 * The TREE_REF bit is first set when the extent_buffer is added
3547	 * to the radix tree. It is also set again, if it was cleared, when a
3548	 * new reference is created by find_extent_buffer.
3549	 *
3550	 * It is only cleared in two cases: freeing the last non-tree
3551	 * reference to the extent_buffer when its STALE bit is set or
3552	 * calling release_folio when the tree reference is the only reference.
3553	 *
3554	 * In both cases, care is taken to ensure that the extent_buffer's
3555	 * pages are not under io. However, release_folio can be concurrently
3556	 * called with creating new references, which is prone to race
3557	 * conditions between the calls to check_buffer_tree_ref in those
3558	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3559	 *
3560	 * The actual lifetime of the extent_buffer in the radix tree is
3561	 * adequately protected by the refcount, but the TREE_REF bit and
3562	 * its corresponding reference are not. To protect against this
3563	 * class of races, we call check_buffer_tree_ref from the codepaths
3564	 * which trigger io. Note that once io is initiated, TREE_REF can no
3565	 * longer be cleared, so that is the moment at which any such race is
3566	 * best fixed.
3567	 */
3568	refs = atomic_read(&eb->refs);
3569	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3570		return;
3571
3572	spin_lock(&eb->refs_lock);
3573	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3574		atomic_inc(&eb->refs);
3575	spin_unlock(&eb->refs_lock);
3576}
3577
3578static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3579{
3580	int num_folios = num_extent_folios(eb);
3581
3582	check_buffer_tree_ref(eb);
3583
3584	for (int i = 0; i < num_folios; i++)
3585		folio_mark_accessed(eb->folios[i]);
3586}
3587
3588struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3589					 u64 start)
3590{
3591	struct extent_buffer *eb;
3592
3593	eb = find_extent_buffer_nolock(fs_info, start);
3594	if (!eb)
3595		return NULL;
3596	/*
3597	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3598	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3599	 * another task running free_extent_buffer() might have seen that flag
3600	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3601	 * writeback flags not set) and it's still in the tree (flag
3602	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3603	 * decrementing the extent buffer's reference count twice.  So here we
3604	 * could race and increment the eb's reference count, clear its stale
3605	 * flag, mark it as dirty and drop our reference before the other task
3606	 * finishes executing free_extent_buffer, which would later result in
3607	 * an attempt to free an extent buffer that is dirty.
3608	 */
3609	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3610		spin_lock(&eb->refs_lock);
3611		spin_unlock(&eb->refs_lock);
3612	}
3613	mark_extent_buffer_accessed(eb);
3614	return eb;
3615}
3616
3617#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3618struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3619					u64 start)
3620{
3621	struct extent_buffer *eb, *exists = NULL;
3622	int ret;
3623
3624	eb = find_extent_buffer(fs_info, start);
3625	if (eb)
3626		return eb;
3627	eb = alloc_dummy_extent_buffer(fs_info, start);
3628	if (!eb)
3629		return ERR_PTR(-ENOMEM);
3630	eb->fs_info = fs_info;
3631again:
3632	ret = radix_tree_preload(GFP_NOFS);
3633	if (ret) {
3634		exists = ERR_PTR(ret);
3635		goto free_eb;
3636	}
3637	spin_lock(&fs_info->buffer_lock);
3638	ret = radix_tree_insert(&fs_info->buffer_radix,
3639				start >> fs_info->sectorsize_bits, eb);
3640	spin_unlock(&fs_info->buffer_lock);
3641	radix_tree_preload_end();
3642	if (ret == -EEXIST) {
3643		exists = find_extent_buffer(fs_info, start);
3644		if (exists)
3645			goto free_eb;
3646		else
3647			goto again;
3648	}
3649	check_buffer_tree_ref(eb);
3650	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3651
3652	return eb;
3653free_eb:
3654	btrfs_release_extent_buffer(eb);
3655	return exists;
3656}
3657#endif
3658
3659static struct extent_buffer *grab_extent_buffer(
3660		struct btrfs_fs_info *fs_info, struct page *page)
3661{
3662	struct folio *folio = page_folio(page);
3663	struct extent_buffer *exists;
3664
3665	/*
3666	 * For subpage case, we completely rely on radix tree to ensure we
3667	 * don't try to insert two ebs for the same bytenr.  So here we always
3668	 * return NULL and just continue.
3669	 */
3670	if (fs_info->nodesize < PAGE_SIZE)
3671		return NULL;
3672
3673	/* Page not yet attached to an extent buffer */
3674	if (!folio_test_private(folio))
3675		return NULL;
3676
3677	/*
3678	 * We could have already allocated an eb for this page and attached one,
3679	 * so let's see if we can get a ref on the existing eb. If we can, we
3680	 * know it's good and we can just return that one; else we know we can
3681	 * just overwrite folio private.
3682	 */
3682	 */
3683	exists = folio_get_private(folio);
3684	if (atomic_inc_not_zero(&exists->refs))
3685		return exists;
3686
3687	WARN_ON(PageDirty(page));
3688	folio_detach_private(folio);
3689	return NULL;
3690}
3691
3692static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3693{
3694	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3695		btrfs_err(fs_info, "bad tree block start %llu", start);
3696		return -EINVAL;
3697	}
3698
3699	if (fs_info->nodesize < PAGE_SIZE &&
3700	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3701		btrfs_err(fs_info,
3702		"tree block crosses page boundary, start %llu nodesize %u",
3703			  start, fs_info->nodesize);
3704		return -EINVAL;
3705	}
3706	if (fs_info->nodesize >= PAGE_SIZE &&
3707	    !PAGE_ALIGNED(start)) {
3708		btrfs_err(fs_info,
3709		"tree block is not page aligned, start %llu nodesize %u",
3710			  start, fs_info->nodesize);
3711		return -EINVAL;
3712	}
3713	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3714	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3715		btrfs_warn(fs_info,
3716"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3717			      start, fs_info->nodesize);
3718	}
3719	return 0;
3720}
3721
3722
3723/*
3724 * Return 0 if eb->folios[i] is attached to the btree inode successfully.
3725 * Return >0 if there is already another extent buffer for the range,
3726 * and @found_eb_ret will be updated.
3727 * Return -EAGAIN if the filemap has an existing folio but with a different
3728 * size than @eb.
3729 * The caller needs to free the existing folios and retry using the same order.
3730 */
3731static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3732				      struct extent_buffer **found_eb_ret)
3733{
3734
3735	struct btrfs_fs_info *fs_info = eb->fs_info;
3736	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3737	const unsigned long index = eb->start >> PAGE_SHIFT;
3738	struct folio *existing_folio;
3739	int ret;
3740
3741	ASSERT(found_eb_ret);
3742
3743	/* Caller should ensure the folio exists. */
3744	ASSERT(eb->folios[i]);
3745
3746retry:
3747	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3748				GFP_NOFS | __GFP_NOFAIL);
3749	if (!ret)
3750		return 0;
3751
3752	existing_folio = filemap_lock_folio(mapping, index + i);
3753	/* The conflicting page cache entry only existed briefly, just retry. */
3754	if (IS_ERR(existing_folio))
3755		goto retry;
3756
3757	/* For now, we should only have single-page folios for btree inode. */
3758	ASSERT(folio_nr_pages(existing_folio) == 1);
3759
3760	if (folio_size(existing_folio) != eb->folio_size) {
3761		folio_unlock(existing_folio);
3762		folio_put(existing_folio);
3763		return -EAGAIN;
3764	}
3765
3766	if (fs_info->nodesize < PAGE_SIZE) {
3767		/*
3768		 * We're going to reuse the existing page, so we can drop our
3769		 * page and subpage structure now.
3770		 */
3771		__free_page(folio_page(eb->folios[i], 0));
3772		eb->folios[i] = existing_folio;
3773	} else {
3774		struct extent_buffer *existing_eb;
3775
3776		existing_eb = grab_extent_buffer(fs_info,
3777						 folio_page(existing_folio, 0));
3778		if (existing_eb) {
3779			/* The extent buffer still exists, we can use it directly. */
3780			*found_eb_ret = existing_eb;
3781			folio_unlock(existing_folio);
3782			folio_put(existing_folio);
3783			return 1;
3784		}
3785		/* The extent buffer no longer exists, we can reuse the folio. */
3786		__free_page(folio_page(eb->folios[i], 0));
3787		eb->folios[i] = existing_folio;
3788	}
3789	return 0;
3790}
3791
3792struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3793					  u64 start, u64 owner_root, int level)
3794{
3795	unsigned long len = fs_info->nodesize;
3796	int num_folios;
3797	int attached = 0;
3798	struct extent_buffer *eb;
3799	struct extent_buffer *existing_eb = NULL;
3800	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3801	struct btrfs_subpage *prealloc = NULL;
3802	u64 lockdep_owner = owner_root;
3803	bool page_contig = true;
3804	int uptodate = 1;
3805	int ret;
3806
3807	if (check_eb_alignment(fs_info, start))
3808		return ERR_PTR(-EINVAL);
3809
3810#if BITS_PER_LONG == 32
3811	if (start >= MAX_LFS_FILESIZE) {
3812		btrfs_err_rl(fs_info,
3813		"extent buffer %llu is beyond 32bit page cache limit", start);
3814		btrfs_err_32bit_limit(fs_info);
3815		return ERR_PTR(-EOVERFLOW);
3816	}
3817	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3818		btrfs_warn_32bit_limit(fs_info);
3819#endif
3820
3821	eb = find_extent_buffer(fs_info, start);
3822	if (eb)
3823		return eb;
3824
3825	eb = __alloc_extent_buffer(fs_info, start, len);
3826	if (!eb)
3827		return ERR_PTR(-ENOMEM);
3828
3829	/*
3830	 * The reloc trees are just snapshots, so we need them to appear to be
3831	 * just like any other fs tree WRT lockdep.
3832	 */
3833	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3834		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3835
3836	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3837
3838	/*
3839	 * Preallocate folio private for the subpage case, so that we won't
3840	 * allocate memory with the i_private_lock or the page lock held.
3841	 *
3842	 * The memory will be freed by attach_extent_buffer_page() or freed
3843	 * manually if we exit earlier.
3844	 */
3845	if (fs_info->nodesize < PAGE_SIZE) {
3846		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3847		if (IS_ERR(prealloc)) {
3848			ret = PTR_ERR(prealloc);
3849			goto out;
3850		}
3851	}
3852
3853reallocate:
3854	/* Allocate all pages first. */
3855	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3856	if (ret < 0) {
3857		btrfs_free_subpage(prealloc);
3858		goto out;
3859	}
3860
3861	num_folios = num_extent_folios(eb);
3862	/* Attach all pages to the filemap. */
3863	for (int i = 0; i < num_folios; i++) {
3864		struct folio *folio;
3865
3866		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3867		if (ret > 0) {
3868			ASSERT(existing_eb);
3869			goto out;
3870		}
3871
3872		/*
3873		 * TODO: Special handling for a corner case where the folio
3874		 * order mismatches between the new eb and the filemap.
3875		 *
3876		 * This happens when:
3877		 *
3878		 * - the new eb is using a higher order folio
3879		 *
3880		 * - the filemap is still using 0-order folios for the range
3881		 *   This can happen at the previous eb allocation, when no
3882		 *   higher order folio was available for that call.
3883		 *
3884		 * - the existing eb has already been freed
3885		 *
3886		 * In this case, we have to free the existing folios first, and
3887		 * re-allocate using the same order.
3888		 * Thankfully this is not going to happen yet, as we're still
3889		 * using 0-order folios.
3890		 */
3891		if (unlikely(ret == -EAGAIN)) {
3892			ASSERT(0);
3893			goto reallocate;
3894		}
3895		attached++;
3896
3897		/*
3898		 * Only after attach_eb_folio_to_filemap() is eb->folios[]
3899		 * reliable, as we may choose to reuse the existing page cache
3900		 * and free the allocated page.
3901		 */
3902		folio = eb->folios[i];
3903		eb->folio_size = folio_size(folio);
3904		eb->folio_shift = folio_shift(folio);
3905		spin_lock(&mapping->i_private_lock);
3906		/* Should not fail, as we have preallocated the memory */
3907		ret = attach_extent_buffer_folio(eb, folio, prealloc);
3908		ASSERT(!ret);
3909		/*
3910		 * To inform that we have an extra eb under allocation, so that
3911		 * detach_extent_buffer_page() won't release the folio private
3912		 * when the eb hasn't yet been inserted into the radix tree.
3913		 *
3914		 * The ref will be decreased when the eb releases the page, in
3915		 * detach_extent_buffer_page().
3916		 * Thus it needs no special handling in the error path.
3917		 */
3918		btrfs_folio_inc_eb_refs(fs_info, folio);
3919		spin_unlock(&mapping->i_private_lock);
3920
3921		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3922
3923		/*
3924		 * Check if the current page is physically contiguous with the
3925		 * previous eb page.
3926		 * At this stage, either we allocated a large folio, thus @i
3927		 * would only be 0, or we fall back to per-page allocation.
3928		 */
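		/*
		 * For example (hypothetical layout): with a 16K nodesize on a
		 * 4K page system this eb spans four 0-order folios; only when
		 * their struct pages are consecutive in the memory map (so the
		 * backing pages are contiguous) can eb->addr below be used to
		 * access the whole buffer linearly.
		 */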
3929		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3930			page_contig = false;
3931
3932		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3933			uptodate = 0;
3934
3935		/*
3936		 * We can't unlock the pages just yet since the extent buffer
3937		 * hasn't been properly inserted into the radix tree; this
3938		 * opens a race with btree_release_folio, which can free a page
3939		 * while we are still filling in all pages for the buffer and
3940		 * we could crash.
3941		 */
3942	}
3943	if (uptodate)
3944		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3945	/* All pages are physically contiguous, can skip cross page handling. */
3946	if (page_contig)
3947		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3948again:
3949	ret = radix_tree_preload(GFP_NOFS);
3950	if (ret)
3951		goto out;
3952
3953	spin_lock(&fs_info->buffer_lock);
3954	ret = radix_tree_insert(&fs_info->buffer_radix,
3955				start >> fs_info->sectorsize_bits, eb);
3956	spin_unlock(&fs_info->buffer_lock);
3957	radix_tree_preload_end();
3958	if (ret == -EEXIST) {
3959		ret = 0;
3960		existing_eb = find_extent_buffer(fs_info, start);
3961		if (existing_eb)
3962			goto out;
3963		else
3964			goto again;
3965	}
3966	/* add one reference for the tree */
3967	check_buffer_tree_ref(eb);
3968	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3969
3970	/*
3971	 * Now it's safe to unlock the pages because any calls to
3972	 * btree_release_folio will correctly detect that a page belongs to a
3973	 * live buffer and won't free them prematurely.
3974	 */
3975	for (int i = 0; i < num_folios; i++)
3976		unlock_page(folio_page(eb->folios[i], 0));
3977	return eb;
3978
3979out:
3980	WARN_ON(!atomic_dec_and_test(&eb->refs));
3981
3982	/*
3983	 * Any attached folios need to be detached before we unlock them.  This
3984	 * is because when we're inserting our new folios into the mapping, and
3985	 * then attaching our eb to that folio.  If we fail to insert our folio
3986	 * we'll lookup the folio for that index, and grab that EB.  We do not
3987	 * want that to grab this eb, as we're getting ready to free it.  So we
3988	 * have to detach it first and then unlock it.
3989	 *
3990	 * We have to drop our reference and NULL it out here because in the
3991	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3992	 * Below when we call btrfs_release_extent_buffer() we will call
3993	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3994	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3995	 * double put our reference and be super sad.
3996	 */
3997	for (int i = 0; i < attached; i++) {
3998		ASSERT(eb->folios[i]);
3999		detach_extent_buffer_folio(eb, eb->folios[i]);
4000		unlock_page(folio_page(eb->folios[i], 0));
4001		folio_put(eb->folios[i]);
4002		eb->folios[i] = NULL;
4003	}
4004	/*
4005	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED
4006	 * flag, so it can be cleaned up without utilizing page->mapping.
4007	 */
4008	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4009
4010	btrfs_release_extent_buffer(eb);
4011	if (ret < 0)
4012		return ERR_PTR(ret);
4013	ASSERT(existing_eb);
4014	return existing_eb;
4015}
4016
4017static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4018{
4019	struct extent_buffer *eb =
4020			container_of(head, struct extent_buffer, rcu_head);
4021
4022	__free_extent_buffer(eb);
4023}
4024
4025static int release_extent_buffer(struct extent_buffer *eb)
4026	__releases(&eb->refs_lock)
4027{
4028	lockdep_assert_held(&eb->refs_lock);
4029
4030	WARN_ON(atomic_read(&eb->refs) == 0);
4031	if (atomic_dec_and_test(&eb->refs)) {
4032		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4033			struct btrfs_fs_info *fs_info = eb->fs_info;
4034
4035			spin_unlock(&eb->refs_lock);
4036
4037			spin_lock(&fs_info->buffer_lock);
4038			radix_tree_delete(&fs_info->buffer_radix,
4039					  eb->start >> fs_info->sectorsize_bits);
4040			spin_unlock(&fs_info->buffer_lock);
4041		} else {
4042			spin_unlock(&eb->refs_lock);
4043		}
4044
4045		btrfs_leak_debug_del_eb(eb);
4046		/* Should be safe to release our pages at this point */
4047		btrfs_release_extent_buffer_pages(eb);
4048#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4049		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
4050			__free_extent_buffer(eb);
4051			return 1;
4052		}
4053#endif
4054		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4055		return 1;
4056	}
4057	spin_unlock(&eb->refs_lock);
4058
4059	return 0;
4060}
4061
4062void free_extent_buffer(struct extent_buffer *eb)
4063{
4064	int refs;
4065	if (!eb)
4066		return;
4067
4068	refs = atomic_read(&eb->refs);
4069	while (1) {
4070		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
4071		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
4072			refs == 1))
4073			break;
4074		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
4075			return;
4076	}
4077
4078	spin_lock(&eb->refs_lock);
4079	if (atomic_read(&eb->refs) == 2 &&
4080	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4081	    !extent_buffer_under_io(eb) &&
4082	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4083		atomic_dec(&eb->refs);
4084
4085	/*
4086	 * I know this is terrible, but it's temporary until we stop tracking
4087	 * the uptodate bits and such for the extent buffers.
4088	 */
4089	release_extent_buffer(eb);
4090}
4091
4092void free_extent_buffer_stale(struct extent_buffer *eb)
4093{
4094	if (!eb)
4095		return;
4096
4097	spin_lock(&eb->refs_lock);
4098	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4099
4100	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4101	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4102		atomic_dec(&eb->refs);
4103	release_extent_buffer(eb);
4104}
4105
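/*
 * Clear the dirty flag of @folio and, if it is still clean afterwards,
 * also clear the PAGECACHE_TAG_DIRTY mark in the mapping's xarray so that
 * writeback no longer considers the folio.
 */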
4106static void btree_clear_folio_dirty(struct folio *folio)
4107{
4108	ASSERT(folio_test_dirty(folio));
4109	ASSERT(folio_test_locked(folio));
4110	folio_clear_dirty_for_io(folio);
4111	xa_lock_irq(&folio->mapping->i_pages);
4112	if (!folio_test_dirty(folio))
4113		__xa_clear_mark(&folio->mapping->i_pages,
4114				folio_index(folio), PAGECACHE_TAG_DIRTY);
4115	xa_unlock_irq(&folio->mapping->i_pages);
4116}
4117
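/*
 * Clear the subpage dirty bits of the range covered by @eb and, if that was
 * the last dirty extent buffer in the folio, clear the folio dirty state too.
 */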
4118static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4119{
4120	struct btrfs_fs_info *fs_info = eb->fs_info;
4121	struct folio *folio = eb->folios[0];
4122	bool last;
4123
4124	/* btree_clear_folio_dirty() needs the folio locked. */
4125	folio_lock(folio);
4126	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4127	if (last)
4128		btree_clear_folio_dirty(folio);
4129	folio_unlock(folio);
4130	WARN_ON(atomic_read(&eb->refs) == 0);
4131}
4132
4133void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4134			      struct extent_buffer *eb)
4135{
4136	struct btrfs_fs_info *fs_info = eb->fs_info;
4137	int num_folios;
4138
4139	btrfs_assert_tree_write_locked(eb);
4140
4141	if (trans && btrfs_header_generation(eb) != trans->transid)
4142		return;
4143
4144	/*
4145	 * Instead of clearing the dirty flag off of the buffer, mark it as
4146	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4147	 * write-ordering in zoned mode, without the need to later re-dirty
4148	 * the extent_buffer.
4149	 *
4150	 * The actual zeroout of the buffer will happen later in
4151	 * btree_csum_one_bio.
4152	 */
4153	if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4154		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4155		return;
4156	}
4157
4158	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4159		return;
4160
4161	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4162				 fs_info->dirty_metadata_batch);
4163
4164	if (eb->fs_info->nodesize < PAGE_SIZE)
4165		return clear_subpage_extent_buffer_dirty(eb);
4166
4167	num_folios = num_extent_folios(eb);
4168	for (int i = 0; i < num_folios; i++) {
4169		struct folio *folio = eb->folios[i];
4170
4171		if (!folio_test_dirty(folio))
4172			continue;
4173		folio_lock(folio);
4174		btree_clear_folio_dirty(folio);
4175		folio_unlock(folio);
4176	}
4177	WARN_ON(atomic_read(&eb->refs) == 0);
4178}
4179
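/*
 * Mark @eb dirty for writeback.  On the clean -> dirty transition the
 * backing folios are dirtied as well and the buffer length is added to the
 * dirty_metadata_bytes counter.
 */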
4180void set_extent_buffer_dirty(struct extent_buffer *eb)
4181{
4182	int num_folios;
4183	bool was_dirty;
4184
4185	check_buffer_tree_ref(eb);
4186
4187	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4188
4189	num_folios = num_extent_folios(eb);
4190	WARN_ON(atomic_read(&eb->refs) == 0);
4191	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4192	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
4193
4194	if (!was_dirty) {
4195		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4196
4197		/*
4198		 * For the subpage case, we can have other extent buffers in the
4199		 * same page, and in clear_subpage_extent_buffer_dirty() we
4200		 * have to clear the page dirty flag without the subpage lock held.
4201		 * This can cause a race where our page gets its dirty bit cleared
4202		 * right after we just set it.
4203		 *
4204		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4205		 * its page for other reasons, so we can use the page lock to
4206		 * prevent the above race.
4207		 */
4208		if (subpage)
4209			lock_page(folio_page(eb->folios[0], 0));
4210		for (int i = 0; i < num_folios; i++)
4211			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4212					      eb->start, eb->len);
4213		if (subpage)
4214			unlock_page(folio_page(eb->folios[0], 0));
4215		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4216					 eb->len,
4217					 eb->fs_info->dirty_metadata_batch);
4218	}
4219#ifdef CONFIG_BTRFS_DEBUG
4220	for (int i = 0; i < num_folios; i++)
4221		ASSERT(folio_test_dirty(eb->folios[i]));
4222#endif
4223}
4224
4225void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4226{
4227	struct btrfs_fs_info *fs_info = eb->fs_info;
4228	int num_folios = num_extent_folios(eb);
4229
4230	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4231	for (int i = 0; i < num_folios; i++) {
4232		struct folio *folio = eb->folios[i];
4233
4234		if (!folio)
4235			continue;
4236
4237		/*
4238		 * This is special handling for metadata subpage, as regular
4239		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4240		 */
4241		if (fs_info->nodesize >= PAGE_SIZE)
4242			folio_clear_uptodate(folio);
4243		else
4244			btrfs_subpage_clear_uptodate(fs_info, folio,
4245						     eb->start, eb->len);
4246	}
4247}
4248
4249void set_extent_buffer_uptodate(struct extent_buffer *eb)
4250{
4251	struct btrfs_fs_info *fs_info = eb->fs_info;
4252	int num_folios = num_extent_folios(eb);
4253
4254	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4255	for (int i = 0; i < num_folios; i++) {
4256		struct folio *folio = eb->folios[i];
4257
4258		/*
4259		 * This is special handling for metadata subpage, as regular
4260		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4261		 */
4262		if (fs_info->nodesize >= PAGE_SIZE)
4263			folio_mark_uptodate(folio);
4264		else
4265			btrfs_subpage_set_uptodate(fs_info, folio,
4266						   eb->start, eb->len);
4267	}
4268}
4269
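/*
 * Completion handler for metadata read bios.
 *
 * Validate the extent buffer contents, propagate the result to the buffer
 * and the per-folio uptodate state, then clear EXTENT_BUFFER_READING and
 * wake up anybody waiting in read_extent_buffer_pages().
 */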
4270static void end_bbio_meta_read(struct btrfs_bio *bbio)
4271{
4272	struct extent_buffer *eb = bbio->private;
4273	struct btrfs_fs_info *fs_info = eb->fs_info;
4274	bool uptodate = !bbio->bio.bi_status;
4275	struct folio_iter fi;
4276	u32 bio_offset = 0;
4277	u32 bio_offset = 0;

4278	eb->read_mirror = bbio->mirror_num;
4279
4280	if (uptodate &&
4281	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4282		uptodate = false;
4283
4284	if (uptodate) {
4285		set_extent_buffer_uptodate(eb);
4286	} else {
4287		clear_extent_buffer_uptodate(eb);
4288		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4289	}
4290
4291	bio_for_each_folio_all(fi, &bbio->bio) {
4292		struct folio *folio = fi.folio;
4293		u64 start = eb->start + bio_offset;
4294		u32 len = fi.length;
4295
4296		if (uptodate)
4297			btrfs_folio_set_uptodate(fs_info, folio, start, len);
4298		else
4299			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4300
4301		bio_offset += len;
4302	}
4303
4304	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4305	smp_mb__after_atomic();
4306	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4307	free_extent_buffer(eb);
4308
4309	bio_put(&bbio->bio);
4310}
4311
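/*
 * Read the content of an extent buffer if it is not uptodate yet.
 *
 * Only one caller submits the read bio, guarded by EXTENT_BUFFER_READING;
 * everybody else just waits for that read.  With @wait == WAIT_COMPLETE the
 * function blocks until the read has finished and returns -EIO on failure,
 * otherwise it returns right after submitting the bio.
 */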
4312int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4313			     struct btrfs_tree_parent_check *check)
4314{
4315	struct btrfs_bio *bbio;
4316	bool ret;
4317
4318	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4319		return 0;
4320
4321	/*
4322	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4323	 * operation, which could potentially still be in flight.  In this case
4324	 * we simply want to return an error.
4325	 */
4326	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4327		return -EIO;
4328
4329	/* Someone else is already reading the buffer, just wait for it. */
4330	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4331		goto done;
4332
4333	/*
4334	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
4335	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
4336	 * started and finished reading the same eb.  In this case, UPTODATE
4337	 * will now be set, and we shouldn't read it in again.
4338	 */
4339	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
4340		clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4341		smp_mb__after_atomic();
4342		wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4343		return 0;
4344	}
4345
4346	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4347	eb->read_mirror = 0;
4348	check_buffer_tree_ref(eb);
4349	atomic_inc(&eb->refs);
4350
4351	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4352			       REQ_OP_READ | REQ_META, eb->fs_info,
4353			       end_bbio_meta_read, eb);
4354	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4355	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4356	bbio->file_offset = eb->start;
4357	memcpy(&bbio->parent_check, check, sizeof(*check));
4358	if (eb->fs_info->nodesize < PAGE_SIZE) {
4359		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4360				    eb->start - folio_pos(eb->folios[0]));
4361		ASSERT(ret);
4362	} else {
4363		int num_folios = num_extent_folios(eb);
4364
4365		for (int i = 0; i < num_folios; i++) {
4366			struct folio *folio = eb->folios[i];
4367
4368			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
4369			ASSERT(ret);
4370		}
4371	}
4372	btrfs_submit_bio(bbio, mirror_num);
4373
4374done:
4375	if (wait == WAIT_COMPLETE) {
4376		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4377		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4378			return -EIO;
4379	}
4380
4381	return 0;
4382}
4383
4384static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4385			    unsigned long len)
4386{
4387	btrfs_warn(eb->fs_info,
4388		"access to eb bytenr %llu len %u out of range start %lu len %lu",
4389		eb->start, eb->len, start, len);
4390	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4391
4392	return true;
4393}
4394
4395/*
4396 * Check if the [start, start + len) range is valid before reading/writing
4397 * the eb.
4398 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4399 *
4400 * Callers should not touch the dst/src memory if this function returns an error.
4401 */
4402static inline int check_eb_range(const struct extent_buffer *eb,
4403				 unsigned long start, unsigned long len)
4404{
4405	unsigned long offset;
4406
4407	/* start, start + len should not go beyond eb->len nor overflow */
4408	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4409		return report_eb_range(eb, start, len);
4410
4411	return false;
4412}
4413
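/*
 * Copy @len bytes at offset @start inside @eb into @dstv, crossing folio
 * boundaries as needed.  On an invalid range the destination is zeroed
 * instead.
 *
 * An illustrative call, assuming @offset is a valid byte offset of a
 * btrfs_disk_key inside the buffer:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key, offset, sizeof(disk_key));
 */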
4414void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4415			unsigned long start, unsigned long len)
4416{
4417	const int unit_size = eb->folio_size;
4418	size_t cur;
4419	size_t offset;
4420	char *dst = (char *)dstv;
4421	unsigned long i = get_eb_folio_index(eb, start);
4422
4423	if (check_eb_range(eb, start, len)) {
4424		/*
4425		 * Invalid range hit, zero the memory so callers won't get
4426		 * random garbage in their uninitialized buffer.
4427		 */
4428		memset(dstv, 0, len);
4429		return;
4430	}
4431
4432	if (eb->addr) {
4433		memcpy(dstv, eb->addr + start, len);
4434		return;
4435	}
4436
4437	offset = get_eb_offset_in_folio(eb, start);
4438
4439	while (len > 0) {
4440		char *kaddr;
4441
4442		cur = min(len, unit_size - offset);
4443		kaddr = folio_address(eb->folios[i]);
4444		memcpy(dst, kaddr + offset, cur);
4445
4446		dst += cur;
4447		len -= cur;
4448		offset = 0;
4449		i++;
4450	}
4451}
4452
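/*
 * Same as read_extent_buffer() but the destination is a user space buffer
 * and the copy is done with copy_to_user_nofault(), i.e. no page faults are
 * taken.  Returns 0 on success or -EFAULT if the copy failed.
 */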
4453int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4454				       void __user *dstv,
4455				       unsigned long start, unsigned long len)
4456{
4457	const int unit_size = eb->folio_size;
4458	size_t cur;
4459	size_t offset;
4460	char __user *dst = (char __user *)dstv;
4461	unsigned long i = get_eb_folio_index(eb, start);
4462	int ret = 0;
4463
4464	WARN_ON(start > eb->len);
4465	WARN_ON(start + len > eb->start + eb->len);
4466
4467	if (eb->addr) {
4468		if (copy_to_user_nofault(dstv, eb->addr + start, len))
4469			ret = -EFAULT;
4470		return ret;
4471	}
4472
4473	offset = get_eb_offset_in_folio(eb, start);
4474
4475	while (len > 0) {
4476		char *kaddr;
4477
4478		cur = min(len, unit_size - offset);
4479		kaddr = folio_address(eb->folios[i]);
4480		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4481			ret = -EFAULT;
4482			break;
4483		}
4484
4485		dst += cur;
4486		len -= cur;
4487		offset = 0;
4488		i++;
4489	}
4490
4491	return ret;
4492}
4493
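/*
 * Compare @len bytes at offset @start in @eb against @ptrv, memcmp() style.
 * Returns -EINVAL if the range is out of bounds.
 */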
4494int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4495			 unsigned long start, unsigned long len)
4496{
4497	const int unit_size = eb->folio_size;
4498	size_t cur;
4499	size_t offset;
4500	char *kaddr;
4501	char *ptr = (char *)ptrv;
4502	unsigned long i = get_eb_folio_index(eb, start);
4503	int ret = 0;
4504
4505	if (check_eb_range(eb, start, len))
4506		return -EINVAL;
4507
4508	if (eb->addr)
4509		return memcmp(ptrv, eb->addr + start, len);
4510
4511	offset = get_eb_offset_in_folio(eb, start);
4512
4513	while (len > 0) {
4514		cur = min(len, unit_size - offset);
4515		kaddr = folio_address(eb->folios[i]);
4516		ret = memcmp(ptr, kaddr + offset, cur);
4517		if (ret)
4518			break;
4519
4520		ptr += cur;
4521		len -= cur;
4522		offset = 0;
4523		i++;
4524	}
4525	return ret;
4526}
4527
4528/*
4529 * Check that the extent buffer is uptodate.
4530 *
4531 * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
4532 * For the subpage case, check if the eb range is marked uptodate in the subpage bitmap.
4533 */
4534static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4535{
4536	struct btrfs_fs_info *fs_info = eb->fs_info;
4537	struct folio *folio = eb->folios[i];
4538
4539	ASSERT(folio);
4540
4541	/*
4542	 * If we are using the commit root we could potentially clear a page
4543	 * Uptodate while we're using the extent buffer that we've previously
4544	 * looked up.  We don't want to complain in this case, as the page was
4545	 * valid before, we just didn't write it out.  Instead we want to catch
4546	 * the case where we didn't actually read the block properly, which
4547	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4548	 */
4549	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4550		return;
4551
4552	if (fs_info->nodesize < PAGE_SIZE) {
4553		struct folio *folio = eb->folios[0];
4554
4555		ASSERT(i == 0);
4556		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4557							 eb->start, eb->len)))
4558			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4559	} else {
4560		WARN_ON(!folio_test_uptodate(folio));
4561	}
4562}
4563
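/*
 * Copy @len bytes from @srcv into @eb at offset @start.
 *
 * @use_memmove must be true when the source may overlap the destination
 * inside the same extent buffer, as in memcpy_extent_buffer() and
 * memmove_extent_buffer().
 */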
4564static void __write_extent_buffer(const struct extent_buffer *eb,
4565				  const void *srcv, unsigned long start,
4566				  unsigned long len, bool use_memmove)
4567{
4568	const int unit_size = eb->folio_size;
4569	size_t cur;
4570	size_t offset;
4571	char *kaddr;
4572	char *src = (char *)srcv;
4573	unsigned long i = get_eb_folio_index(eb, start);
4574	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4575	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4576
4577	if (check_eb_range(eb, start, len))
4578		return;
4579
4580	if (eb->addr) {
4581		if (use_memmove)
4582			memmove(eb->addr + start, srcv, len);
4583		else
4584			memcpy(eb->addr + start, srcv, len);
4585		return;
4586	}
4587
4588	offset = get_eb_offset_in_folio(eb, start);
4589
4590	while (len > 0) {
4591		if (check_uptodate)
4592			assert_eb_folio_uptodate(eb, i);
4593
4594		cur = min(len, unit_size - offset);
4595		kaddr = folio_address(eb->folios[i]);
4596		if (use_memmove)
4597			memmove(kaddr + offset, src, cur);
4598		else
4599			memcpy(kaddr + offset, src, cur);
4600
4601		src += cur;
4602		len -= cur;
4603		offset = 0;
4604		i++;
4605	}
4606}
4607
4608void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4609			 unsigned long start, unsigned long len)
4610{
4611	return __write_extent_buffer(eb, srcv, start, len, false);
4612}
4613
4614static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4615				 unsigned long start, unsigned long len)
4616{
4617	const int unit_size = eb->folio_size;
4618	unsigned long cur = start;
4619
4620	if (eb->addr) {
4621		memset(eb->addr + start, c, len);
4622		return;
4623	}
4624
4625	while (cur < start + len) {
4626		unsigned long index = get_eb_folio_index(eb, cur);
4627		unsigned int offset = get_eb_offset_in_folio(eb, cur);
4628		unsigned int cur_len = min(start + len - cur, unit_size - offset);
4629
4630		assert_eb_folio_uptodate(eb, index);
4631		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4632
4633		cur += cur_len;
4634	}
4635}
4636
4637void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4638			   unsigned long len)
4639{
4640	if (check_eb_range(eb, start, len))
4641		return;
4642	return memset_extent_buffer(eb, 0, start, len);
4643}
4644
4645void copy_extent_buffer_full(const struct extent_buffer *dst,
4646			     const struct extent_buffer *src)
4647{
4648	const int unit_size = src->folio_size;
4649	unsigned long cur = 0;
4650
4651	ASSERT(dst->len == src->len);
4652
4653	while (cur < src->len) {
4654		unsigned long index = get_eb_folio_index(src, cur);
4655		unsigned long offset = get_eb_offset_in_folio(src, cur);
4656		unsigned long cur_len = min(src->len, unit_size - offset);
4657		void *addr = folio_address(src->folios[index]) + offset;
4658
4659		write_extent_buffer(dst, addr, cur, cur_len);
4660
4661		cur += cur_len;
4662	}
4663}
4664
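/*
 * Copy @len bytes from @src at @src_offset into @dst at @dst_offset.  Both
 * ranges are validated before any data is moved.
 */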
4665void copy_extent_buffer(const struct extent_buffer *dst,
4666			const struct extent_buffer *src,
4667			unsigned long dst_offset, unsigned long src_offset,
4668			unsigned long len)
4669{
4670	const int unit_size = dst->folio_size;
4671	u64 dst_len = dst->len;
4672	size_t cur;
4673	size_t offset;
4674	char *kaddr;
4675	unsigned long i = get_eb_folio_index(dst, dst_offset);
4676
4677	if (check_eb_range(dst, dst_offset, len) ||
4678	    check_eb_range(src, src_offset, len))
4679		return;
4680
4681	WARN_ON(src->len != dst_len);
4682
4683	offset = get_eb_offset_in_folio(dst, dst_offset);
4684
4685	while (len > 0) {
4686		assert_eb_folio_uptodate(dst, i);
4687
4688		cur = min(len, (unsigned long)(unit_size - offset));
4689
4690		kaddr = folio_address(dst->folios[i]);
4691		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4692
4693		src_offset += cur;
4694		len -= cur;
4695		offset = 0;
4696		i++;
4697	}
4698}
4699
4700/*
4701 * Calculate the folio and offset of the byte containing the given bit number.
4702 *
4703 * @eb:           the extent buffer
4704 * @start:        offset of the bitmap item in the extent buffer
4705 * @nr:           bit number
4706 * @folio_index:  return index of the folio in the extent buffer that contains
4707 *                the given bit number
4708 * @folio_offset: return offset into the folio given by folio_index
4709 *
4710 * This helper hides the ugliness of finding the byte in an extent buffer which
4711 * contains a given bit.
4712 */
4713static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4714				    unsigned long start, unsigned long nr,
4715				    unsigned long *folio_index,
4716				    size_t *folio_offset)
4717{
4718	size_t byte_offset = BIT_BYTE(nr);
4719	size_t offset;
4720
4721	/*
4722	 * The byte we want is the offset of the extent buffer + the offset of
4723	 * the bitmap item in the extent buffer + the offset of the byte in the
4724	 * bitmap item.
4725	 */
4726	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
4727
4728	*folio_index = offset >> eb->folio_shift;
4729	*folio_offset = offset_in_eb_folio(eb, offset);
4730}
4731
4732/*
4733 * Determine whether a bit in a bitmap item is set.
4734 *
4735 * @eb:     the extent buffer
4736 * @start:  offset of the bitmap item in the extent buffer
4737 * @nr:     bit number to test
4738 */
4739int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4740			   unsigned long nr)
4741{
4742	unsigned long i;
4743	size_t offset;
4744	u8 *kaddr;
4745
4746	eb_bitmap_offset(eb, start, nr, &i, &offset);
4747	assert_eb_folio_uptodate(eb, i);
4748	kaddr = folio_address(eb->folios[i]);
4749	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4750}
4751
4752static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4753{
4754	unsigned long index = get_eb_folio_index(eb, bytenr);
4755
4756	if (check_eb_range(eb, bytenr, 1))
4757		return NULL;
4758	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4759}
4760
4761/*
4762 * Set an area of a bitmap to 1.
4763 *
4764 * @eb:     the extent buffer
4765 * @start:  offset of the bitmap item in the extent buffer
4766 * @pos:    bit number of the first bit
4767 * @len:    number of bits to set
4768 */
4769void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4770			      unsigned long pos, unsigned long len)
4771{
4772	unsigned int first_byte = start + BIT_BYTE(pos);
4773	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4774	const bool same_byte = (first_byte == last_byte);
4775	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4776	u8 *kaddr;
4777
4778	if (same_byte)
4779		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4780
4781	/* Handle the first byte. */
4782	kaddr = extent_buffer_get_byte(eb, first_byte);
4783	*kaddr |= mask;
4784	if (same_byte)
4785		return;
4786
4787	/* Handle the byte aligned part. */
4788	ASSERT(first_byte + 1 <= last_byte);
4789	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4790
4791	/* Handle the last byte. */
4792	kaddr = extent_buffer_get_byte(eb, last_byte);
4793	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4794}
4795
4796
4797/*
4798 * Clear an area of a bitmap.
4799 *
4800 * @eb:     the extent buffer
4801 * @start:  offset of the bitmap item in the extent buffer
4802 * @pos:    bit number of the first bit
4803 * @len:    number of bits to clear
4804 */
4805void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4806				unsigned long start, unsigned long pos,
4807				unsigned long len)
4808{
4809	unsigned int first_byte = start + BIT_BYTE(pos);
4810	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4811	const bool same_byte = (first_byte == last_byte);
4812	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4813	u8 *kaddr;
4814
4815	if (same_byte)
4816		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4817
4818	/* Handle the first byte. */
4819	kaddr = extent_buffer_get_byte(eb, first_byte);
4820	*kaddr &= ~mask;
4821	if (same_byte)
4822		return;
4823
4824	/* Handle the byte aligned part. */
4825	ASSERT(first_byte + 1 <= last_byte);
4826	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4827
4828	/* Handle the last byte. */
4829	kaddr = extent_buffer_get_byte(eb, last_byte);
4830	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4831}
4832
4833static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4834{
4835	unsigned long distance = (src > dst) ? src - dst : dst - src;
4836	return distance < len;
4837}
4838
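/*
 * Copy @len bytes inside @dst from @src_offset to @dst_offset, switching to
 * memmove() for any folio chunk where the two ranges overlap.
 */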
4839void memcpy_extent_buffer(const struct extent_buffer *dst,
4840			  unsigned long dst_offset, unsigned long src_offset,
4841			  unsigned long len)
4842{
4843	const int unit_size = dst->folio_size;
4844	unsigned long cur_off = 0;
4845
4846	if (check_eb_range(dst, dst_offset, len) ||
4847	    check_eb_range(dst, src_offset, len))
4848		return;
4849
4850	if (dst->addr) {
4851		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4852
4853		if (use_memmove)
4854			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4855		else
4856			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4857		return;
4858	}
4859
4860	while (cur_off < len) {
4861		unsigned long cur_src = cur_off + src_offset;
4862		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4863		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4864		unsigned long cur_len = min(src_offset + len - cur_src,
4865					    unit_size - folio_off);
4866		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4867		const bool use_memmove = areas_overlap(src_offset + cur_off,
4868						       dst_offset + cur_off, cur_len);
4869
4870		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4871				      use_memmove);
4872		cur_off += cur_len;
4873	}
4874}
4875
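/*
 * Like memcpy_extent_buffer() but safe for overlapping ranges in either
 * direction: a destination below the source is copied forwards, otherwise
 * the data is copied backwards one folio chunk at a time.
 */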
4876void memmove_extent_buffer(const struct extent_buffer *dst,
4877			   unsigned long dst_offset, unsigned long src_offset,
4878			   unsigned long len)
4879{
4880	unsigned long dst_end = dst_offset + len - 1;
4881	unsigned long src_end = src_offset + len - 1;
4882
4883	if (check_eb_range(dst, dst_offset, len) ||
4884	    check_eb_range(dst, src_offset, len))
4885		return;
4886
4887	if (dst_offset < src_offset) {
4888		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4889		return;
4890	}
4891
4892	if (dst->addr) {
4893		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4894		return;
4895	}
4896
4897	while (len > 0) {
4898		unsigned long src_i;
4899		size_t cur;
4900		size_t dst_off_in_folio;
4901		size_t src_off_in_folio;
4902		void *src_addr;
4903		bool use_memmove;
4904
4905		src_i = get_eb_folio_index(dst, src_end);
4906
4907		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4908		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4909
4910		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4911		cur = min(cur, dst_off_in_folio + 1);
4912
4913		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4914					 cur + 1;
4915		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4916					    cur);
4917
4918		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4919				      use_memmove);
4920
4921		dst_end -= cur;
4922		src_end -= cur;
4923		len -= cur;
4924	}
4925}
4926
4927#define GANG_LOOKUP_SIZE	16
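/*
 * Return the first extent buffer in @page whose bytenr is at or after
 * @bytenr, or NULL if there is none.  The caller must hold
 * fs_info->buffer_lock.
 */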
4928static struct extent_buffer *get_next_extent_buffer(
4929		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4930{
4931	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4932	struct extent_buffer *found = NULL;
4933	u64 page_start = page_offset(page);
4934	u64 cur = page_start;
4935
4936	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4937	lockdep_assert_held(&fs_info->buffer_lock);
4938
4939	while (cur < page_start + PAGE_SIZE) {
4940		int ret;
4941		int i;
4942
4943		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4944				(void **)gang, cur >> fs_info->sectorsize_bits,
4945				min_t(unsigned int, GANG_LOOKUP_SIZE,
4946				      PAGE_SIZE / fs_info->nodesize));
4947		if (ret == 0)
4948			goto out;
4949		for (i = 0; i < ret; i++) {
4950			/* Already beyond page end */
4951			if (gang[i]->start >= page_start + PAGE_SIZE)
4952				goto out;
4953			/* Found one */
4954			if (gang[i]->start >= bytenr) {
4955				found = gang[i];
4956				goto out;
4957			}
4958		}
4959		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4960	}
4961out:
4962	return found;
4963}
4964
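/*
 * Subpage variant of try_release_extent_buffer(): walk all extent buffers
 * overlapping @page and try to release each of them.  Returns 1 once folio
 * private is gone, i.e. the page no longer backs any extent buffer, and 0
 * otherwise.
 */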
4965static int try_release_subpage_extent_buffer(struct page *page)
4966{
4967	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
4968	u64 cur = page_offset(page);
4969	const u64 end = page_offset(page) + PAGE_SIZE;
4970	int ret;
4971
4972	while (cur < end) {
4973		struct extent_buffer *eb = NULL;
4974
4975		/*
4976		 * Unlike try_release_extent_buffer(), which uses folio private
4977		 * to grab the buffer, for the subpage case we rely on the radix
4978		 * tree, thus we need to ensure radix tree consistency.
4979		 *
4980		 * We also want an atomic snapshot of the radix tree, thus we go
4981		 * with a spinlock rather than RCU.
4982		 */
4983		spin_lock(&fs_info->buffer_lock);
4984		eb = get_next_extent_buffer(fs_info, page, cur);
4985		if (!eb) {
4986			/* No more eb in the page range after or at cur */
4987			spin_unlock(&fs_info->buffer_lock);
4988			break;
4989		}
4990		cur = eb->start + eb->len;
4991
4992		/*
4993		 * The same as try_release_extent_buffer(), to ensure the eb
4994		 * won't disappear out from under us.
4995		 */
4996		spin_lock(&eb->refs_lock);
4997		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4998			spin_unlock(&eb->refs_lock);
4999			spin_unlock(&fs_info->buffer_lock);
5000			break;
5001		}
5002		spin_unlock(&fs_info->buffer_lock);
5003
5004		/*
5005		 * If tree ref isn't set then we know the ref on this eb is a
5006		 * real ref, so just return; this eb will likely be freed soon
5007		 * anyway.
5008		 */
5009		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5010			spin_unlock(&eb->refs_lock);
5011			break;
5012		}
5013
5014		/*
5015		 * Here we don't care about the return value, we will always
5016		 * check the folio private at the end.  And
5017		 * release_extent_buffer() will release the refs_lock.
5018		 */
5019		release_extent_buffer(eb);
5020	}
5021	/*
5022	 * Finally, check if we have cleared folio private: if we have released
5023	 * all ebs in the page, folio private should be cleared by now.
5024	 */
5025	spin_lock(&page->mapping->i_private_lock);
5026	if (!folio_test_private(page_folio(page)))
5027		ret = 1;
5028	else
5029		ret = 0;
5030	spin_unlock(&page->mapping->i_private_lock);
5031	return ret;
5032
5033}
5034
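/*
 * Attempt to release the extent buffer attached to @page.  Returns 1 if the
 * buffer could be freed (or none was attached), so the page can be released,
 * and 0 if the buffer is still referenced or under IO.
 */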
5035int try_release_extent_buffer(struct page *page)
5036{
5037	struct folio *folio = page_folio(page);
5038	struct extent_buffer *eb;
5039
5040	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
5041		return try_release_subpage_extent_buffer(page);
5042
5043	/*
5044	 * We need to make sure nobody is changing folio private, as we rely on
5045	 * folio private as the pointer to extent buffer.
5046	 */
5047	spin_lock(&page->mapping->i_private_lock);
5048	if (!folio_test_private(folio)) {
5049		spin_unlock(&page->mapping->i_private_lock);
5050		return 1;
5051	}
5052
5053	eb = folio_get_private(folio);
5054	BUG_ON(!eb);
5055
5056	/*
5057	 * This is a little awful but should be ok, we need to make sure that
5058	 * the eb doesn't disappear out from under us while we're looking at
5059	 * this page.
5060	 */
5061	spin_lock(&eb->refs_lock);
5062	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5063		spin_unlock(&eb->refs_lock);
5064		spin_unlock(&page->mapping->i_private_lock);
5065		return 0;
5066	}
5067	spin_unlock(&page->mapping->i_private_lock);
5068
5069	/*
5070	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5071	 * so just return; this page will likely be freed soon anyway.
5072	 */
5073	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5074		spin_unlock(&eb->refs_lock);
5075		return 0;
5076	}
5077
5078	return release_extent_buffer(eb);
5079}
5080
5081/*
5082 * Attempt to readahead a child block.
5083 *
5084 * @fs_info:	the fs_info
5085 * @bytenr:	bytenr to read
5086 * @owner_root: objectid of the root that owns this eb
5087 * @gen:	generation for the uptodate check, can be 0
5088 * @level:	level for the eb
5089 *
5090 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
5091 * normal uptodate check of the eb, without checking the generation.  If we have
5092 * to read the block we will not block on anything.
5093 */
5094void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
5095				u64 bytenr, u64 owner_root, u64 gen, int level)
5096{
5097	struct btrfs_tree_parent_check check = {
5098		.has_first_key = 0,
5099		.level = level,
5100		.transid = gen
5101	};
5102	struct extent_buffer *eb;
5103	int ret;
5104
5105	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
5106	if (IS_ERR(eb))
5107		return;
5108
5109	if (btrfs_buffer_uptodate(eb, gen, 1)) {
5110		free_extent_buffer(eb);
5111		return;
5112	}
5113
5114	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
5115	if (ret < 0)
5116		free_extent_buffer_stale(eb);
5117	else
5118		free_extent_buffer(eb);
5119}
5120
5121/*
5122 * Readahead a node's child block.
5123 *
5124 * @node:	parent node we're reading from
5125 * @slot:	slot in the parent node for the child we want to read
5126 *
5127 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
5128 * at by the given slot in the provided node.
5129 */
5130void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5131{
5132	btrfs_readahead_tree_block(node->fs_info,
5133				   btrfs_node_blockptr(node, slot),
5134				   btrfs_header_owner(node),
5135				   btrfs_node_ptr_generation(node, slot),
5136				   btrfs_header_level(node) - 1);
5137}