v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/sched/mm.h>
  10#include <linux/spinlock.h>
  11#include <linux/blkdev.h>
  12#include <linux/swap.h>
  13#include <linux/writeback.h>
  14#include <linux/pagevec.h>
  15#include <linux/prefetch.h>
  16#include <linux/fsverity.h>
  17#include "extent_io.h"
  18#include "extent-io-tree.h"
  19#include "extent_map.h"
  20#include "ctree.h"
  21#include "btrfs_inode.h"
  22#include "bio.h"
  23#include "locking.h"
  24#include "backref.h"
  25#include "disk-io.h"
  26#include "subpage.h"
  27#include "zoned.h"
  28#include "block-group.h"
  29#include "compression.h"
  30#include "fs.h"
  31#include "accessors.h"
  32#include "file-item.h"
  33#include "file.h"
  34#include "dev-replace.h"
  35#include "super.h"
  36#include "transaction.h"
  37
  38static struct kmem_cache *extent_buffer_cache;
  39
  40#ifdef CONFIG_BTRFS_DEBUG
  41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
  42{
  43	struct btrfs_fs_info *fs_info = eb->fs_info;
  44	unsigned long flags;
  45
  46	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  47	list_add(&eb->leak_list, &fs_info->allocated_ebs);
  48	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  49}
  50
  51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
  52{
  53	struct btrfs_fs_info *fs_info = eb->fs_info;
  54	unsigned long flags;
  55
  56	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  57	list_del(&eb->leak_list);
  58	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  59}
  60
  61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  62{
  63	struct extent_buffer *eb;
  64	unsigned long flags;
  65
  66	/*
  67	 * If we didn't get into open_ctree our allocated_ebs will not be
  68	 * initialized, so just skip this.
  69	 */
  70	if (!fs_info->allocated_ebs.next)
  71		return;
  72
  73	WARN_ON(!list_empty(&fs_info->allocated_ebs));
  74	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  75	while (!list_empty(&fs_info->allocated_ebs)) {
  76		eb = list_first_entry(&fs_info->allocated_ebs,
  77				      struct extent_buffer, leak_list);
  78		pr_err(
  79	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
  80		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  81		       btrfs_header_owner(eb));
  82		list_del(&eb->leak_list);
  83		WARN_ON_ONCE(1);
  84		kmem_cache_free(extent_buffer_cache, eb);
  85	}
  86	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  87}
  88#else
  89#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
  90#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
  91#endif
  92
  93/*
  94 * Structure to record info about the bio being assembled, and other info like
  95 * how many bytes are there before stripe/ordered extent boundary.
  96 */
  97struct btrfs_bio_ctrl {
  98	struct btrfs_bio *bbio;
  99	enum btrfs_compression_type compress_type;
 100	u32 len_to_oe_boundary;
 101	blk_opf_t opf;
 102	btrfs_bio_end_io_t end_io_func;
 103	struct writeback_control *wbc;
 104
 105	/*
 106	 * The sectors of the page which are going to be submitted by
 107	 * extent_writepage_io().
 108	 * This is to avoid touching ranges covered by compression/inline.
 109	 */
 110	unsigned long submit_bitmap;
 111};
 112
 113static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 114{
 115	struct btrfs_bio *bbio = bio_ctrl->bbio;
 116
 117	if (!bbio)
 118		return;
 119
 120	/* Caller should ensure the bio has at least some range added */
 121	ASSERT(bbio->bio.bi_iter.bi_size);
 122
 123	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
 124	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
 125		btrfs_submit_compressed_read(bbio);
 126	else
 127		btrfs_submit_bbio(bbio, 0);
 128
 129	/* The bbio is owned by the end_io handler now */
 130	bio_ctrl->bbio = NULL;
 131}
 132
 133/*
 134 * Submit or fail the current bio in the bio_ctrl structure.
 135 */
 136static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
 137{
 138	struct btrfs_bio *bbio = bio_ctrl->bbio;
 139
 140	if (!bbio)
 141		return;
 142
 143	if (ret) {
 144		ASSERT(ret < 0);
 145		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
 146		/* The bio is owned by the end_io handler now */
 147		bio_ctrl->bbio = NULL;
 148	} else {
 149		submit_one_bio(bio_ctrl);
 150	}
 151}
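A condensed sketch of the intended lifecycle of this control structure (folio locking and error handling omitted); it mirrors what btrfs_do_readpage()/btrfs_read_folio() do further down in this file, and folio, disk_bytenr and fs_info here are placeholders:

	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };

	/* Completion handler for the assembled bbio (defined later in this file). */
	bio_ctrl.end_io_func = end_bbio_data_read;
	/* Queue one sector of @folio whose data lives at @disk_bytenr on disk. */
	submit_extent_folio(&bio_ctrl, disk_bytenr, folio, fs_info->sectorsize, 0);
	/* Flush whatever is still being assembled. */
	submit_one_bio(&bio_ctrl);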
 152
 153int __init extent_buffer_init_cachep(void)
 154{
 155	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 156						sizeof(struct extent_buffer), 0, 0,
 157						NULL);
 158	if (!extent_buffer_cache)
 159		return -ENOMEM;
 160
 161	return 0;
 162}
 163
 164void __cold extent_buffer_free_cachep(void)
 165{
 166	/*
 167	 * Make sure all delayed rcu free are flushed before we
 168	 * destroy caches.
 169	 */
 170	rcu_barrier();
 171	kmem_cache_destroy(extent_buffer_cache);
 172}
 173
 174static void process_one_folio(struct btrfs_fs_info *fs_info,
 175			      struct folio *folio, const struct folio *locked_folio,
 176			      unsigned long page_ops, u64 start, u64 end)
 177{
 178	u32 len;
 179
 180	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
 181	len = end + 1 - start;
 182
 183	if (page_ops & PAGE_SET_ORDERED)
 184		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
 185	if (page_ops & PAGE_START_WRITEBACK) {
 186		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 187		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
 188	}
 189	if (page_ops & PAGE_END_WRITEBACK)
 190		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
 191
 192	if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
 193		btrfs_folio_end_lock(fs_info, folio, start, len);
 194}
 195
 196static void __process_folios_contig(struct address_space *mapping,
 197				    const struct folio *locked_folio, u64 start,
 198				    u64 end, unsigned long page_ops)
 199{
 200	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
 201	pgoff_t start_index = start >> PAGE_SHIFT;
 202	pgoff_t end_index = end >> PAGE_SHIFT;
 203	pgoff_t index = start_index;
 204	struct folio_batch fbatch;
 205	int i;
 206
 207	folio_batch_init(&fbatch);
 208	while (index <= end_index) {
 209		int found_folios;
 210
 211		found_folios = filemap_get_folios_contig(mapping, &index,
 212				end_index, &fbatch);
 213		for (i = 0; i < found_folios; i++) {
 214			struct folio *folio = fbatch.folios[i];
 215
 216			process_one_folio(fs_info, folio, locked_folio,
 217					  page_ops, start, end);
 218		}
 219		folio_batch_release(&fbatch);
 220		cond_resched();
 221	}
 222}
 223
 224static noinline void __unlock_for_delalloc(const struct inode *inode,
 225					   const struct folio *locked_folio,
 226					   u64 start, u64 end)
 227{
 228	unsigned long index = start >> PAGE_SHIFT;
 229	unsigned long end_index = end >> PAGE_SHIFT;
 230
 231	ASSERT(locked_folio);
 232	if (index == locked_folio->index && end_index == index)
 233		return;
 234
 235	__process_folios_contig(inode->i_mapping, locked_folio, start, end,
 236				PAGE_UNLOCK);
 237}
 238
 239static noinline int lock_delalloc_folios(struct inode *inode,
 240					 const struct folio *locked_folio,
 241					 u64 start, u64 end)
 242{
 243	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 244	struct address_space *mapping = inode->i_mapping;
 245	pgoff_t start_index = start >> PAGE_SHIFT;
 246	pgoff_t end_index = end >> PAGE_SHIFT;
 247	pgoff_t index = start_index;
 248	u64 processed_end = start;
 249	struct folio_batch fbatch;
 250
 251	if (index == locked_folio->index && index == end_index)
 252		return 0;
 253
 254	folio_batch_init(&fbatch);
 255	while (index <= end_index) {
 256		unsigned int found_folios, i;
 257
 258		found_folios = filemap_get_folios_contig(mapping, &index,
 259				end_index, &fbatch);
 260		if (found_folios == 0)
 261			goto out;
 262
 263		for (i = 0; i < found_folios; i++) {
 264			struct folio *folio = fbatch.folios[i];
 265			u64 range_start;
 266			u32 range_len;
 267
 268			if (folio == locked_folio)
 269				continue;
 270
 271			folio_lock(folio);
 272			if (!folio_test_dirty(folio) || folio->mapping != mapping) {
 273				folio_unlock(folio);
 274				goto out;
 275			}
 276			range_start = max_t(u64, folio_pos(folio), start);
 277			range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
 278					  end + 1) - range_start;
 279			btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 280
 281			processed_end = range_start + range_len - 1;
 282		}
 283		folio_batch_release(&fbatch);
 284		cond_resched();
 285	}
 286
 287	return 0;
 288out:
 289	folio_batch_release(&fbatch);
 290	if (processed_end > start)
 291		__unlock_for_delalloc(inode, locked_folio, start,
 292				      processed_end);
 293	return -EAGAIN;
 294}
 295
 296/*
 297 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 298 * more than @max_bytes.
 299 *
 300 * @start:	The original start bytenr to search.
 301 *		Will store the extent range start bytenr.
 302 * @end:	The original end bytenr of the search range
 303 *		Will store the extent range end bytenr.
 304 *
 305 * Return true if we find a delalloc range which starts inside the original
 306 * range, and @start/@end will store the delalloc range start/end.
 307 *
 308 * Return false if we can't find any delalloc range which starts inside the
 309 * original range, and @start/@end will be the non-delalloc range start/end.
 310 */
 311EXPORT_FOR_TESTS
 312noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 313						 struct folio *locked_folio,
 314						 u64 *start, u64 *end)
 315{
 316	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 317	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 318	const u64 orig_start = *start;
 319	const u64 orig_end = *end;
 320	/* The sanity tests may not set a valid fs_info. */
 321	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
 322	u64 delalloc_start;
 323	u64 delalloc_end;
 324	bool found;
 325	struct extent_state *cached_state = NULL;
 326	int ret;
 327	int loops = 0;
 328
 329	/* Caller should pass a valid @end to indicate the search range end */
 330	ASSERT(orig_end > orig_start);
 331
 332	/* The range should at least cover part of the folio */
 333	ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
 334		 orig_end <= folio_pos(locked_folio)));
 335again:
 336	/* step one, find a bunch of delalloc bytes starting at start */
 337	delalloc_start = *start;
 338	delalloc_end = 0;
 339	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 340					  max_bytes, &cached_state);
 341	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
 342		*start = delalloc_start;
 343
 344		/* @delalloc_end can be -1, never go beyond @orig_end */
 345		*end = min(delalloc_end, orig_end);
 346		free_extent_state(cached_state);
 347		return false;
 348	}
 349
 350	/*
 351	 * start comes from the offset of locked_folio.  We have to lock
 352	 * folios in order, so we can't process delalloc bytes before
 353	 * locked_folio
 354	 */
 355	if (delalloc_start < *start)
 356		delalloc_start = *start;
 357
 358	/*
 359	 * make sure to limit the number of folios we try to lock down
 360	 */
 361	if (delalloc_end + 1 - delalloc_start > max_bytes)
 362		delalloc_end = delalloc_start + max_bytes - 1;
 363
  364	/* step two, lock all the folios after the folio that has start */
 365	ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
 366				   delalloc_end);
 367	ASSERT(!ret || ret == -EAGAIN);
 368	if (ret == -EAGAIN) {
  369		/* some of the folios are gone, let's avoid looping by
 370		 * shortening the size of the delalloc range we're searching
 371		 */
 372		free_extent_state(cached_state);
 373		cached_state = NULL;
 374		if (!loops) {
 375			max_bytes = PAGE_SIZE;
 376			loops = 1;
 377			goto again;
 378		} else {
 379			found = false;
 380			goto out_failed;
 381		}
 382	}
 383
 384	/* step three, lock the state bits for the whole range */
 385	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 386
 387	/* then test to make sure it is all still delalloc */
 388	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 389			     EXTENT_DELALLOC, cached_state);
 390
 391	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 392	if (!ret) {
 393		__unlock_for_delalloc(inode, locked_folio, delalloc_start,
 394				      delalloc_end);
 395		cond_resched();
 396		goto again;
 397	}
 398	*start = delalloc_start;
 399	*end = delalloc_end;
 400out_failed:
 401	return found;
 402}
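For instance, with the default max_bytes, calling this with *start == 0 and *end == 65535 while file bytes [16K, 192K) are delalloc (and assuming all folios in that range are present and dirty) returns true with *start == 16384 and *end == 196607; the returned end may extend past the original search range, which is why writepage_delalloc() below saves it separately as last_delalloc_end.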
 403
 404void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 405				  const struct folio *locked_folio,
 406				  struct extent_state **cached,
 407				  u32 clear_bits, unsigned long page_ops)
 408{
 409	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
 410
 411	__process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
 412				end, page_ops);
 413}
 414
 415static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
 416{
 417	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 418
 419	if (!fsverity_active(folio->mapping->host) ||
 420	    btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
 421	    start >= i_size_read(folio->mapping->host))
 422		return true;
 423	return fsverity_verify_folio(folio);
 424}
 425
 426static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
 427{
 428	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 429
 430	ASSERT(folio_pos(folio) <= start &&
 431	       start + len <= folio_pos(folio) + PAGE_SIZE);
 432
 433	if (uptodate && btrfs_verify_folio(folio, start, len))
 434		btrfs_folio_set_uptodate(fs_info, folio, start, len);
 435	else
 436		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
 437
 438	if (!btrfs_is_subpage(fs_info, folio->mapping))
 439		folio_unlock(folio);
 440	else
 441		btrfs_folio_end_lock(fs_info, folio, start, len);
 442}
 443
 444/*
 445 * After a write IO is done, we need to:
 446 *
 447 * - clear the uptodate bits on error
 448 * - clear the writeback bits in the extent tree for the range
  449 * - folio_end_writeback() if there is no more pending io for the folio
 450 *
 451 * Scheduling is not allowed, so the extent state tree is expected
 452 * to have one and only one object corresponding to this IO.
 453 */
 454static void end_bbio_data_write(struct btrfs_bio *bbio)
 455{
 456	struct btrfs_fs_info *fs_info = bbio->fs_info;
 457	struct bio *bio = &bbio->bio;
 458	int error = blk_status_to_errno(bio->bi_status);
 459	struct folio_iter fi;
 460	const u32 sectorsize = fs_info->sectorsize;
 461
 462	ASSERT(!bio_flagged(bio, BIO_CLONED));
 463	bio_for_each_folio_all(fi, bio) {
 464		struct folio *folio = fi.folio;
 465		u64 start = folio_pos(folio) + fi.offset;
 466		u32 len = fi.length;
 467
 468		/* Only order 0 (single page) folios are allowed for data. */
 469		ASSERT(folio_order(folio) == 0);
 470
 471		/* Our read/write should always be sector aligned. */
 472		if (!IS_ALIGNED(fi.offset, sectorsize))
 473			btrfs_err(fs_info,
 474		"partial page write in btrfs with offset %zu and length %zu",
 475				  fi.offset, fi.length);
 476		else if (!IS_ALIGNED(fi.length, sectorsize))
 477			btrfs_info(fs_info,
 478		"incomplete page write with offset %zu and length %zu",
 479				   fi.offset, fi.length);
 480
 481		btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
 482					    !error);
 483		if (error)
 484			mapping_set_error(folio->mapping, error);
 485		btrfs_folio_clear_writeback(fs_info, folio, start, len);
 486	}
 487
 488	bio_put(bio);
 489}
 490
 491static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
 492{
 493	ASSERT(folio_test_locked(folio));
 494	if (!btrfs_is_subpage(fs_info, folio->mapping))
 495		return;
 496
 497	ASSERT(folio_test_private(folio));
 498	btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
 499}
 500
 501/*
 502 * After a data read IO is done, we need to:
 503 *
 504 * - clear the uptodate bits on error
 505 * - set the uptodate bits if things worked
 506 * - set the folio up to date if all extents in the tree are uptodate
 507 * - clear the lock bit in the extent tree
 508 * - unlock the folio if there are no other extents locked for it
 509 *
 510 * Scheduling is not allowed, so the extent state tree is expected
 511 * to have one and only one object corresponding to this IO.
 512 */
 513static void end_bbio_data_read(struct btrfs_bio *bbio)
 514{
 515	struct btrfs_fs_info *fs_info = bbio->fs_info;
 516	struct bio *bio = &bbio->bio;
 517	struct folio_iter fi;
 518	const u32 sectorsize = fs_info->sectorsize;
 519
 520	ASSERT(!bio_flagged(bio, BIO_CLONED));
 521	bio_for_each_folio_all(fi, &bbio->bio) {
 522		bool uptodate = !bio->bi_status;
 523		struct folio *folio = fi.folio;
 524		struct inode *inode = folio->mapping->host;
 525		u64 start;
 526		u64 end;
 527		u32 len;
 528
 529		/* For now only order 0 folios are supported for data. */
 530		ASSERT(folio_order(folio) == 0);
 531		btrfs_debug(fs_info,
 532			"%s: bi_sector=%llu, err=%d, mirror=%u",
 533			__func__, bio->bi_iter.bi_sector, bio->bi_status,
 534			bbio->mirror_num);
 535
 536		/*
 537		 * We always issue full-sector reads, but if some block in a
 538		 * folio fails to read, blk_update_request() will advance
 539		 * bv_offset and adjust bv_len to compensate.  Print a warning
 540		 * for unaligned offsets, and an error if they don't add up to
 541		 * a full sector.
 542		 */
 543		if (!IS_ALIGNED(fi.offset, sectorsize))
 544			btrfs_err(fs_info,
 545		"partial page read in btrfs with offset %zu and length %zu",
 546				  fi.offset, fi.length);
 547		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
 548			btrfs_info(fs_info,
 549		"incomplete page read with offset %zu and length %zu",
 550				   fi.offset, fi.length);
 551
 552		start = folio_pos(folio) + fi.offset;
 553		end = start + fi.length - 1;
 554		len = fi.length;
 555
 556		if (likely(uptodate)) {
 557			loff_t i_size = i_size_read(inode);
 558			pgoff_t end_index = i_size >> folio_shift(folio);
 559
 560			/*
 561			 * Zero out the remaining part if this range straddles
 562			 * i_size.
 563			 *
 564			 * Here we should only zero the range inside the folio,
 565			 * not touch anything else.
 566			 *
 567			 * NOTE: i_size is exclusive while end is inclusive.
 568			 */
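			/*
			 * For example: a 4K folio at file offset 0, i_size ==
			 * 2000 and a full 4K read give zero_start == 2000 and
			 * zero_len == 2096, so bytes [2000, 4096) of the folio
			 * are zeroed.
			 */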
 569			if (folio_index(folio) == end_index && i_size <= end) {
 570				u32 zero_start = max(offset_in_folio(folio, i_size),
 571						     offset_in_folio(folio, start));
 572				u32 zero_len = offset_in_folio(folio, end) + 1 -
 573					       zero_start;
 574
 575				folio_zero_range(folio, zero_start, zero_len);
 576			}
 577		}
 578
 579		/* Update page status and unlock. */
 580		end_folio_read(folio, uptodate, start, len);
 581	}
 582	bio_put(bio);
 583}
 584
 585/*
 586 * Populate every free slot in a provided array with folios using GFP_NOFS.
 587 *
 588 * @nr_folios:   number of folios to allocate
 589 * @folio_array: the array to fill with folios; any existing non-NULL entries in
 590 *		 the array will be skipped
 591 *
 592 * Return: 0        if all folios were able to be allocated;
 593 *         -ENOMEM  otherwise, the partially allocated folios would be freed and
 594 *                  the array slots zeroed
 595 */
 596int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
 597{
 598	for (int i = 0; i < nr_folios; i++) {
 599		if (folio_array[i])
 600			continue;
 601		folio_array[i] = folio_alloc(GFP_NOFS, 0);
 602		if (!folio_array[i])
 603			goto error;
 604	}
 605	return 0;
 606error:
 607	for (int i = 0; i < nr_folios; i++) {
 608		if (folio_array[i])
 609			folio_put(folio_array[i]);
 610	}
 611	return -ENOMEM;
 612}
 613
 614/*
 615 * Populate every free slot in a provided array with pages, using GFP_NOFS.
 616 *
 617 * @nr_pages:   number of pages to allocate
 618 * @page_array: the array to fill with pages; any existing non-null entries in
 619 *		the array will be skipped
 620 * @nofail:	whether using __GFP_NOFAIL flag
 621 *
 622 * Return: 0        if all pages were able to be allocated;
 623 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 624 *                  the array slots zeroed
 625 */
 626int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 627			   bool nofail)
 628{
 629	const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
 630	unsigned int allocated;
 631
 632	for (allocated = 0; allocated < nr_pages;) {
 633		unsigned int last = allocated;
 634
 635		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
 636		if (unlikely(allocated == last)) {
 637			/* No progress, fail and do cleanup. */
 638			for (int i = 0; i < allocated; i++) {
 639				__free_page(page_array[i]);
 640				page_array[i] = NULL;
 641			}
 642			return -ENOMEM;
 643		}
 644	}
 645	return 0;
 646}
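A hedged usage sketch (the array length here is arbitrary); alloc_eb_folio_array() just below follows the same pattern for extent buffer pages:

	struct page *pages[4] = { 0 };

	/* Fill every NULL slot; on failure the partial allocation is freed. */
	if (btrfs_alloc_page_array(ARRAY_SIZE(pages), pages, false))
		return -ENOMEM;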
 647
 648/*
 649 * Populate needed folios for the extent buffer.
 650 *
 651 * For now, the folios populated are always in order 0 (aka, single page).
 652 */
 653static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
 654{
 655	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
 656	int num_pages = num_extent_pages(eb);
 657	int ret;
 658
 659	ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
 660	if (ret < 0)
 661		return ret;
 662
 663	for (int i = 0; i < num_pages; i++)
 664		eb->folios[i] = page_folio(page_array[i]);
 665	eb->folio_size = PAGE_SIZE;
 666	eb->folio_shift = PAGE_SHIFT;
 667	return 0;
 668}
 669
 670static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
 671				struct folio *folio, u64 disk_bytenr,
 672				unsigned int pg_offset)
 673{
 674	struct bio *bio = &bio_ctrl->bbio->bio;
 675	struct bio_vec *bvec = bio_last_bvec_all(bio);
 676	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
 677	struct folio *bv_folio = page_folio(bvec->bv_page);
 678
 679	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
 680		/*
 681		 * For compression, all IO should have its logical bytenr set
 682		 * to the starting bytenr of the compressed extent.
 683		 */
 684		return bio->bi_iter.bi_sector == sector;
 685	}
 686
 687	/*
 688	 * The contig check requires the following conditions to be met:
 689	 *
  690	 * 1) The folios belong to the same inode
 691	 *    This is implied by the call chain.
 692	 *
 693	 * 2) The range has adjacent logical bytenr
 694	 *
 695	 * 3) The range has adjacent file offset
 696	 *    This is required for the usage of btrfs_bio->file_offset.
 697	 */
 698	return bio_end_sector(bio) == sector &&
 699		folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
 700		folio_pos(folio) + pg_offset;
 701}
 702
 703static void alloc_new_bio(struct btrfs_inode *inode,
 704			  struct btrfs_bio_ctrl *bio_ctrl,
 705			  u64 disk_bytenr, u64 file_offset)
 706{
 707	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 708	struct btrfs_bio *bbio;
 709
 710	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
 711			       bio_ctrl->end_io_func, NULL);
 712	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 713	bbio->inode = inode;
 714	bbio->file_offset = file_offset;
 715	bio_ctrl->bbio = bbio;
 716	bio_ctrl->len_to_oe_boundary = U32_MAX;
 717
 718	/* Limit data write bios to the ordered boundary. */
 719	if (bio_ctrl->wbc) {
 720		struct btrfs_ordered_extent *ordered;
 721
 722		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 723		if (ordered) {
 724			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
 725					ordered->file_offset +
 726					ordered->disk_num_bytes - file_offset);
 727			bbio->ordered = ordered;
 728		}
 729
 730		/*
 731		 * Pick the last added device to support cgroup writeback.  For
 732		 * multi-device file systems this means blk-cgroup policies have
 733		 * to always be set on the last added/replaced device.
 734		 * This is a bit odd but has been like that for a long time.
 735		 */
 736		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
 737		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
 738	}
 739}
 740
 741/*
 742 * @disk_bytenr: logical bytenr where the write will be
  743 * @folio:	folio to add to the bio
  744 * @size:	portion of the folio that we want to write to
  745 * @pg_offset:	offset of the new bio or to check whether we are adding
  746 *              a contiguous folio to the previous one
  747 *
  748 * This will either add the folio into the existing @bio_ctrl->bbio, or allocate a
  749 * new one in @bio_ctrl->bbio.
  750 * The mirror number for this IO should already be initialized in
 751 * @bio_ctrl->mirror_num.
 752 */
 753static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
 754			       u64 disk_bytenr, struct folio *folio,
 755			       size_t size, unsigned long pg_offset)
 756{
 757	struct btrfs_inode *inode = folio_to_inode(folio);
 758
 759	ASSERT(pg_offset + size <= PAGE_SIZE);
 760	ASSERT(bio_ctrl->end_io_func);
 761
 762	if (bio_ctrl->bbio &&
 763	    !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
 764		submit_one_bio(bio_ctrl);
 765
 766	do {
 767		u32 len = size;
 768
 769		/* Allocate new bio if needed */
 770		if (!bio_ctrl->bbio) {
 771			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
 772				      folio_pos(folio) + pg_offset);
 773		}
 774
 775		/* Cap to the current ordered extent boundary if there is one. */
 776		if (len > bio_ctrl->len_to_oe_boundary) {
 777			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
 778			ASSERT(is_data_inode(inode));
 779			len = bio_ctrl->len_to_oe_boundary;
 780		}
 781
 782		if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
 783			/* bio full: move on to a new one */
 784			submit_one_bio(bio_ctrl);
 785			continue;
 786		}
 787
 788		if (bio_ctrl->wbc)
 789			wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
 790						 len);
 791
 792		size -= len;
 793		pg_offset += len;
 794		disk_bytenr += len;
 795
 796		/*
 797		 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
 798		 * sector aligned.  alloc_new_bio() then sets it to the end of
 799		 * our ordered extent for writes into zoned devices.
 800		 *
 801		 * When len_to_oe_boundary is tracking an ordered extent, we
 802		 * trust the ordered extent code to align things properly, and
 803		 * the check above to cap our write to the ordered extent
 804		 * boundary is correct.
 805		 *
 806		 * When len_to_oe_boundary is U32_MAX, the cap above would
 807		 * result in a 4095 byte IO for the last folio right before
 808		 * we hit the bio limit of UINT_MAX.  bio_add_folio() has all
 809		 * the checks required to make sure we don't overflow the bio,
 810		 * and we should just ignore len_to_oe_boundary completely
 811		 * unless we're using it to track an ordered extent.
 812		 *
 813		 * It's pretty hard to make a bio sized U32_MAX, but it can
 814		 * happen when the page cache is able to feed us contiguous
 815		 * folios for large extents.
 816		 */
 817		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
 818			bio_ctrl->len_to_oe_boundary -= len;
 819
 820		/* Ordered extent boundary: move on to a new bio. */
 821		if (bio_ctrl->len_to_oe_boundary == 0)
 822			submit_one_bio(bio_ctrl);
 823	} while (size);
 824}
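As a concrete trace of the boundary handling above: if alloc_new_bio() set len_to_oe_boundary to 4K, then after 4K worth of data has been added the counter reaches zero and the bio is submitted; the next call that finds bio_ctrl->bbio == NULL allocates a fresh bio and re-derives the boundary from the ordered extent covering the new file offset.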
 825
 826static int attach_extent_buffer_folio(struct extent_buffer *eb,
 827				      struct folio *folio,
 828				      struct btrfs_subpage *prealloc)
 829{
 830	struct btrfs_fs_info *fs_info = eb->fs_info;
 831	int ret = 0;
 832
 833	/*
 834	 * If the page is mapped to btree inode, we should hold the private
 835	 * lock to prevent race.
 836	 * For cloned or dummy extent buffers, their pages are not mapped and
 837	 * will not race with any other ebs.
 838	 */
 839	if (folio->mapping)
 840		lockdep_assert_held(&folio->mapping->i_private_lock);
 841
 842	if (fs_info->nodesize >= PAGE_SIZE) {
 843		if (!folio_test_private(folio))
 844			folio_attach_private(folio, eb);
 845		else
 846			WARN_ON(folio_get_private(folio) != eb);
 847		return 0;
 848	}
 849
 850	/* Already mapped, just free prealloc */
 851	if (folio_test_private(folio)) {
 852		btrfs_free_subpage(prealloc);
 853		return 0;
 854	}
 855
 856	if (prealloc)
 857		/* Has preallocated memory for subpage */
 858		folio_attach_private(folio, prealloc);
 859	else
 860		/* Do new allocation to attach subpage */
 861		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
 862	return ret;
 863}
 864
 865int set_page_extent_mapped(struct page *page)
 866{
 867	return set_folio_extent_mapped(page_folio(page));
 868}
 869
 870int set_folio_extent_mapped(struct folio *folio)
 871{
 872	struct btrfs_fs_info *fs_info;
 873
 874	ASSERT(folio->mapping);
 875
 876	if (folio_test_private(folio))
 877		return 0;
 878
 879	fs_info = folio_to_fs_info(folio);
 880
 881	if (btrfs_is_subpage(fs_info, folio->mapping))
 882		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 883
 884	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
 885	return 0;
 886}
 887
 888void clear_folio_extent_mapped(struct folio *folio)
 889{
 890	struct btrfs_fs_info *fs_info;
 891
 892	ASSERT(folio->mapping);
 893
 894	if (!folio_test_private(folio))
 895		return;
 896
 897	fs_info = folio_to_fs_info(folio);
 898	if (btrfs_is_subpage(fs_info, folio->mapping))
 899		return btrfs_detach_subpage(fs_info, folio);
 900
 901	folio_detach_private(folio);
 902}
 903
 904static struct extent_map *get_extent_map(struct btrfs_inode *inode,
 905					 struct folio *folio, u64 start,
 906					 u64 len, struct extent_map **em_cached)
 907{
 908	struct extent_map *em;
 909
 910	ASSERT(em_cached);
 911
 912	if (*em_cached) {
 913		em = *em_cached;
 914		if (extent_map_in_tree(em) && start >= em->start &&
 915		    start < extent_map_end(em)) {
 916			refcount_inc(&em->refs);
 917			return em;
 918		}
 919
 920		free_extent_map(em);
 921		*em_cached = NULL;
 922	}
 923
 924	em = btrfs_get_extent(inode, folio, start, len);
 925	if (!IS_ERR(em)) {
 926		BUG_ON(*em_cached);
 927		refcount_inc(&em->refs);
 928		*em_cached = em;
 929	}
 930
 931	return em;
 932}
 933/*
 934 * basic readpage implementation.  Locked extent state structs are inserted
 935 * into the tree that are removed when the IO is done (by the end_io
 936 * handlers)
 937 * XXX JDM: This needs looking at to ensure proper page locking
 938 * return 0 on success, otherwise return error
 939 */
 940static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 941		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
 942{
 943	struct inode *inode = folio->mapping->host;
 944	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 945	u64 start = folio_pos(folio);
 946	const u64 end = start + PAGE_SIZE - 1;
 947	u64 cur = start;
 948	u64 extent_offset;
 949	u64 last_byte = i_size_read(inode);
 950	u64 block_start;
 951	struct extent_map *em;
 952	int ret = 0;
 953	size_t pg_offset = 0;
 954	size_t iosize;
 955	size_t blocksize = fs_info->sectorsize;
 956
 957	ret = set_folio_extent_mapped(folio);
 958	if (ret < 0) {
 959		folio_unlock(folio);
 960		return ret;
 961	}
 962
 963	if (folio->index == last_byte >> folio_shift(folio)) {
 964		size_t zero_offset = offset_in_folio(folio, last_byte);
 965
 966		if (zero_offset) {
 967			iosize = folio_size(folio) - zero_offset;
 968			folio_zero_range(folio, zero_offset, iosize);
 969		}
 970	}
 971	bio_ctrl->end_io_func = end_bbio_data_read;
 972	begin_folio_read(fs_info, folio);
 973	while (cur <= end) {
 974		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
 975		bool force_bio_submit = false;
 976		u64 disk_bytenr;
 977
 978		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
 979		if (cur >= last_byte) {
 980			iosize = folio_size(folio) - pg_offset;
 981			folio_zero_range(folio, pg_offset, iosize);
 982			end_folio_read(folio, true, cur, iosize);
 983			break;
 984		}
 985		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
 986		if (IS_ERR(em)) {
 987			end_folio_read(folio, false, cur, end + 1 - cur);
 988			return PTR_ERR(em);
 989		}
 990		extent_offset = cur - em->start;
 991		BUG_ON(extent_map_end(em) <= cur);
 992		BUG_ON(end < cur);
 993
 994		compress_type = extent_map_compression(em);
 995
 996		iosize = min(extent_map_end(em) - cur, end - cur + 1);
 997		iosize = ALIGN(iosize, blocksize);
 998		if (compress_type != BTRFS_COMPRESS_NONE)
 999			disk_bytenr = em->disk_bytenr;
1000		else
1001			disk_bytenr = extent_map_block_start(em) + extent_offset;
1002		block_start = extent_map_block_start(em);
1003		if (em->flags & EXTENT_FLAG_PREALLOC)
1004			block_start = EXTENT_MAP_HOLE;
1005
1006		/*
1007		 * If we have a file range that points to a compressed extent
1008		 * and it's followed by a consecutive file range that points
1009		 * to the same compressed extent (possibly with a different
1010		 * offset and/or length, so it either points to the whole extent
1011		 * or only part of it), we must make sure we do not submit a
1012		 * single bio to populate the folios for the 2 ranges because
1013		 * this makes the compressed extent read zero out the folios
1014		 * belonging to the 2nd range. Imagine the following scenario:
1015		 *
1016		 *  File layout
1017		 *  [0 - 8K]                     [8K - 24K]
1018		 *    |                               |
1019		 *    |                               |
1020		 * points to extent X,         points to extent X,
1021		 * offset 4K, length of 8K     offset 0, length 16K
1022		 *
1023		 * [extent X, compressed length = 4K uncompressed length = 16K]
1024		 *
1025		 * If the bio to read the compressed extent covers both ranges,
1026		 * it will decompress extent X into the folios belonging to the
1027		 * first range and then it will stop, zeroing out the remaining
1028		 * folios that belong to the other range that points to extent X.
1029		 * So here we make sure we submit 2 bios, one for the first
1030		 * range and another one for the third range. Both will target
1031		 * the same physical extent from disk, but we can't currently
1032		 * make the compressed bio endio callback populate the folios
1033		 * for both ranges because each compressed bio is tightly
1034		 * coupled with a single extent map, and each range can have
1035		 * an extent map with a different offset value relative to the
1036		 * uncompressed data of our extent and different lengths. This
1037		 * is a corner case so we prioritize correctness over
1038		 * non-optimal behavior (submitting 2 bios for the same extent).
1039		 */
1040		if (compress_type != BTRFS_COMPRESS_NONE &&
1041		    prev_em_start && *prev_em_start != (u64)-1 &&
1042		    *prev_em_start != em->start)
1043			force_bio_submit = true;
1044
1045		if (prev_em_start)
1046			*prev_em_start = em->start;
1047
1048		free_extent_map(em);
1049		em = NULL;
1050
1051		/* we've found a hole, just zero and go on */
1052		if (block_start == EXTENT_MAP_HOLE) {
1053			folio_zero_range(folio, pg_offset, iosize);
1054
1055			end_folio_read(folio, true, cur, iosize);
1056			cur = cur + iosize;
1057			pg_offset += iosize;
1058			continue;
1059		}
1060		/* the get_extent function already copied into the folio */
1061		if (block_start == EXTENT_MAP_INLINE) {
1062			end_folio_read(folio, true, cur, iosize);
1063			cur = cur + iosize;
1064			pg_offset += iosize;
1065			continue;
1066		}
1067
1068		if (bio_ctrl->compress_type != compress_type) {
1069			submit_one_bio(bio_ctrl);
1070			bio_ctrl->compress_type = compress_type;
1071		}
1072
1073		if (force_bio_submit)
1074			submit_one_bio(bio_ctrl);
1075		submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1076				    pg_offset);
1077		cur = cur + iosize;
1078		pg_offset += iosize;
1079	}
1080
1081	return 0;
1082}
1083
1084int btrfs_read_folio(struct file *file, struct folio *folio)
1085{
1086	struct btrfs_inode *inode = folio_to_inode(folio);
1087	const u64 start = folio_pos(folio);
1088	const u64 end = start + folio_size(folio) - 1;
1089	struct extent_state *cached_state = NULL;
1090	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1091	struct extent_map *em_cached = NULL;
1092	int ret;
1093
1094	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
1095	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1096	unlock_extent(&inode->io_tree, start, end, &cached_state);
1097
1098	free_extent_map(em_cached);
1099
1100	/*
1101	 * If btrfs_do_readpage() failed we will want to submit the assembled
1102	 * bio to do the cleanup.
1103	 */
1104	submit_one_bio(&bio_ctrl);
1105	return ret;
1106}
1107
1108static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1109				u64 start, u32 len)
1110{
1111	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1112	const u64 folio_start = folio_pos(folio);
1113	unsigned int start_bit;
1114	unsigned int nbits;
1115
1116	ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
1117	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1118	nbits = len >> fs_info->sectorsize_bits;
1119	ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1120	bitmap_set(delalloc_bitmap, start_bit, nbits);
1121}
1122
1123static bool find_next_delalloc_bitmap(struct folio *folio,
1124				      unsigned long *delalloc_bitmap, u64 start,
1125				      u64 *found_start, u32 *found_len)
1126{
1127	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1128	const u64 folio_start = folio_pos(folio);
1129	const unsigned int bitmap_size = fs_info->sectors_per_page;
1130	unsigned int start_bit;
1131	unsigned int first_zero;
1132	unsigned int first_set;
1133
1134	ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
1135
1136	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1137	first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1138	if (first_set >= bitmap_size)
1139		return false;
1140
1141	*found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1142	first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1143	*found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1144	return true;
1145}
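For example, on a subpage setup with 4K sectors and a 64K folio (sectors_per_page == 16), a delalloc range covering folio bytes [8K, 24K) sets bits 2-5 in the bitmap; find_next_delalloc_bitmap() starting from the folio start then reports found_start == folio_start + 8K and found_len == 16K.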
1146
1147/*
1148 * Do all of the delayed allocation setup.
1149 *
1150 * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
1151 * The @folio should no longer be touched (treat it as already unlocked).
1152 *
1153 * Return 0 if there is still dirty block that needs to be submitted through
1154 * extent_writepage_io().
1155 * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
1156 * submitted, and @folio is still kept locked.
1157 *
1158 * Return <0 if there is any error hit.
1159 * Any allocated ordered extent range covering this folio will be marked
1160 * finished (IOERR), and @folio is still kept locked.
1161 */
1162static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1163						 struct folio *folio,
1164						 struct btrfs_bio_ctrl *bio_ctrl)
1165{
1166	struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1167	struct writeback_control *wbc = bio_ctrl->wbc;
1168	const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1169	const u64 page_start = folio_pos(folio);
1170	const u64 page_end = page_start + folio_size(folio) - 1;
1171	unsigned long delalloc_bitmap = 0;
1172	/*
1173	 * Save the last found delalloc end. As the delalloc end can go beyond
 1174	 * page boundary, we cannot rely on the subpage bitmap to locate the
1175	 * last delalloc end.
1176	 */
1177	u64 last_delalloc_end = 0;
1178	/*
1179	 * The range end (exclusive) of the last successfully finished delalloc
1180	 * range.
1181	 * Any range covered by ordered extent must either be manually marked
1182	 * finished (error handling), or has IO submitted (and finish the
1183	 * ordered extent normally).
1184	 *
1185	 * This records the end of ordered extent cleanup if we hit an error.
1186	 */
1187	u64 last_finished_delalloc_end = page_start;
1188	u64 delalloc_start = page_start;
1189	u64 delalloc_end = page_end;
1190	u64 delalloc_to_write = 0;
1191	int ret = 0;
1192	int bit;
1193
1194	/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1195	if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1196		ASSERT(fs_info->sectors_per_page > 1);
1197		btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1198	} else {
1199		bio_ctrl->submit_bitmap = 1;
1200	}
1201
1202	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1203		u64 start = page_start + (bit << fs_info->sectorsize_bits);
1204
1205		btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1206	}
1207
1208	/* Lock all (subpage) delalloc ranges inside the folio first. */
1209	while (delalloc_start < page_end) {
1210		delalloc_end = page_end;
1211		if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1212					      &delalloc_start, &delalloc_end)) {
1213			delalloc_start = delalloc_end + 1;
1214			continue;
1215		}
1216		set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1217				    min(delalloc_end, page_end) + 1 - delalloc_start);
1218		last_delalloc_end = delalloc_end;
1219		delalloc_start = delalloc_end + 1;
1220	}
1221	delalloc_start = page_start;
1222
1223	if (!last_delalloc_end)
1224		goto out;
1225
1226	/* Run the delalloc ranges for the above locked ranges. */
1227	while (delalloc_start < page_end) {
1228		u64 found_start;
1229		u32 found_len;
1230		bool found;
1231
1232		if (!is_subpage) {
1233			/*
1234			 * For non-subpage case, the found delalloc range must
1235			 * cover this folio and there must be only one locked
1236			 * delalloc range.
1237			 */
1238			found_start = page_start;
1239			found_len = last_delalloc_end + 1 - found_start;
1240			found = true;
1241		} else {
1242			found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1243					delalloc_start, &found_start, &found_len);
1244		}
1245		if (!found)
1246			break;
1247		/*
 1248		 * If the subpage range covers the last sector, the delalloc range may
 1249		 * end beyond the folio boundary; use the saved delalloc_end
1250		 * instead.
1251		 */
1252		if (found_start + found_len >= page_end)
1253			found_len = last_delalloc_end + 1 - found_start;
1254
1255		if (ret >= 0) {
1256			/*
1257			 * Some delalloc range may be created by previous folios.
1258			 * Thus we still need to clean up this range during error
1259			 * handling.
1260			 */
1261			last_finished_delalloc_end = found_start;
1262			/* No errors hit so far, run the current delalloc range. */
1263			ret = btrfs_run_delalloc_range(inode, folio,
1264						       found_start,
1265						       found_start + found_len - 1,
1266						       wbc);
1267			if (ret >= 0)
1268				last_finished_delalloc_end = found_start + found_len;
1269		} else {
1270			/*
1271			 * We've hit an error during previous delalloc range,
1272			 * have to cleanup the remaining locked ranges.
1273			 */
1274			unlock_extent(&inode->io_tree, found_start,
1275				      found_start + found_len - 1, NULL);
1276			__unlock_for_delalloc(&inode->vfs_inode, folio,
1277					      found_start,
1278					      found_start + found_len - 1);
1279		}
1280
1281		/*
 1282		 * We have some ranges that are going to be submitted asynchronously
 1283		 * (compression or inline).  These ranges have their own control
1284		 * on when to unlock the pages.  We should not touch them
1285		 * anymore, so clear the range from the submission bitmap.
1286		 */
1287		if (ret > 0) {
1288			unsigned int start_bit = (found_start - page_start) >>
1289						 fs_info->sectorsize_bits;
1290			unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1291						page_start) >> fs_info->sectorsize_bits;
1292			bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1293		}
1294		/*
1295		 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1296		 * thus for the last range, we cannot touch the folio anymore.
1297		 */
1298		if (found_start + found_len >= last_delalloc_end + 1)
1299			break;
1300
1301		delalloc_start = found_start + found_len;
1302	}
1303	/*
1304	 * It's possible we had some ordered extents created before we hit
1305	 * an error, cleanup non-async successfully created delalloc ranges.
1306	 */
1307	if (unlikely(ret < 0)) {
1308		unsigned int bitmap_size = min(
1309				(last_finished_delalloc_end - page_start) >>
1310				fs_info->sectorsize_bits,
1311				fs_info->sectors_per_page);
1312
1313		for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
1314			btrfs_mark_ordered_io_finished(inode, folio,
1315				page_start + (bit << fs_info->sectorsize_bits),
1316				fs_info->sectorsize, false);
1317		return ret;
1318	}
1319out:
1320	if (last_delalloc_end)
1321		delalloc_end = last_delalloc_end;
1322	else
1323		delalloc_end = page_end;
1324	/*
1325	 * delalloc_end is already one less than the total length, so
1326	 * we don't subtract one from PAGE_SIZE
1327	 */
1328	delalloc_to_write +=
1329		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1330
1331	/*
1332	 * If all ranges are submitted asynchronously, we just need to account
1333	 * for them here.
1334	 */
1335	if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1336		wbc->nr_to_write -= delalloc_to_write;
1337		return 1;
1338	}
1339
1340	if (wbc->nr_to_write < delalloc_to_write) {
1341		int thresh = 8192;
1342
1343		if (delalloc_to_write < thresh * 2)
1344			thresh = delalloc_to_write;
1345		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1346					 thresh);
1347	}
1348
1349	return 0;
1350}
1351
1352/*
1353 * Return 0 if we have submitted or queued the sector for submission.
1354 * Return <0 for critical errors.
1355 *
1356 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1357 */
1358static int submit_one_sector(struct btrfs_inode *inode,
1359			     struct folio *folio,
1360			     u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1361			     loff_t i_size)
1362{
1363	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1364	struct extent_map *em;
1365	u64 block_start;
1366	u64 disk_bytenr;
1367	u64 extent_offset;
1368	u64 em_end;
1369	const u32 sectorsize = fs_info->sectorsize;
1370
1371	ASSERT(IS_ALIGNED(filepos, sectorsize));
1372
1373	/* @filepos >= i_size case should be handled by the caller. */
1374	ASSERT(filepos < i_size);
1375
1376	em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1377	if (IS_ERR(em))
1378		return PTR_ERR_OR_ZERO(em);
1379
1380	extent_offset = filepos - em->start;
1381	em_end = extent_map_end(em);
1382	ASSERT(filepos <= em_end);
1383	ASSERT(IS_ALIGNED(em->start, sectorsize));
1384	ASSERT(IS_ALIGNED(em->len, sectorsize));
1385
1386	block_start = extent_map_block_start(em);
1387	disk_bytenr = extent_map_block_start(em) + extent_offset;
1388
1389	ASSERT(!extent_map_is_compressed(em));
1390	ASSERT(block_start != EXTENT_MAP_HOLE);
1391	ASSERT(block_start != EXTENT_MAP_INLINE);
1392
1393	free_extent_map(em);
1394	em = NULL;
1395
1396	/*
1397	 * Although the PageDirty bit is cleared before entering this
1398	 * function, subpage dirty bit is not cleared.
1399	 * So clear subpage dirty bit here so next time we won't submit
1400	 * a folio for a range already written to disk.
1401	 */
1402	btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1403	btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1404	/*
1405	 * Above call should set the whole folio with writeback flag, even
1406	 * just for a single subpage sector.
1407	 * As long as the folio is properly locked and the range is correct,
1408	 * we should always get the folio with writeback flag.
1409	 */
1410	ASSERT(folio_test_writeback(folio));
1411
1412	submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1413			    sectorsize, filepos - folio_pos(folio));
1414	return 0;
1415}
1416
1417/*
1418 * Helper for extent_writepage().  This calls the writepage start hooks,
1419 * and does the loop to map the page into extents and bios.
1420 *
1421 * We return 1 if the IO is started and the page is unlocked,
1422 * 0 if all went well (page still locked)
1423 * < 0 if there were errors (page still locked)
1424 */
1425static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1426						  struct folio *folio,
1427						  u64 start, u32 len,
1428						  struct btrfs_bio_ctrl *bio_ctrl,
1429						  loff_t i_size)
1430{
1431	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1432	unsigned long range_bitmap = 0;
1433	bool submitted_io = false;
1434	bool error = false;
1435	const u64 folio_start = folio_pos(folio);
1436	u64 cur;
1437	int bit;
1438	int ret = 0;
1439
1440	ASSERT(start >= folio_start &&
1441	       start + len <= folio_start + folio_size(folio));
1442
1443	ret = btrfs_writepage_cow_fixup(folio);
1444	if (ret) {
1445		/* Fixup worker will requeue */
1446		folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1447		folio_unlock(folio);
1448		return 1;
1449	}
1450
1451	for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1452		set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1453	bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1454		   fs_info->sectors_per_page);
1455
1456	bio_ctrl->end_io_func = end_bbio_data_write;
1457
1458	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1459		cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1460
1461		if (cur >= i_size) {
1462			btrfs_mark_ordered_io_finished(inode, folio, cur,
1463						       start + len - cur, true);
1464			/*
1465			 * This range is beyond i_size, thus we don't need to
1466			 * bother writing back.
1467			 * But we still need to clear the dirty subpage bit, or
1468			 * the next time the folio gets dirtied, we will try to
 1469			 * write back the sectors with subpage dirty bits,
1470			 * causing writeback without ordered extent.
1471			 */
1472			btrfs_folio_clear_dirty(fs_info, folio, cur,
1473						start + len - cur);
1474			break;
1475		}
1476		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1477		if (unlikely(ret < 0)) {
1478			/*
1479			 * bio_ctrl may contain a bio crossing several folios.
1480			 * Submit it immediately so that the bio has a chance
1481			 * to finish normally, other than marked as error.
1482			 */
1483			submit_one_bio(bio_ctrl);
1484			/*
1485			 * Failed to grab the extent map which should be very rare.
1486			 * Since there is no bio submitted to finish the ordered
1487			 * extent, we have to manually finish this sector.
1488			 */
1489			btrfs_mark_ordered_io_finished(inode, folio, cur,
1490						       fs_info->sectorsize, false);
1491			error = true;
1492			continue;
1493		}
1494		submitted_io = true;
1495	}
1496
1497	/*
 1498	 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
 1499	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (it is only cleared
 1500	 * by folio_start_writeback() if the folio is not dirty).
1501	 *
1502	 * Here we set writeback and clear for the range. If the full folio
1503	 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1504	 *
1505	 * If we hit any error, the corresponding sector will still be dirty
1506	 * thus no need to clear PAGECACHE_TAG_DIRTY.
1507	 */
1508	if (!submitted_io && !error) {
1509		btrfs_folio_set_writeback(fs_info, folio, start, len);
1510		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1511	}
1512	return ret;
1513}
1514
1515/*
1516 * the writepage semantics are similar to regular writepage.  extent
1517 * records are inserted to lock ranges in the tree, and as dirty areas
1518 * are found, they are marked writeback.  Then the lock bits are removed
1519 * and the end_io handler clears the writeback ranges
1520 *
1521 * Return 0 if everything goes well.
1522 * Return <0 for error.
1523 */
1524static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
 
1525{
1526	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
1527	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 
1528	int ret;
1529	size_t pg_offset;
1530	loff_t i_size = i_size_read(&inode->vfs_inode);
 
1531	unsigned long end_index = i_size >> PAGE_SHIFT;
 
 
 
 
1532
1533	trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
1534
1535	WARN_ON(!folio_test_locked(folio));
1536
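	/*
	 * Folios fully beyond i_size have nothing to write back: invalidate
	 * them and return.  For the folio straddling i_size, zero the tail
	 * past i_size so no stale data is written to disk.
	 */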
1537	pg_offset = offset_in_folio(folio, i_size);
1538	if (folio->index > end_index ||
1539	   (folio->index == end_index && !pg_offset)) {
1540		folio_invalidate(folio, 0, folio_size(folio));
1541		folio_unlock(folio);
 
 
1542		return 0;
1543	}
1544
1545	if (folio->index == end_index)
1546		folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1547
1548	/*
1549	 * Default to unlock the whole folio.
1550	 * The proper bitmap won't be initialized until writepage_delalloc().
1551	 */
1552	bio_ctrl->submit_bitmap = (unsigned long)-1;
1553	ret = set_folio_extent_mapped(folio);
1554	if (ret < 0)
1555		goto done;
1556
1557	ret = writepage_delalloc(inode, folio, bio_ctrl);
 
 
1558	if (ret == 1)
1559		return 0;
1560	if (ret)
1561		goto done;
1562
1563	ret = extent_writepage_io(inode, folio, folio_pos(folio),
1564				  PAGE_SIZE, bio_ctrl, i_size);
1565	if (ret == 1)
1566		return 0;
1567
1568	bio_ctrl->wbc->nr_to_write--;
1569
1570done:
1571	if (ret < 0)
1572		mapping_set_error(folio->mapping, ret);
1573	/*
1574	 * Only unlock the ranges that were submitted, as there can be some
1575	 * async-submitted ranges inside the folio.
1576	 */
1577	btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1578	ASSERT(ret <= 0);
 
 
1579	return ret;
 
 
 
1580}
1581
1582void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1583{
1584	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1585		       TASK_UNINTERRUPTIBLE);
1586}
1587
1588/*
1589 * Lock extent buffer status and pages for writeback.
1590 *
1591 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1592 * extent buffer is not dirty).
1593 * Return %true if the extent buffer was submitted to a bio.
1594 */
1595static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1596			  struct writeback_control *wbc)
1597{
1598	struct btrfs_fs_info *fs_info = eb->fs_info;
1599	bool ret = false;
 
1600
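	/*
	 * If the eb is already under writeback we only wait for it when doing
	 * a data integrity sync (WB_SYNC_ALL); for background writeback just
	 * skip it and let the in-flight IO finish on its own.
	 */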
1601	btrfs_tree_lock(eb);
1602	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1603		btrfs_tree_unlock(eb);
1604		if (wbc->sync_mode != WB_SYNC_ALL)
1605			return false;
1606		wait_on_extent_buffer_writeback(eb);
1607		btrfs_tree_lock(eb);
1608	}
1609
1610	/*
1611	 * We need to do this to prevent races in people who check if the eb is
1612	 * under IO since we can end up having no IO bits set for a short period
1613	 * of time.
1614	 */
1615	spin_lock(&eb->refs_lock);
1616	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1617		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1618		spin_unlock(&eb->refs_lock);
1619		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1620		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1621					 -eb->len,
1622					 fs_info->dirty_metadata_batch);
1623		ret = true;
1624	} else {
1625		spin_unlock(&eb->refs_lock);
1626	}
 
1627	btrfs_tree_unlock(eb);
1628	return ret;
1629}
1630
1631static void set_btree_ioerr(struct extent_buffer *eb)
1632{
1633	struct btrfs_fs_info *fs_info = eb->fs_info;
1634
1635	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
 
1636
1637	/*
1638	 * A read may stumble upon this buffer later, make sure that it gets an
1639	 * error and knows there was an error.
1640	 */
1641	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1642
1643	/*
1644	 * We need to set the mapping with the io error as well because a write
1645	 * error will flip the file system readonly, and then syncfs() will
1646	 * return a 0 because we are readonly if we don't modify the err seq for
1647	 * the superblock.
1648	 */
1649	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1650
1651	/*
1652	 * If writeback for a btree extent that doesn't belong to a log tree
1653	 * failed, increment the counter transaction->eb_write_errors.
1654	 * We do this because while the transaction is running and before it's
1655	 * committing (when we call filemap_fdata[write|wait]_range against
1656	 * the btree inode), we might have
1657	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1658	 * returns an error or an error happens during writeback, when we're
1659	 * committing the transaction we wouldn't know about it, since the pages
1660	 * can be no longer dirty nor marked anymore for writeback (if a
1661	 * subsequent modification to the extent buffer didn't happen before the
1662	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1663	 * able to find the pages which contain errors at transaction
1664	 * commit time. So if this happens we must abort the transaction,
1665	 * otherwise we commit a super block with btree roots that point to
1666	 * btree nodes/leaves whose content on disk is invalid - either garbage
1667	 * or the content of some node/leaf from a past generation that got
1668	 * COWed or deleted and is no longer valid.
1669	 *
1670	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1671	 * not be enough - we need to distinguish between log tree extents vs
1672	 * non-log tree extents, and the next filemap_fdatawait_range() call
1673	 * will catch and clear such errors in the mapping - and that call might
1674	 * be from a log sync and not from a transaction commit. Also, checking
1675	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1676	 * not done and would not be reliable - the eb might have been released
1677	 * from memory and reading it back again means that flag would not be
1678	 * set (since it's a runtime flag, not persisted on disk).
1679	 *
1680	 * Using the flags below in the btree inode also covers the case
1681	 * where writepages() returned success after it started writeback
1682	 * for all the dirty pages, but that writeback had already finished
1683	 * with errors before filemap_fdatawait_range() was called.  Because
1684	 * we were not using AS_EIO/AS_ENOSPC, filemap_fdatawait_range()
1685	 * would return success, as it could not know that writeback errors
1686	 * had happened (the pages were no longer tagged for writeback at
1687	 * that point).
1688	 */
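	/*
	 * log_index is -1 for blocks that don't belong to a log tree and 0/1
	 * for blocks of one of the two log transactions, so the error lands in
	 * the matching per-fs flag checked at commit or log sync time.
	 */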
1689	switch (eb->log_index) {
1690	case -1:
1691		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1692		break;
1693	case 0:
1694		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1695		break;
1696	case 1:
1697		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1698		break;
1699	default:
1700		BUG(); /* unexpected, logic error */
1701	}
1702}
1703
1704/*
1705 * The endio specific version which won't touch any unsafe spinlock in endio
1706 * context.
1707 */
1708static struct extent_buffer *find_extent_buffer_nolock(
1709		const struct btrfs_fs_info *fs_info, u64 start)
1710{
 
1711	struct extent_buffer *eb;
 
1712
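	/*
	 * The RCU read lock protects the radix tree slot, and
	 * atomic_inc_not_zero() only succeeds while the eb still has
	 * references, so an eb that is concurrently being freed (via call_rcu
	 * in release_extent_buffer()) is never returned here.
	 */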
1713	rcu_read_lock();
1714	eb = radix_tree_lookup(&fs_info->buffer_radix,
1715			       start >> fs_info->sectorsize_bits);
1716	if (eb && atomic_inc_not_zero(&eb->refs)) {
1717		rcu_read_unlock();
1718		return eb;
1719	}
1720	rcu_read_unlock();
1721	return NULL;
1722}
1723
1724static void end_bbio_meta_write(struct btrfs_bio *bbio)
1725{
1726	struct extent_buffer *eb = bbio->private;
1727	struct btrfs_fs_info *fs_info = eb->fs_info;
1728	bool uptodate = !bbio->bio.bi_status;
1729	struct folio_iter fi;
1730	u32 bio_offset = 0;
1731
1732	if (!uptodate)
1733		set_btree_ioerr(eb);
1734
1735	bio_for_each_folio_all(fi, &bbio->bio) {
1736		u64 start = eb->start + bio_offset;
1737		struct folio *folio = fi.folio;
1738		u32 len = fi.length;
1739
1740		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1741		bio_offset += len;
1742	}
 
 
1743
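	/*
	 * Clear WRITEBACK only after the per-folio writeback bits are cleared;
	 * the barrier pairs wake_up_bit() with the waiters in
	 * wait_on_extent_buffer_writeback().
	 */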
1744	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1745	smp_mb__after_atomic();
1746	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
 
1747
1748	bio_put(&bbio->bio);
 
 
 
1749}
1750
1751static void prepare_eb_write(struct extent_buffer *eb)
1752{
1753	u32 nritems;
1754	unsigned long start;
1755	unsigned long end;
 
 
1756
1757	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
 
 
1758
1759	/* Set btree blocks beyond nritems with 0 to avoid stale content */
1760	nritems = btrfs_header_nritems(eb);
1761	if (btrfs_header_level(eb) > 0) {
1762		end = btrfs_node_key_ptr_offset(eb, nritems);
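		/* For a node, zero everything after the last used key pointer. */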
 
1763		memzero_extent_buffer(eb, end, eb->len - end);
1764	} else {
1765		/*
1766		 * Leaf:
1767		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1768		 */
1769		start = btrfs_item_nr_offset(eb, nritems);
1770		end = btrfs_item_nr_offset(eb, 0);
1771		if (nritems == 0)
1772			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1773		else
1774			end += btrfs_item_offset(eb, nritems - 1);
1775		memzero_extent_buffer(eb, start, end - start);
1776	}
1777}
1778
1779static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1780					    struct writeback_control *wbc)
1781{
1782	struct btrfs_fs_info *fs_info = eb->fs_info;
1783	struct btrfs_bio *bbio;
1784
1785	prepare_eb_write(eb);
1786
1787	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1788			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1789			       eb->fs_info, end_bbio_meta_write, eb);
1790	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1791	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1792	wbc_init_bio(wbc, &bbio->bio);
1793	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1794	bbio->file_offset = eb->start;
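	/*
	 * For subpage metadata (nodesize < PAGE_SIZE) the eb lives inside a
	 * single folio and only eb->len bytes of it are added to the bio;
	 * otherwise every folio of the eb is added in full.
	 */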
1795	if (fs_info->nodesize < PAGE_SIZE) {
1796		struct folio *folio = eb->folios[0];
1797		bool ret;
1798
1799		folio_lock(folio);
1800		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1801		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1802						       eb->len)) {
1803			folio_clear_dirty_for_io(folio);
1804			wbc->nr_to_write--;
1805		}
1806		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1807				    eb->start - folio_pos(folio));
1808		ASSERT(ret);
1809		wbc_account_cgroup_owner(wbc, folio, eb->len);
1810		folio_unlock(folio);
1811	} else {
1812		int num_folios = num_extent_folios(eb);
1813
1814		for (int i = 0; i < num_folios; i++) {
1815			struct folio *folio = eb->folios[i];
1816			bool ret;
1817
1818			folio_lock(folio);
1819			folio_clear_dirty_for_io(folio);
1820			folio_start_writeback(folio);
1821			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1822			ASSERT(ret);
1823			wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
1824			wbc->nr_to_write -= folio_nr_pages(folio);
1825			folio_unlock(folio);
1826		}
1827	}
1828	btrfs_submit_bbio(bbio, 0);
1829}
1830
1831/*
1832 * Submit one subpage btree page.
1833 *
1834 * The main difference to submit_eb_page() is:
1835 * - Page locking
1836 *   For subpage, we don't rely on page locking at all.
1837 *
1838 * - Flush write bio
1839 *   We only flush bio if we may be unable to fit current extent buffers into
1840 *   current bio.
1841 *
1842 * Return >=0 for the number of submitted extent buffers.
1843 * Return <0 for fatal error.
1844 */
1845static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1846{
1847	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1848	int submitted = 0;
1849	u64 folio_start = folio_pos(folio);
1850	int bit_start = 0;
1851	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1852
1853	/* Lock and write each dirty extent buffer in the range */
1854	while (bit_start < fs_info->sectors_per_page) {
1855		struct btrfs_subpage *subpage = folio_get_private(folio);
1856		struct extent_buffer *eb;
1857		unsigned long flags;
1858		u64 start;
1859
1860		/*
1861		 * Take private lock to ensure the subpage won't be detached
1862		 * in the meantime.
1863		 */
1864		spin_lock(&folio->mapping->i_private_lock);
1865		if (!folio_test_private(folio)) {
1866			spin_unlock(&folio->mapping->i_private_lock);
1867			break;
1868		}
1869		spin_lock_irqsave(&subpage->lock, flags);
1870		if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1871			      subpage->bitmaps)) {
1872			spin_unlock_irqrestore(&subpage->lock, flags);
1873			spin_unlock(&folio->mapping->i_private_lock);
1874			bit_start++;
1875			continue;
1876		}
1877
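		/*
		 * A dirty bit here marks the start of a dirty extent buffer,
		 * which covers sectors_per_node bits, so step past the whole
		 * eb and look it up by its bytenr.
		 */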
1878		start = folio_start + bit_start * fs_info->sectorsize;
1879		bit_start += sectors_per_node;
1880
1881		/*
1882		 * Here we just want to grab the eb without touching extra
1883		 * spin locks, so call find_extent_buffer_nolock().
1884		 */
1885		eb = find_extent_buffer_nolock(fs_info, start);
1886		spin_unlock_irqrestore(&subpage->lock, flags);
1887		spin_unlock(&folio->mapping->i_private_lock);
1888
1889		/*
1890		 * The eb has already reached 0 refs thus find_extent_buffer()
1891		 * doesn't return it. We don't need to write back such eb
1892		 * anyway.
1893		 */
1894		if (!eb)
1895			continue;
1896
1897		if (lock_extent_buffer_for_io(eb, wbc)) {
1898			write_one_eb(eb, wbc);
1899			submitted++;
1900		}
1901		free_extent_buffer(eb);
1902	}
1903	return submitted;
1904}
1905
1906/*
1907 * Submit all folio(s) of one extent buffer.
1908 *
1909 * @folio:	the folio of one extent buffer
1910 * @ctx:	to determine if we need to submit this folio; if the current
1911 *		folio belongs to this eb, we don't need to submit it
1912 *
1913 * The caller should pass each folio in bytenr order, and here we use
1914 * @ctx to determine if we have submitted folios of one extent buffer.
1915 *
1916 * If we have, we just skip until we hit a new folio that doesn't belong
1917 * to the current @ctx->eb.
1918 *
1919 * If not, we submit all the folio(s) of the extent buffer.
1920 *
1921 * Return >0 if we have submitted the extent buffer successfully.
1922 * Return 0 if we don't need to submit the page, as it's already submitted by
1923 * previous call.
1924 * Return <0 for fatal error.
1925 */
1926static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1927{
1928	struct writeback_control *wbc = ctx->wbc;
1929	struct address_space *mapping = folio->mapping;
1930	struct extent_buffer *eb;
1931	int ret;
1932
1933	if (!folio_test_private(folio))
1934		return 0;
1935
1936	if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1937		return submit_eb_subpage(folio, wbc);
1938
1939	spin_lock(&mapping->i_private_lock);
1940	if (!folio_test_private(folio)) {
1941		spin_unlock(&mapping->i_private_lock);
1942		return 0;
1943	}
1944
1945	eb = folio_get_private(folio);
1946
1947	/*
1948	 * Shouldn't happen and normally this would be a BUG_ON but no point
1949	 * crashing the machine for something we can survive anyway.
1950	 */
1951	if (WARN_ON(!eb)) {
1952		spin_unlock(&mapping->i_private_lock);
1953		return 0;
1954	}
1955
1956	if (eb == ctx->eb) {
1957		spin_unlock(&mapping->i_private_lock);
1958		return 0;
1959	}
1960	ret = atomic_inc_not_zero(&eb->refs);
1961	spin_unlock(&mapping->i_private_lock);
1962	if (!ret)
1963		return 0;
1964
1965	ctx->eb = eb;
1966
1967	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1968	if (ret) {
1969		if (ret == -EBUSY)
1970			ret = 0;
1971		free_extent_buffer(eb);
1972		return ret;
1973	}
1974
1975	if (!lock_extent_buffer_for_io(eb, wbc)) {
1976		free_extent_buffer(eb);
1977		return 0;
1978	}
1979	/* Implies write in zoned mode. */
1980	if (ctx->zoned_bg) {
1981		/* Mark the last eb in the block group. */
1982		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1983		ctx->zoned_bg->meta_write_pointer += eb->len;
1984	}
1985	write_one_eb(eb, wbc);
1986	free_extent_buffer(eb);
1987	return 1;
1988}
1989
1990int btree_write_cache_pages(struct address_space *mapping,
1991				   struct writeback_control *wbc)
1992{
1993	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1994	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1995	int ret = 0;
1996	int done = 0;
1997	int nr_to_write_done = 0;
1998	struct folio_batch fbatch;
1999	unsigned int nr_folios;
2000	pgoff_t index;
2001	pgoff_t end;		/* Inclusive */
2002	int scanned = 0;
2003	xa_mark_t tag;
2004
2005	folio_batch_init(&fbatch);
2006	if (wbc->range_cyclic) {
2007		index = mapping->writeback_index; /* Start from prev offset */
2008		end = -1;
2009		/*
2010		 * Start from the beginning does not need to cycle over the
2011		 * range, mark it as scanned.
2012		 */
2013		scanned = (index == 0);
2014	} else {
2015		index = wbc->range_start >> PAGE_SHIFT;
2016		end = wbc->range_end >> PAGE_SHIFT;
2017		scanned = 1;
2018	}
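	/*
	 * For WB_SYNC_ALL, folios are tagged TOWRITE first so that folios
	 * dirtied while we are writing are not chased forever and the sync
	 * cannot livelock.
	 */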
2019	if (wbc->sync_mode == WB_SYNC_ALL)
2020		tag = PAGECACHE_TAG_TOWRITE;
2021	else
2022		tag = PAGECACHE_TAG_DIRTY;
2023	btrfs_zoned_meta_io_lock(fs_info);
2024retry:
2025	if (wbc->sync_mode == WB_SYNC_ALL)
2026		tag_pages_for_writeback(mapping, index, end);
2027	while (!done && !nr_to_write_done && (index <= end) &&
2028	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2029					    tag, &fbatch))) {
2030		unsigned i;
2031
2032		for (i = 0; i < nr_folios; i++) {
2033			struct folio *folio = fbatch.folios[i];
 
2034
2035			ret = submit_eb_page(folio, &ctx);
2036			if (ret == 0)
2037				continue;
2038			if (ret < 0) {
2039				done = 1;
 
2040				break;
2041			}
 
2042
2043			/*
2044			 * The filesystem may choose to bump up nr_to_write.
2045			 * We have to make sure to honor the new nr_to_write
2046			 * at any time.
2047			 */
2048			nr_to_write_done = wbc->nr_to_write <= 0;
2049		}
2050		folio_batch_release(&fbatch);
2051		cond_resched();
2052	}
2053	if (!scanned && !done) {
2054		/*
2055		 * We hit the last page and there is more work to be done: wrap
2056		 * back to the start of the file
2057		 */
2058		scanned = 1;
2059		index = 0;
2060		goto retry;
2061	}
2062	/*
2063	 * If something went wrong, don't allow any metadata write bio to be
2064	 * submitted.
2065	 *
2066	 * This would prevent use-after-free if we had dirty pages not
2067	 * cleaned up, which can still happen with fuzzed images.
2068	 *
2069	 * - Bad extent tree
2070	 *   Allowing an existing tree block to be allocated for other trees.
2071	 *
2072	 * - Log tree operations
2073	 *   Existing tree blocks get allocated to the log tree, which bumps
2074	 *   their generation, then they get cleaned in tree re-balance.
2075	 *   Such a tree block will not be written back, since it's clean,
2076	 *   thus no WRITTEN flag is set.
2077	 *   And after the log writes back, this tree block is not tracked by
2078	 *   any dirty extent_io_tree.
2079	 *
2080	 * - Offending tree block gets re-dirtied from its original owner
2081	 *   Since it has a bumped generation and no WRITTEN flag, it can be
2082	 *   reused without COWing. This tree block will not be tracked by
2083	 *   btrfs_transaction::dirty_pages.
2084	 *
2085	 *   Now such a dirty tree block will not be cleaned by any dirty
2086	 *   extent io tree. Thus we don't want to submit such a wild eb
2087	 *   if the fs already has errors.
2088	 *
2089	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2090	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2091	 */
2092	if (ret > 0)
2093		ret = 0;
2094	if (!ret && BTRFS_FS_ERROR(fs_info))
2095		ret = -EROFS;
2096
2097	if (ctx.zoned_bg)
2098		btrfs_put_block_group(ctx.zoned_bg);
2099	btrfs_zoned_meta_io_unlock(fs_info);
2100	return ret;
2101}
2102
2103/*
2104 * Walk the list of dirty pages of the given address space and write all of them.
2105 *
2106 * @mapping:   address space structure to write
2107 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2108 * @bio_ctrl:  holds context for the write, namely the bio
2109 *
2110 * If a page is already under I/O, write_cache_pages() skips it, even
2111 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2112 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2113 * and msync() need to guarantee that all the data which was dirty at the time
2114 * the call was made get new I/O started against them.  If wbc->sync_mode is
2115 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2116 * existing IO to complete.
2117 */
2118static int extent_write_cache_pages(struct address_space *mapping,
2119			     struct btrfs_bio_ctrl *bio_ctrl)
 
2120{
2121	struct writeback_control *wbc = bio_ctrl->wbc;
2122	struct inode *inode = mapping->host;
2123	int ret = 0;
2124	int done = 0;
2125	int nr_to_write_done = 0;
2126	struct folio_batch fbatch;
2127	unsigned int nr_folios;
2128	pgoff_t index;
2129	pgoff_t end;		/* Inclusive */
2130	pgoff_t done_index;
2131	int range_whole = 0;
2132	int scanned = 0;
2133	xa_mark_t tag;
2134
2135	/*
2136	 * We have to hold onto the inode so that ordered extents can do their
2137	 * work when the IO finishes.  The alternative to this is failing to add
2138	 * an ordered extent if the igrab() fails there and that is a huge pain
2139	 * to deal with, so instead just hold onto the inode throughout the
2140	 * writepages operation.  If it fails here we are freeing up the inode
2141	 * anyway and we'd rather not waste our time writing out stuff that is
2142	 * going to be truncated anyway.
2143	 */
2144	if (!igrab(inode))
2145		return 0;
2146
2147	folio_batch_init(&fbatch);
2148	if (wbc->range_cyclic) {
2149		index = mapping->writeback_index; /* Start from prev offset */
2150		end = -1;
2151		/*
2152		 * Start from the beginning does not need to cycle over the
2153		 * range, mark it as scanned.
2154		 */
2155		scanned = (index == 0);
2156	} else {
2157		index = wbc->range_start >> PAGE_SHIFT;
2158		end = wbc->range_end >> PAGE_SHIFT;
2159		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2160			range_whole = 1;
2161		scanned = 1;
2162	}
2163
2164	/*
2165	 * We do the tagged writepage as long as the snapshot flush bit is set
2166	 * and we are the first one who do the filemap_flush() on this inode.
2167	 *
2168	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2169	 * not race in and drop the bit.
2170	 */
2171	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2172	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2173			       &BTRFS_I(inode)->runtime_flags))
2174		wbc->tagged_writepages = 1;
2175
2176	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2177		tag = PAGECACHE_TAG_TOWRITE;
2178	else
2179		tag = PAGECACHE_TAG_DIRTY;
2180retry:
2181	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2182		tag_pages_for_writeback(mapping, index, end);
2183	done_index = index;
2184	while (!done && !nr_to_write_done && (index <= end) &&
2185			(nr_folios = filemap_get_folios_tag(mapping, &index,
2186							end, tag, &fbatch))) {
2187		unsigned i;
2188
2189		for (i = 0; i < nr_folios; i++) {
2190			struct folio *folio = fbatch.folios[i];
 
2191
2192			done_index = folio_next_index(folio);
2193			/*
2194			 * At this point we hold neither the i_pages lock nor
2195			 * the page lock: the page may be truncated or
2196			 * invalidated (changing page->mapping to NULL),
2197			 * or even swizzled back from swapper_space to
2198			 * tmpfs file mapping
2199			 */
2200			if (!folio_trylock(folio)) {
2201				submit_write_bio(bio_ctrl, 0);
2202				folio_lock(folio);
2203			}
2204
2205			if (unlikely(folio->mapping != mapping)) {
2206				folio_unlock(folio);
2207				continue;
2208			}
2209
2210			if (!folio_test_dirty(folio)) {
2211				/* Someone wrote it for us. */
2212				folio_unlock(folio);
2213				continue;
2214			}
2215
2216			/*
2217			 * For subpage case, compression can lead to mixed
2218			 * writeback and dirty flags, e.g:
2219			 * 0     32K    64K    96K    128K
2220			 * |     |//////||/////|   |//|
2221			 *
2222			 * In above case, [32K, 96K) is asynchronously submitted
2223			 * for compression, and [124K, 128K) needs to be written back.
2224			 *
2225			 * If we didn't wait for writeback of page 64K, [124K, 128K)
2226			 * won't be submitted as the page still has the writeback flag
2227			 * and will be skipped in the next check.
2228			 *
2229			 * This mixed writeback and dirty case is only possible for
2230			 * subpage case.
2231			 *
2232			 * TODO: Remove this check after migrating compression to
2233			 * regular submission.
2234			 */
2235			if (wbc->sync_mode != WB_SYNC_NONE ||
2236			    btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
2237				if (folio_test_writeback(folio))
2238					submit_write_bio(bio_ctrl, 0);
2239				folio_wait_writeback(folio);
2240			}
2241
2242			if (folio_test_writeback(folio) ||
2243			    !folio_clear_dirty_for_io(folio)) {
2244				folio_unlock(folio);
2245				continue;
2246			}
2247
2248			ret = extent_writepage(folio, bio_ctrl);
 
 
 
 
 
2249			if (ret < 0) {
2250				done = 1;
2251				break;
2252			}
2253
2254			/*
2255			 * The filesystem may choose to bump up nr_to_write.
2256			 * We have to make sure to honor the new nr_to_write
2257			 * at any time.
2258			 */
2259			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2260					    wbc->nr_to_write <= 0);
2261		}
2262		folio_batch_release(&fbatch);
2263		cond_resched();
2264	}
2265	if (!scanned && !done) {
2266		/*
2267		 * We hit the last page and there is more work to be done: wrap
2268		 * back to the start of the file
2269		 */
2270		scanned = 1;
2271		index = 0;
2272
2273		/*
2274		 * If we're looping we could run into a page that is locked by a
2275		 * writer and that writer could be waiting on writeback for a
2276		 * page in our current bio, and thus deadlock, so flush the
2277		 * write bio here.
2278		 */
2279		submit_write_bio(bio_ctrl, 0);
2280		goto retry;
2281	}
2282
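	/*
	 * For cyclic or whole-range writeback, remember where we stopped so
	 * the next writeback pass resumes from there instead of rescanning
	 * the whole file.
	 */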
2283	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2284		mapping->writeback_index = done_index;
2285
2286	btrfs_add_delayed_iput(BTRFS_I(inode));
2287	return ret;
2288}
2289
2290/*
2291 * Submit the pages in the range to the bio, for call sites where the delalloc
2292 * range has already been run (i.e. the ordered extent was inserted) and all
2293 * pages are still locked.
2294 */
2295void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2296			       u64 start, u64 end, struct writeback_control *wbc,
2297			       bool pages_dirty)
2298{
2299	bool found_error = false;
2300	int ret = 0;
2301	struct address_space *mapping = inode->i_mapping;
2302	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2303	const u32 sectorsize = fs_info->sectorsize;
2304	loff_t i_size = i_size_read(inode);
2305	u64 cur = start;
2306	struct btrfs_bio_ctrl bio_ctrl = {
2307		.wbc = wbc,
2308		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2309	};
2310
2311	if (wbc->no_cgroup_owner)
2312		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2313
2314	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2315
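	/*
	 * Walk the range one page-sized chunk at a time; cur_end is clamped so
	 * a chunk never crosses into the next page.
	 */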
2316	while (cur <= end) {
2317		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2318		u32 cur_len = cur_end + 1 - cur;
2319		struct folio *folio;
 
2320
2321		folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2322
2323		/*
2324		 * This shouldn't happen, the pages are pinned and locked, this
2325		 * code is just in case, but shouldn't actually be run.
2326		 */
2327		if (IS_ERR(folio)) {
2328			btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2329						       cur, cur_len, false);
2330			mapping_set_error(mapping, PTR_ERR(folio));
2331			cur = cur_end + 1;
2332			continue;
2333		}
2334
2335		ASSERT(folio_test_locked(folio));
2336		if (pages_dirty && folio != locked_folio)
2337			ASSERT(folio_test_dirty(folio));
2338
2339		/*
2340		 * Set the submission bitmap to submit all sectors.
2341		 * extent_writepage_io() will do the truncation correctly.
2342		 */
2343		bio_ctrl.submit_bitmap = (unsigned long)-1;
2344		ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2345					  &bio_ctrl, i_size);
2346		if (ret == 1)
2347			goto next_page;
2348
2349		if (ret)
2350			mapping_set_error(mapping, ret);
2351		btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2352		if (ret < 0)
2353			found_error = true;
2354next_page:
2355		folio_put(folio);
2356		cur = cur_end + 1;
 
 
 
 
 
2357	}
2358
2359	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
 
2360}
2361
2362int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
 
2363{
2364	struct inode *inode = mapping->host;
2365	int ret = 0;
2366	struct btrfs_bio_ctrl bio_ctrl = {
2367		.wbc = wbc,
2368		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
 
 
2369	};
2370
2371	/*
2372	 * Allow only a single thread to do the reloc work in zoned mode to
2373	 * protect the write pointer updates.
2374	 */
2375	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2376	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2377	submit_write_bio(&bio_ctrl, ret);
2378	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2379	return ret;
2380}
2381
2382void btrfs_readahead(struct readahead_control *rac)
2383{
2384	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2385	struct folio *folio;
2386	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
2387	const u64 start = readahead_pos(rac);
2388	const u64 end = start + readahead_length(rac) - 1;
2389	struct extent_state *cached_state = NULL;
 
2390	struct extent_map *em_cached = NULL;
 
2391	u64 prev_em_start = (u64)-1;
2392
2393	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
 
2394
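	/*
	 * em_cached keeps the last looked-up extent map so consecutive folios
	 * in the readahead window don't each repeat the same lookup.
	 */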
2395	while ((folio = readahead_folio(rac)) != NULL)
2396		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2397
2398	unlock_extent(&inode->io_tree, start, end, &cached_state);
2399
2400	if (em_cached)
2401		free_extent_map(em_cached);
2402	submit_one_bio(&bio_ctrl);
 
 
 
 
2403}
2404
2405/*
2406 * Basic invalidate_folio code: this waits on any locked or writeback
2407 * ranges corresponding to the folio, and then deletes any extent state
2408 * records from the tree.
2409 */
2410int extent_invalidate_folio(struct extent_io_tree *tree,
2411			  struct folio *folio, size_t offset)
2412{
2413	struct extent_state *cached_state = NULL;
2414	u64 start = folio_pos(folio);
2415	u64 end = start + folio_size(folio) - 1;
2416	size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2417
2418	/* This function is only called for the btree inode */
2419	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2420
2421	start += ALIGN(offset, blocksize);
2422	if (start > end)
2423		return 0;
2424
2425	lock_extent(tree, start, end, &cached_state);
2426	folio_wait_writeback(folio);
2427
2428	/*
2429	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2430	 * so here we only need to unlock the extent range to free any
2431	 * existing extent state.
2432	 */
2433	unlock_extent(tree, start, end, &cached_state);
2434	return 0;
2435}
2436
2437/*
2438 * A helper for release_folio: this tests for areas of the folio that
2439 * are locked or under IO and drops the related state bits if it is safe
2440 * to release the folio.
2441 */
2442static bool try_release_extent_state(struct extent_io_tree *tree,
2443				     struct folio *folio)
 
2444{
2445	u64 start = folio_pos(folio);
2446	u64 end = start + PAGE_SIZE - 1;
2447	bool ret;
2448
2449	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2450		ret = false;
2451	} else {
2452		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2453				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2454				   EXTENT_QGROUP_RESERVED);
2455		int ret2;
2456
 
 
 
 
2457		/*
2458		 * At this point we can safely clear everything except the
2459		 * locked, nodatasum, delalloc new and qgroup reserved bits.
2460		 * The delalloc new bit will be cleared by ordered extent
2461		 * completion.
2462		 */
2463		ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
 
 
2464
2465		/* If __clear_extent_bit() failed due to -ENOMEM, we can't
2466		 * allow the release to continue.
2467		 */
2468		if (ret2 < 0)
2469			ret = false;
2470		else
2471			ret = true;
2472	}
2473	return ret;
2474}
2475
2476/*
2477 * A helper for release_folio.  As long as there are no locked extents
2478 * in the range corresponding to the folio, both state records and extent
2479 * map records are removed.
2480 */
2481bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
 
 
2482{
2483	u64 start = folio_pos(folio);
 
2484	u64 end = start + PAGE_SIZE - 1;
2485	struct btrfs_inode *inode = folio_to_inode(folio);
2486	struct extent_io_tree *io_tree = &inode->io_tree;
2487
2488	while (start <= end) {
2489		const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2490		const u64 len = end - start + 1;
2491		struct extent_map_tree *extent_tree = &inode->extent_tree;
2492		struct extent_map *em;
2493
2494		write_lock(&extent_tree->lock);
2495		em = lookup_extent_mapping(extent_tree, start, len);
2496		if (!em) {
2497			write_unlock(&extent_tree->lock);
2498			break;
2499		}
2500		if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2501			write_unlock(&extent_tree->lock);
2502			free_extent_map(em);
2503			break;
2504		}
2505		if (test_range_bit_exists(io_tree, em->start,
2506					  extent_map_end(em) - 1, EXTENT_LOCKED))
2507			goto next;
2508		/*
2509		 * If it's not in the list of modified extents, used by a fast
2510		 * fsync, we can remove it. If it's being logged we can safely
2511		 * remove it since fsync took an extra reference on the em.
2512		 */
2513		if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2514			goto remove_em;
2515		/*
2516		 * If it's in the list of modified extents, remove it only if
2517		 * its generation is older then the current one, in which case
2518		 * we don't need it for a fast fsync. Otherwise don't remove it,
2519		 * we could be racing with an ongoing fast fsync that could miss
2520		 * the new extent.
2521		 */
2522		if (em->generation >= cur_gen)
2523			goto next;
2524remove_em:
2525		/*
2526		 * We only remove extent maps that are not in the list of
2527		 * modified extents or that are in the list but with a
2528		 * generation lower then the current generation, so there is no
2529		 * need to set the full fsync flag on the inode (it hurts the
2530		 * fsync performance for workloads with a data size that exceeds
2531		 * or is close to the system's memory).
2532		 */
2533		remove_extent_mapping(inode, em);
2534		/* Once for the inode's extent map tree. */
2535		free_extent_map(em);
2536next:
2537		start = extent_map_end(em);
2538		write_unlock(&extent_tree->lock);
2539
2540		/* Once for us, for the lookup_extent_mapping() reference. */
2541		free_extent_map(em);
2542
2543		if (need_resched()) {
2544			/*
2545			 * If we need to resched but we can't block just exit
2546			 * and leave any remaining extent maps.
2547			 */
2548			if (!gfpflags_allow_blocking(mask))
2549				break;
2550
2551			cond_resched();
2552		}
2553	}
2554	return try_release_extent_state(io_tree, folio);
2555}
2556
2557static void __free_extent_buffer(struct extent_buffer *eb)
2558{
2559	kmem_cache_free(extent_buffer_cache, eb);
2560}
2561
2562static int extent_buffer_under_io(const struct extent_buffer *eb)
2563{
2564	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2565		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2566}
2567
2568static bool folio_range_has_eb(struct folio *folio)
2569{
2570	struct btrfs_subpage *subpage;
2571
2572	lockdep_assert_held(&folio->mapping->i_private_lock);
 
2573
2574	if (folio_test_private(folio)) {
2575		subpage = folio_get_private(folio);
2576		if (atomic_read(&subpage->eb_refs))
2577			return true;
2578	}
2579	return false;
2580}
2581
2582static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
 
2583{
2584	struct btrfs_fs_info *fs_info = eb->fs_info;
2585	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2586
2587	/*
2588	 * For mapped eb, we're going to change the folio private, which should
2589	 * be done under the i_private_lock.
2590	 */
2591	if (mapped)
2592		spin_lock(&folio->mapping->i_private_lock);
2593
2594	if (!folio_test_private(folio)) {
2595		if (mapped)
2596			spin_unlock(&folio->mapping->i_private_lock);
2597		return;
 
 
2598	}
2599
2600	if (fs_info->nodesize >= PAGE_SIZE) {
2601		/*
2602		 * We do this since we'll remove the pages after we've
2603		 * removed the eb from the radix tree, so we could race
2604		 * and have this page now attached to the new eb.  So
2605		 * only clear the folio private if it's still connected to
2606		 * this eb.
2607		 */
2608		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2609			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2610			BUG_ON(folio_test_dirty(folio));
2611			BUG_ON(folio_test_writeback(folio));
2612			/* We need to make sure we haven't been attached to a new eb. */
2613			folio_detach_private(folio);
2614		}
2615		if (mapped)
2616			spin_unlock(&folio->mapping->i_private_lock);
2617		return;
2618	}
 
2619
2620	/*
2621	 * For subpage, we can have dummy eb with folio private attached.  In
2622	 * this case, we can directly detach the private as such folio is only
2623	 * attached to one dummy eb, no sharing.
2624	 */
2625	if (!mapped) {
2626		btrfs_detach_subpage(fs_info, folio);
2627		return;
2628	}
2629
2630	btrfs_folio_dec_eb_refs(fs_info, folio);
 
2631
2632	/*
2633	 * We can only detach the folio private if there are no other ebs in the
2634	 * page range and no unfinished IO.
2635	 */
2636	if (!folio_range_has_eb(folio))
2637		btrfs_detach_subpage(fs_info, folio);
2638
2639	spin_unlock(&folio->mapping->i_private_lock);
2640}
2641
2642/* Release all pages attached to the extent buffer */
2643static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2644{
2645	ASSERT(!extent_buffer_under_io(eb));
 
 
2646
2647	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2648		struct folio *folio = eb->folios[i];
2649
2650		if (!folio)
2651			continue;
2652
2653		detach_extent_buffer_folio(eb, folio);
 
2654
2655		/* One for when we allocated the folio. */
2656		folio_put(folio);
2657	}
2658}
2659
2660/*
2661 * Helper for releasing the extent buffer.
2662 */
2663static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2664{
2665	btrfs_release_extent_buffer_pages(eb);
2666	btrfs_leak_debug_del_eb(eb);
2667	__free_extent_buffer(eb);
2668}
2669
2670static struct extent_buffer *
2671__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2672		      unsigned long len)
2673{
2674	struct extent_buffer *eb = NULL;
2675
2676	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2677	eb->start = start;
2678	eb->len = len;
2679	eb->fs_info = fs_info;
2680	init_rwsem(&eb->lock);
2681
2682	btrfs_leak_debug_add_eb(eb);
2683
2684	spin_lock_init(&eb->refs_lock);
2685	atomic_set(&eb->refs, 1);
 
2686
2687	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
 
 
 
 
2688
2689	return eb;
2690}
2691
2692struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2693{
 
 
2694	struct extent_buffer *new;
2695	int num_folios = num_extent_folios(src);
2696	int ret;
2697
2698	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2699	if (new == NULL)
2700		return NULL;
2701
2702	/*
2703	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2704	 * btrfs_release_extent_buffer() has different behavior for an
2705	 * UNMAPPED subpage extent buffer.
2706	 */
2707	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2708
2709	ret = alloc_eb_folio_array(new, false);
2710	if (ret) {
2711		btrfs_release_extent_buffer(new);
2712		return NULL;
2713	}
2714
2715	for (int i = 0; i < num_folios; i++) {
2716		struct folio *folio = new->folios[i];
2717
2718		ret = attach_extent_buffer_folio(new, folio, NULL);
2719		if (ret < 0) {
2720			btrfs_release_extent_buffer(new);
2721			return NULL;
2722		}
2723		WARN_ON(folio_test_dirty(folio));
 
 
 
 
2724	}
2725	copy_extent_buffer_full(new, src);
2726	set_extent_buffer_uptodate(new);
 
2727
2728	return new;
2729}
2730
2731struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2732						  u64 start, unsigned long len)
2733{
2734	struct extent_buffer *eb;
2735	int num_folios = 0;
2736	int ret;
 
 
2737
2738	eb = __alloc_extent_buffer(fs_info, start, len);
2739	if (!eb)
2740		return NULL;
2741
2742	ret = alloc_eb_folio_array(eb, false);
2743	if (ret)
2744		goto err;
2745
2746	num_folios = num_extent_folios(eb);
2747	for (int i = 0; i < num_folios; i++) {
2748		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2749		if (ret < 0)
2750			goto err;
2751	}
2752
2753	set_extent_buffer_uptodate(eb);
2754	btrfs_set_header_nritems(eb, 0);
2755	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2756
2757	return eb;
2758err:
2759	for (int i = 0; i < num_folios; i++) {
2760		if (eb->folios[i]) {
2761			detach_extent_buffer_folio(eb, eb->folios[i]);
2762			folio_put(eb->folios[i]);
2763		}
2764	}
2765	__free_extent_buffer(eb);
2766	return NULL;
2767}
2768
2769struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2770						u64 start)
2771{
2772	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2773}
2774
2775static void check_buffer_tree_ref(struct extent_buffer *eb)
2776{
2777	int refs;
2778	/*
2779	 * The TREE_REF bit is first set when the extent_buffer is added
2780	 * to the radix tree. It is also reset, if unset, when a new reference
2781	 * is created by find_extent_buffer.
2782	 *
2783	 * It is only cleared in two cases: freeing the last non-tree
2784	 * reference to the extent_buffer when its STALE bit is set or
2785	 * calling release_folio when the tree reference is the only reference.
 
2786	 *
2787	 * In both cases, care is taken to ensure that the extent_buffer's
2788	 * pages are not under io. However, release_folio can be concurrently
2789	 * called with creating new references, which is prone to race
2790	 * conditions between the calls to check_buffer_tree_ref in those
2791	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
 
2792	 *
2793	 * The actual lifetime of the extent_buffer in the radix tree is
2794	 * adequately protected by the refcount, but the TREE_REF bit and
2795	 * its corresponding reference are not. To protect against this
2796	 * class of races, we call check_buffer_tree_ref from the codepaths
2797	 * which trigger io. Note that once io is initiated, TREE_REF can no
2798	 * longer be cleared, so that is the moment at which any such race is
2799	 * best fixed.
2800	 */
2801	refs = atomic_read(&eb->refs);
2802	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2803		return;
2804
2805	spin_lock(&eb->refs_lock);
2806	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2807		atomic_inc(&eb->refs);
2808	spin_unlock(&eb->refs_lock);
2809}
2810
2811static void mark_extent_buffer_accessed(struct extent_buffer *eb)
 
2812{
2813	int num_folios = num_extent_folios(eb);
2814
2815	check_buffer_tree_ref(eb);
2816
2817	for (int i = 0; i < num_folios; i++)
2818		folio_mark_accessed(eb->folios[i]);
 
 
 
 
 
2819}
2820
2821struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2822					 u64 start)
2823{
2824	struct extent_buffer *eb;
2825
2826	eb = find_extent_buffer_nolock(fs_info, start);
2827	if (!eb)
2828		return NULL;
2829	/*
2830	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2831	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2832	 * another task running free_extent_buffer() might have seen that flag
2833	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2834	 * writeback flags not set) and it's still in the tree (flag
2835	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2836	 * decrementing the extent buffer's reference count twice.  So here we
2837	 * could race and increment the eb's reference count, clear its stale
2838	 * flag, mark it as dirty and drop our reference before the other task
2839	 * finishes executing free_extent_buffer, which would later result in
2840	 * an attempt to free an extent buffer that is dirty.
2841	 */
2842	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2843		spin_lock(&eb->refs_lock);
2844		spin_unlock(&eb->refs_lock);
2845	}
2846	mark_extent_buffer_accessed(eb);
2847	return eb;
 
2848}
2849
2850#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2851struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2852					u64 start)
2853{
2854	struct extent_buffer *eb, *exists = NULL;
2855	int ret;
2856
2857	eb = find_extent_buffer(fs_info, start);
2858	if (eb)
2859		return eb;
2860	eb = alloc_dummy_extent_buffer(fs_info, start);
2861	if (!eb)
2862		return ERR_PTR(-ENOMEM);
2863	eb->fs_info = fs_info;
2864again:
2865	ret = radix_tree_preload(GFP_NOFS);
2866	if (ret) {
2867		exists = ERR_PTR(ret);
2868		goto free_eb;
2869	}
2870	spin_lock(&fs_info->buffer_lock);
2871	ret = radix_tree_insert(&fs_info->buffer_radix,
2872				start >> fs_info->sectorsize_bits, eb);
2873	spin_unlock(&fs_info->buffer_lock);
2874	radix_tree_preload_end();
2875	if (ret == -EEXIST) {
2876		exists = find_extent_buffer(fs_info, start);
2877		if (exists)
2878			goto free_eb;
2879		else
2880			goto again;
2881	}
2882	check_buffer_tree_ref(eb);
2883	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2884
2885	return eb;
2886free_eb:
2887	btrfs_release_extent_buffer(eb);
2888	return exists;
2889}
2890#endif
2891
2892static struct extent_buffer *grab_extent_buffer(
2893		struct btrfs_fs_info *fs_info, struct page *page)
2894{
2895	struct folio *folio = page_folio(page);
2896	struct extent_buffer *exists;
2897
2898	lockdep_assert_held(&page->mapping->i_private_lock);
2899
2900	/*
2901	 * For subpage case, we completely rely on radix tree to ensure we
2902	 * don't try to insert two ebs for the same bytenr.  So here we always
2903	 * return NULL and just continue.
2904	 */
2905	if (fs_info->nodesize < PAGE_SIZE)
2906		return NULL;
2907
2908	/* Page not yet attached to an extent buffer */
2909	if (!folio_test_private(folio))
2910		return NULL;
2911
2912	/*
2913	 * We could have already allocated an eb for this page and attached one
2914	 * so let's see if we can get a ref on the existing eb, and if we can we
2915	 * know it's good and we can just return that one, else we know we can
2916	 * just overwrite folio private.
2917	 */
2918	exists = folio_get_private(folio);
2919	if (atomic_inc_not_zero(&exists->refs))
2920		return exists;
2921
2922	WARN_ON(PageDirty(page));
2923	folio_detach_private(folio);
2924	return NULL;
2925}
2926
2927static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2928{
2929	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2930		btrfs_err(fs_info, "bad tree block start %llu", start);
2931		return -EINVAL;
2932	}
2933
2934	if (fs_info->nodesize < PAGE_SIZE &&
2935	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2936		btrfs_err(fs_info,
2937		"tree block crosses page boundary, start %llu nodesize %u",
2938			  start, fs_info->nodesize);
2939		return -EINVAL;
2940	}
2941	if (fs_info->nodesize >= PAGE_SIZE &&
2942	    !PAGE_ALIGNED(start)) {
2943		btrfs_err(fs_info,
2944		"tree block is not page aligned, start %llu nodesize %u",
2945			  start, fs_info->nodesize);
2946		return -EINVAL;
2947	}
2948	if (!IS_ALIGNED(start, fs_info->nodesize) &&
2949	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2950		btrfs_warn(fs_info,
2951"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2952			      start, fs_info->nodesize);
2953	}
2954	return 0;
2955}
2956
2957
2958/*
2959 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2960 * Return >0 if there is already another extent buffer for the range,
2961 * and @found_eb_ret would be updated.
2962 * Return -EAGAIN if the filemap has an existing folio but with different size
2963 * than @eb.
2964 * The caller needs to free the existing folios and retry using the same order.
2965 */
2966static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2967				      struct btrfs_subpage *prealloc,
2968				      struct extent_buffer **found_eb_ret)
2969{
2970
2971	struct btrfs_fs_info *fs_info = eb->fs_info;
2972	struct address_space *mapping = fs_info->btree_inode->i_mapping;
2973	const unsigned long index = eb->start >> PAGE_SHIFT;
2974	struct folio *existing_folio = NULL;
2975	int ret;
2976
2977	ASSERT(found_eb_ret);
2978
2979	/* Caller should ensure the folio exists. */
2980	ASSERT(eb->folios[i]);
2981
2982retry:
2983	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2984				GFP_NOFS | __GFP_NOFAIL);
2985	if (!ret)
2986		goto finish;
2987
2988	existing_folio = filemap_lock_folio(mapping, index + i);
2989	/* The page cache only exists for a very short time, just retry. */
2990	if (IS_ERR(existing_folio)) {
2991		existing_folio = NULL;
2992		goto retry;
2993	}
2994
2995	/* For now, we should only have single-page folios for btree inode. */
2996	ASSERT(folio_nr_pages(existing_folio) == 1);
2997
2998	if (folio_size(existing_folio) != eb->folio_size) {
2999		folio_unlock(existing_folio);
3000		folio_put(existing_folio);
3001		return -EAGAIN;
3002	}
3003
3004finish:
3005	spin_lock(&mapping->i_private_lock);
3006	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
3007		/* We're going to reuse the existing page, can drop our folio now. */
3008		__free_page(folio_page(eb->folios[i], 0));
3009		eb->folios[i] = existing_folio;
3010	} else if (existing_folio) {
3011		struct extent_buffer *existing_eb;
3012
3013		existing_eb = grab_extent_buffer(fs_info,
3014						 folio_page(existing_folio, 0));
3015		if (existing_eb) {
3016			/* The extent buffer still exists, we can use it directly. */
3017			*found_eb_ret = existing_eb;
3018			spin_unlock(&mapping->i_private_lock);
3019			folio_unlock(existing_folio);
3020			folio_put(existing_folio);
3021			return 1;
3022		}
3023		/* The extent buffer no longer exists, we can reuse the folio. */
3024		__free_page(folio_page(eb->folios[i], 0));
3025		eb->folios[i] = existing_folio;
3026	}
3027	eb->folio_size = folio_size(eb->folios[i]);
3028	eb->folio_shift = folio_shift(eb->folios[i]);
3029	/* Should not fail, as we have preallocated the memory. */
3030	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3031	ASSERT(!ret);
3032	/*
3033	 * To inform that we have an extra eb under allocation, so that
3034	 * detach_extent_buffer_folio() won't release the folio private while
3035	 * the eb hasn't been inserted into the radix tree yet.
3036	 *
3037	 * The ref will be decreased when the eb releases the folio, in
3038	 * detach_extent_buffer_folio().  Thus it needs no special handling in
3039	 * the error path.
3040	 */
3041	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3042	spin_unlock(&mapping->i_private_lock);
3043	return 0;
3044}
3045
3046struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3047					  u64 start, u64 owner_root, int level)
3048{
3049	unsigned long len = fs_info->nodesize;
3050	int num_folios;
3051	int attached = 0;
 
3052	struct extent_buffer *eb;
3053	struct extent_buffer *existing_eb = NULL;
3054	struct btrfs_subpage *prealloc = NULL;
3055	u64 lockdep_owner = owner_root;
3056	bool page_contig = true;
3057	int uptodate = 1;
3058	int ret;
3059
3060	if (check_eb_alignment(fs_info, start))
 
3061		return ERR_PTR(-EINVAL);
3062
3063#if BITS_PER_LONG == 32
3064	if (start >= MAX_LFS_FILESIZE) {
3065		btrfs_err_rl(fs_info,
3066		"extent buffer %llu is beyond 32bit page cache limit", start);
3067		btrfs_err_32bit_limit(fs_info);
3068		return ERR_PTR(-EOVERFLOW);
3069	}
3070	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3071		btrfs_warn_32bit_limit(fs_info);
3072#endif
3073
3074	eb = find_extent_buffer(fs_info, start);
3075	if (eb)
3076		return eb;
3077
3078	eb = __alloc_extent_buffer(fs_info, start, len);
3079	if (!eb)
3080		return ERR_PTR(-ENOMEM);
3081
3082	/*
3083	 * The reloc trees are just snapshots, so we need them to appear to be
3084	 * just like any other fs tree WRT lockdep.
3085	 */
3086	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3087		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3088
3089	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3090
3091	/*
3092	 * Preallocate folio private for subpage case, so that we won't
3093	 * allocate memory with i_private_lock nor page lock hold.
3094	 *
3095	 * The memory will be freed by attach_extent_buffer_page() or freed
3096	 * manually if we exit earlier.
3097	 */
3098	if (fs_info->nodesize < PAGE_SIZE) {
3099		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3100		if (IS_ERR(prealloc)) {
3101			ret = PTR_ERR(prealloc);
3102			goto out;
3103		}
3104	}
3105
3106reallocate:
3107	/* Allocate all pages first. */
3108	ret = alloc_eb_folio_array(eb, true);
3109	if (ret < 0) {
3110		btrfs_free_subpage(prealloc);
3111		goto out;
3112	}
3113
3114	num_folios = num_extent_folios(eb);
3115	/* Attach all pages to the filemap. */
3116	for (int i = 0; i < num_folios; i++) {
3117		struct folio *folio;
3118
3119		ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3120		if (ret > 0) {
3121			ASSERT(existing_eb);
3122			goto out;
3123		}
3124
3125		/*
3126		 * TODO: Special handling for a corner case where the order of
3127		 * folios mismatch between the new eb and filemap.
3128		 *
3129		 * This happens when:
3130		 *
3131		 * - the new eb is using higher order folio
3132		 *
3133		 * - the filemap is still using 0-order folios for the range
3134		 *   This can happen at the previous eb allocation, and we don't
3135		 *   have higher order folio for the call.
3136		 *
3137		 * - the existing eb has already been freed
3138		 *
3139		 * In this case, we have to free the existing folios first, and
3140		 * re-allocate using the same order.
3141		 * Thankfully this is not going to happen yet, as we're still
3142		 * using 0-order folios.
3143		 */
3144		if (unlikely(ret == -EAGAIN)) {
3145			ASSERT(0);
3146			goto reallocate;
3147		}
3148		attached++;
3149
3150		/*
3151		 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3152		 * reliable, as we may choose to reuse the existing page cache
3153		 * and free the allocated page.
3154		 */
3155		folio = eb->folios[i];
3156		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3157
3158		/*
3159		 * Check if the current page is physically contiguous with previous eb
3160		 * page.
3161		 * At this stage, either we allocated a large folio, thus @i
3162		 * would only be 0, or we fall back to per-page allocation.
3163		 */
3164		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3165			page_contig = false;
3166
3167		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3168			uptodate = 0;
3169
3170		/*
3171		 * We can't unlock the pages just yet since the extent buffer
3172		 * hasn't been properly inserted into the radix tree yet; this
3173		 * opens a race with btree_release_folio(), which can free a page
3174		 * while we are still filling in all pages for the buffer, and we
3175		 * could crash.
3176		 */
3177	}
3178	if (uptodate)
3179		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3180	/* All pages are physically contiguous, can skip cross page handling. */
3181	if (page_contig)
3182		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3183again:
3184	ret = radix_tree_preload(GFP_NOFS);
3185	if (ret)
3186		goto out;
 
 
3187
3188	spin_lock(&fs_info->buffer_lock);
3189	ret = radix_tree_insert(&fs_info->buffer_radix,
3190				start >> fs_info->sectorsize_bits, eb);
3191	spin_unlock(&fs_info->buffer_lock);
3192	radix_tree_preload_end();
3193	if (ret == -EEXIST) {
3194		ret = 0;
3195		existing_eb = find_extent_buffer(fs_info, start);
3196		if (existing_eb)
3197			goto out;
3198		else
3199			goto again;
3200	}
3201	/* add one reference for the tree */
3202	check_buffer_tree_ref(eb);
3203	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3204
3205	/*
3206	 * Now it's safe to unlock the pages because any calls to
3207	 * btree_release_folio will correctly detect that a page belongs to a
3208	 * live buffer and won't free them prematurely.
3209	 */
3210	for (int i = 0; i < num_folios; i++)
3211		unlock_page(folio_page(eb->folios[i], 0));
3212	return eb;
3213
3214out:
3215	WARN_ON(!atomic_dec_and_test(&eb->refs));
3216
3217	/*
3218	 * Any attached folios need to be detached before we unlock them.  This
3219	 * is because when we insert our new folios into the mapping, we also
3220	 * attach our eb to each folio.  If we fail to insert a folio, we will
3221	 * look up the folio already at that index and grab its eb.  We do not
3222	 * want that lookup to grab this eb, as we're getting ready to free it.
3223	 * So we have to detach it first and then unlock it.
3224	 *
3225	 * We have to drop our reference and NULL it out here because in the
3226	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3227	 * Below when we call btrfs_release_extent_buffer() we will call
3228	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3229	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3230	 * double put our reference and be super sad.
3231	 */
3232	for (int i = 0; i < attached; i++) {
3233		ASSERT(eb->folios[i]);
3234		detach_extent_buffer_folio(eb, eb->folios[i]);
3235		unlock_page(folio_page(eb->folios[i], 0));
3236		folio_put(eb->folios[i]);
3237		eb->folios[i] = NULL;
3238	}
3239	/*
3240	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3241	 * so it can be cleaned up without utilizing page->mapping.
3242	 */
3243	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3244
3245	btrfs_release_extent_buffer(eb);
3246	if (ret < 0)
3247		return ERR_PTR(ret);
3248	ASSERT(existing_eb);
3249	return existing_eb;
3250}
3251
3252static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3253{
3254	struct extent_buffer *eb =
3255			container_of(head, struct extent_buffer, rcu_head);
3256
3257	__free_extent_buffer(eb);
3258}
3259
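/*
 * Drop one reference from @eb, with eb->refs_lock held by the caller.
 *
 * If that was the last reference, remove the eb from the buffer radix tree
 * (if it was ever inserted), release its folios and free the eb itself via
 * RCU.  The refs_lock is always released.  Returns 1 if the eb was freed,
 * 0 otherwise.
 */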
3260static int release_extent_buffer(struct extent_buffer *eb)
3261	__releases(&eb->refs_lock)
3262{
3263	lockdep_assert_held(&eb->refs_lock);
3264
3265	WARN_ON(atomic_read(&eb->refs) == 0);
3266	if (atomic_dec_and_test(&eb->refs)) {
3267		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3268			struct btrfs_fs_info *fs_info = eb->fs_info;
3269
3270			spin_unlock(&eb->refs_lock);
3271
3272			spin_lock(&fs_info->buffer_lock);
3273			radix_tree_delete(&fs_info->buffer_radix,
3274					  eb->start >> fs_info->sectorsize_bits);
3275			spin_unlock(&fs_info->buffer_lock);
3276		} else {
3277			spin_unlock(&eb->refs_lock);
3278		}
3279
3280		btrfs_leak_debug_del_eb(eb);
3281		/* Should be safe to release our pages at this point */
3282		btrfs_release_extent_buffer_pages(eb);
3283#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3284		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3285			__free_extent_buffer(eb);
3286			return 1;
3287		}
3288#endif
3289		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3290		return 1;
3291	}
3292	spin_unlock(&eb->refs_lock);
3293
3294	return 0;
3295}
3296
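/*
 * Drop a reference on @eb taken by a user of the buffer.
 *
 * While enough references remain that this cannot be the reference that
 * needs slow-path handling, the refcount is dropped locklessly.  Otherwise
 * take eb->refs_lock, drop the tree reference of a stale buffer that is
 * not under IO, and let release_extent_buffer() do the final accounting.
 */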
3297void free_extent_buffer(struct extent_buffer *eb)
3298{
3299	int refs;
3300	if (!eb)
3301		return;
3302
3303	refs = atomic_read(&eb->refs);
3304	while (1) {
3305		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3306		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3307			refs == 1))
3308			break;
3309		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3310			return;
3311	}
3312
3313	spin_lock(&eb->refs_lock);
3314	if (atomic_read(&eb->refs) == 2 &&
3315	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3316	    !extent_buffer_under_io(eb) &&
3317	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3318		atomic_dec(&eb->refs);
3319
3320	/*
3321	 * I know this is terrible, but it's temporary until we stop tracking
3322	 * the uptodate bits and such for the extent buffers.
3323	 */
3324	release_extent_buffer(eb);
3325}
3326
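/*
 * Drop a reference on @eb and mark it stale.
 *
 * Like free_extent_buffer(), but EXTENT_BUFFER_STALE is set first so the
 * tree reference can be dropped as well (here, if the buffer is not under
 * IO), allowing the buffer to be released early.
 */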
3327void free_extent_buffer_stale(struct extent_buffer *eb)
3328{
3329	if (!eb)
3330		return;
3331
3332	spin_lock(&eb->refs_lock);
3333	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3334
3335	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3336	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3337		atomic_dec(&eb->refs);
3338	release_extent_buffer(eb);
3339}
3340
3341static void btree_clear_folio_dirty(struct folio *folio)
3342{
3343	ASSERT(folio_test_dirty(folio));
3344	ASSERT(folio_test_locked(folio));
3345	folio_clear_dirty_for_io(folio);
3346	xa_lock_irq(&folio->mapping->i_pages);
3347	if (!folio_test_dirty(folio))
3348		__xa_clear_mark(&folio->mapping->i_pages,
3349				folio_index(folio), PAGECACHE_TAG_DIRTY);
3350	xa_unlock_irq(&folio->mapping->i_pages);
3351}
3352
3353static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3354{
3355	struct btrfs_fs_info *fs_info = eb->fs_info;
3356	struct folio *folio = eb->folios[0];
3357	bool last;
3358
3359	/* btree_clear_folio_dirty() needs the folio locked. */
3360	folio_lock(folio);
3361	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3362	if (last)
3363		btree_clear_folio_dirty(folio);
3364	folio_unlock(folio);
3365	WARN_ON(atomic_read(&eb->refs) == 0);
3366}
3367
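/*
 * Clear the dirty status of extent buffer @eb.
 *
 * Must be called with the tree write lock held on @eb.  If @trans is given,
 * buffers whose header generation does not match the transaction are left
 * alone.  On zoned filesystems a dirty buffer is only marked
 * EXTENT_BUFFER_ZONED_ZEROOUT (the actual zeroout happens later) to keep
 * write ordering; otherwise the dirty bit is cleared, dirty_metadata_bytes
 * is adjusted and the folio dirty flags are cleared, with subpage awareness.
 */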
3368void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3369			      struct extent_buffer *eb)
3370{
3371	struct btrfs_fs_info *fs_info = eb->fs_info;
3372	int num_folios;
3373
3374	btrfs_assert_tree_write_locked(eb);
3375
3376	if (trans && btrfs_header_generation(eb) != trans->transid)
3377		return;
3378
3379	/*
3380	 * Instead of clearing the dirty flag off of the buffer, mark it as
3381	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3382	 * write-ordering in zoned mode, without the need to later re-dirty
3383	 * the extent_buffer.
3384	 *
3385	 * The actual zeroout of the buffer will happen later in
3386	 * btree_csum_one_bio.
3387	 */
3388	if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3389		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3390		return;
3391	}
3392
3393	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3394		return;
3395
3396	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3397				 fs_info->dirty_metadata_batch);
3398
3399	if (eb->fs_info->nodesize < PAGE_SIZE)
3400		return clear_subpage_extent_buffer_dirty(eb);
3401
3402	num_folios = num_extent_folios(eb);
3403	for (int i = 0; i < num_folios; i++) {
3404		struct folio *folio = eb->folios[i];
3405
3406		if (!folio_test_dirty(folio))
3407			continue;
3408		folio_lock(folio);
3409		btree_clear_folio_dirty(folio);
3410		folio_unlock(folio);
3411	}
3412	WARN_ON(atomic_read(&eb->refs) == 0);
3413}
3414
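/*
 * Mark extent buffer @eb dirty for writeback.
 *
 * Calls check_buffer_tree_ref() so the tree keeps a reference on the buffer
 * while it is dirty, sets EXTENT_BUFFER_DIRTY and, if the buffer was not
 * already dirty, dirties the underlying folios and accounts the length in
 * dirty_metadata_bytes.
 */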
3415void set_extent_buffer_dirty(struct extent_buffer *eb)
3416{
3417	int num_folios;
3418	bool was_dirty;
3419
3420	check_buffer_tree_ref(eb);
3421
3422	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3423
3424	num_folios = num_extent_folios(eb);
3425	WARN_ON(atomic_read(&eb->refs) == 0);
3426	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3427	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3428
3429	if (!was_dirty) {
3430		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3431
3432		/*
3433		 * For the subpage case, we can have other extent buffers in the
3434		 * same page, and in clear_subpage_extent_buffer_dirty() we
3435		 * have to clear the page dirty flag without the subpage lock held.
3436		 * This can cause a race where our page gets its dirty flag cleared
3437		 * right after we just set it.
3438		 *
3439		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3440		 * its page for other reasons, so we can use the page lock to
3441		 * prevent the above race.
3442		 */
3443		if (subpage)
3444			lock_page(folio_page(eb->folios[0], 0));
3445		for (int i = 0; i < num_folios; i++)
3446			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3447					      eb->start, eb->len);
3448		if (subpage)
3449			unlock_page(folio_page(eb->folios[0], 0));
3450		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3451					 eb->len,
3452					 eb->fs_info->dirty_metadata_batch);
3453	}
3454#ifdef CONFIG_BTRFS_DEBUG
3455	for (int i = 0; i < num_folios; i++)
3456		ASSERT(folio_test_dirty(eb->folios[i]));
3457#endif
3458}
3459
3460void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3461{
3462	struct btrfs_fs_info *fs_info = eb->fs_info;
3463	int num_folios = num_extent_folios(eb);
3464
3465	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3466	for (int i = 0; i < num_folios; i++) {
3467		struct folio *folio = eb->folios[i];
3468
3469		if (!folio)
3470			continue;
3471
3472		/*
3473		 * This is special handling for subpage metadata, as the regular
3474		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3475		 */
3476		if (fs_info->nodesize >= PAGE_SIZE)
3477			folio_clear_uptodate(folio);
3478		else
3479			btrfs_subpage_clear_uptodate(fs_info, folio,
3480						     eb->start, eb->len);
3481	}
3482}
3483
3484void set_extent_buffer_uptodate(struct extent_buffer *eb)
3485{
3486	struct btrfs_fs_info *fs_info = eb->fs_info;
3487	int num_folios = num_extent_folios(eb);
3488
3489	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3490	for (int i = 0; i < num_folios; i++) {
3491		struct folio *folio = eb->folios[i];
3492
3493		/*
3494		 * This is special handling for subpage metadata, as the regular
3495		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3496		 */
3497		if (fs_info->nodesize >= PAGE_SIZE)
3498			folio_mark_uptodate(folio);
3499		else
3500			btrfs_subpage_set_uptodate(fs_info, folio,
3501						   eb->start, eb->len);
3502	}
3503}
3504
3505static void clear_extent_buffer_reading(struct extent_buffer *eb)
3506{
3507	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3508	smp_mb__after_atomic();
3509	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3510}
3511
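/*
 * Completion handler for metadata read bios.
 *
 * Validates the extent buffer contents, updates the uptodate state of the
 * eb and its folios accordingly, wakes up waiters on EXTENT_BUFFER_READING
 * and drops the reference taken for the read.
 */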
3512static void end_bbio_meta_read(struct btrfs_bio *bbio)
3513{
3514	struct extent_buffer *eb = bbio->private;
3515	struct btrfs_fs_info *fs_info = eb->fs_info;
3516	bool uptodate = !bbio->bio.bi_status;
3517	struct folio_iter fi;
3518	u32 bio_offset = 0;
3519
3520	/*
3521	 * If the extent buffer is marked UPTODATE before the read operation
3522	 * completes, other calls to read_extent_buffer_pages() will return
3523	 * early without waiting for the read to finish, causing data races.
3524	 */
3525	WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3526
3527	eb->read_mirror = bbio->mirror_num;
3528
3529	if (uptodate &&
3530	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3531		uptodate = false;
3532
3533	if (uptodate) {
3534		set_extent_buffer_uptodate(eb);
3535	} else {
3536		clear_extent_buffer_uptodate(eb);
3537		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3538	}
3539
3540	bio_for_each_folio_all(fi, &bbio->bio) {
3541		struct folio *folio = fi.folio;
3542		u64 start = eb->start + bio_offset;
3543		u32 len = fi.length;
3544
3545		if (uptodate)
3546			btrfs_folio_set_uptodate(fs_info, folio, start, len);
3547		else
3548			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3549
3550		bio_offset += len;
3551	}
3552
3553	clear_extent_buffer_reading(eb);
3554	free_extent_buffer(eb);
3555
3556	bio_put(&bbio->bio);
3557}
3558
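/*
 * Read an extent buffer from disk, unless it is already uptodate.
 *
 * Only one caller submits the read (serialized by EXTENT_BUFFER_READING);
 * concurrent callers simply wait for it to finish.  With WAIT_COMPLETE the
 * function waits for the read and returns -EIO if the buffer did not end up
 * uptodate, otherwise it returns right after submitting the bio.
 */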
3559int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3560			     const struct btrfs_tree_parent_check *check)
3561{
3562	struct btrfs_bio *bbio;
3563	bool ret;
3564
3565	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3566		return 0;
3567
3568	/*
3569	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3570	 * operation, which could potentially still be in flight.  In this case
3571	 * we simply want to return an error.
3572	 */
3573	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3574		return -EIO;
3575
3576	/* Someone else is already reading the buffer, just wait for it. */
3577	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3578		goto done;
3579
3580	/*
3581	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3582	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3583	 * started and finished reading the same eb.  In this case, UPTODATE
3584	 * will now be set, and we shouldn't read it in again.
3585	 */
3586	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3587		clear_extent_buffer_reading(eb);
3588		return 0;
3589	}
3590
3591	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3592	eb->read_mirror = 0;
3593	check_buffer_tree_ref(eb);
3594	atomic_inc(&eb->refs);
3595
3596	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3597			       REQ_OP_READ | REQ_META, eb->fs_info,
3598			       end_bbio_meta_read, eb);
3599	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3600	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3601	bbio->file_offset = eb->start;
3602	memcpy(&bbio->parent_check, check, sizeof(*check));
3603	if (eb->fs_info->nodesize < PAGE_SIZE) {
3604		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3605				    eb->start - folio_pos(eb->folios[0]));
3606		ASSERT(ret);
3607	} else {
3608		int num_folios = num_extent_folios(eb);
3609
3610		for (int i = 0; i < num_folios; i++) {
3611			struct folio *folio = eb->folios[i];
3612
3613			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3614			ASSERT(ret);
3615		}
3616	}
3617	btrfs_submit_bbio(bbio, mirror_num);
3618
3619done:
3620	if (wait == WAIT_COMPLETE) {
3621		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3622		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3623			return -EIO;
3624	}
3625
3626	return 0;
3627}
3628
3629static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3630			    unsigned long len)
3631{
3632	btrfs_warn(eb->fs_info,
3633		"access to eb bytenr %llu len %u out of range start %lu len %lu",
3634		eb->start, eb->len, start, len);
3635	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3636
3637	return true;
3638}
3639
3640/*
3641 * Check if the [start, start + len) range is valid before reading/writing
3642 * the eb.
3643 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3644 *
3645 * Callers should not touch the dst/src memory if this function returns an error.
3646 */
3647static inline int check_eb_range(const struct extent_buffer *eb,
3648				 unsigned long start, unsigned long len)
3649{
3650	unsigned long offset;
3651
3652	/* start, start + len should not go beyond eb->len nor overflow */
3653	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3654		return report_eb_range(eb, start, len);
3655
3656	return false;
3657}
3658
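/*
 * Copy @len bytes starting at offset @start inside @eb into @dstv.
 *
 * The range is validated with check_eb_range(); on an invalid range the
 * destination is zeroed instead so callers never see uninitialized data.
 */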
3659void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3660			unsigned long start, unsigned long len)
3661{
3662	const int unit_size = eb->folio_size;
3663	size_t cur;
3664	size_t offset;
3665	char *dst = (char *)dstv;
3666	unsigned long i = get_eb_folio_index(eb, start);
3667
3668	if (check_eb_range(eb, start, len)) {
3669		/*
3670		 * Invalid range hit, zero the destination so callers won't get
3671		 * random garbage from their uninitialized memory.
3672		 */
3673		memset(dstv, 0, len);
3674		return;
3675	}
3676
3677	if (eb->addr) {
3678		memcpy(dstv, eb->addr + start, len);
3679		return;
3680	}
3681
3682	offset = get_eb_offset_in_folio(eb, start);
3683
3684	while (len > 0) {
3685		char *kaddr;
3686
3687		cur = min(len, unit_size - offset);
3688		kaddr = folio_address(eb->folios[i]);
3689		memcpy(dst, kaddr + offset, cur);
3690
3691		dst += cur;
3692		len -= cur;
3693		offset = 0;
3694		i++;
3695	}
3696}
3697
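/*
 * Same as read_extent_buffer(), but copy into user memory with
 * copy_to_user_nofault().
 *
 * Returns -EFAULT as soon as a copy would fault, so this is presumably meant
 * for contexts where handling a page fault is not allowed.
 */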
3698int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3699				       void __user *dstv,
3700				       unsigned long start, unsigned long len)
3701{
3702	const int unit_size = eb->folio_size;
3703	size_t cur;
3704	size_t offset;
3705	char __user *dst = (char __user *)dstv;
3706	unsigned long i = get_eb_folio_index(eb, start);
3707	int ret = 0;
3708
3709	WARN_ON(start > eb->len);
3710	WARN_ON(start + len > eb->start + eb->len);
3711
3712	if (eb->addr) {
3713		if (copy_to_user_nofault(dstv, eb->addr + start, len))
3714			ret = -EFAULT;
3715		return ret;
3716	}
3717
3718	offset = get_eb_offset_in_folio(eb, start);
3719
3720	while (len > 0) {
3721		char *kaddr;
3722
3723		cur = min(len, unit_size - offset);
3724		kaddr = folio_address(eb->folios[i]);
3725		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3726			ret = -EFAULT;
3727			break;
3728		}
3729
3730		dst += cur;
3731		len -= cur;
3732		offset = 0;
3733		i++;
3734	}
3735
3736	return ret;
3737}
3738
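/*
 * Compare @len bytes at offset @start inside @eb with the buffer @ptrv.
 *
 * Returns 0 if they are equal, a memcmp()-style non-zero value if they
 * differ, or -EINVAL if the range is out of bounds.
 */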
3739int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3740			 unsigned long start, unsigned long len)
3741{
3742	const int unit_size = eb->folio_size;
3743	size_t cur;
3744	size_t offset;
3745	char *kaddr;
3746	char *ptr = (char *)ptrv;
3747	unsigned long i = get_eb_folio_index(eb, start);
3748	int ret = 0;
3749
3750	if (check_eb_range(eb, start, len))
3751		return -EINVAL;
3752
3753	if (eb->addr)
3754		return memcmp(ptrv, eb->addr + start, len);
3755
3756	offset = get_eb_offset_in_folio(eb, start);
3757
3758	while (len > 0) {
3759		cur = min(len, unit_size - offset);
3760		kaddr = folio_address(eb->folios[i]);
3761		ret = memcmp(ptr, kaddr + offset, cur);
3762		if (ret)
3763			break;
3764
3765		ptr += cur;
3766		len -= cur;
3767		offset = 0;
3768		i++;
3769	}
3770	return ret;
3771}
3772
3773/*
3774 * Check that the extent buffer is uptodate.
3775 *
3776 * For the regular sector size == PAGE_SIZE case, check if the folio is uptodate.
3777 * For the subpage case, check if the range covered by the eb has the uptodate bit set.
3778 */
3779static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3780{
3781	struct btrfs_fs_info *fs_info = eb->fs_info;
3782	struct folio *folio = eb->folios[i];
3783
3784	ASSERT(folio);
3785
3786	/*
3787	 * If we are using the commit root we could potentially clear a page
3788	 * Uptodate while we're using the extent buffer that we've previously
3789	 * looked up.  We don't want to complain in this case, as the page was
3790	 * valid before; we just didn't write it out.  Instead we want to catch
3791	 * the case where we didn't actually read the block properly, which
3792	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3793	 */
3794	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3795		return;
3796
3797	if (fs_info->nodesize < PAGE_SIZE) {
3798		folio = eb->folios[0];
3799		ASSERT(i == 0);
3800		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3801							 eb->start, eb->len)))
3802			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3803	} else {
3804		WARN_ON(!folio_test_uptodate(folio));
3805	}
3806}
3807
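/*
 * Copy @len bytes from @srcv into @eb at offset @start.
 *
 * @use_memmove selects memmove() over memcpy() for source and destination
 * ranges that may overlap.  For unmapped (dummy) ebs the per-folio uptodate
 * assertion is skipped.
 */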
3808static void __write_extent_buffer(const struct extent_buffer *eb,
3809				  const void *srcv, unsigned long start,
3810				  unsigned long len, bool use_memmove)
3811{
3812	const int unit_size = eb->folio_size;
3813	size_t cur;
3814	size_t offset;
3815	char *kaddr;
3816	const char *src = (const char *)srcv;
3817	unsigned long i = get_eb_folio_index(eb, start);
3818	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
3819	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3820
3821	if (check_eb_range(eb, start, len))
3822		return;
3823
3824	if (eb->addr) {
3825		if (use_memmove)
3826			memmove(eb->addr + start, srcv, len);
3827		else
3828			memcpy(eb->addr + start, srcv, len);
3829		return;
3830	}
3831
3832	offset = get_eb_offset_in_folio(eb, start);
3833
3834	while (len > 0) {
3835		if (check_uptodate)
3836			assert_eb_folio_uptodate(eb, i);
3837
3838		cur = min(len, unit_size - offset);
3839		kaddr = folio_address(eb->folios[i]);
3840		if (use_memmove)
3841			memmove(kaddr + offset, src, cur);
3842		else
3843			memcpy(kaddr + offset, src, cur);
3844
3845		src += cur;
3846		len -= cur;
3847		offset = 0;
3848		i++;
3849	}
3850}
3851
3852void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3853			 unsigned long start, unsigned long len)
3854{
3855	return __write_extent_buffer(eb, srcv, start, len, false);
3856}
3857
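/*
 * Fill @len bytes of @eb starting at offset @start with the byte @c,
 * handling both the contiguous (eb->addr) and the per-folio cases.
 */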
3858static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3859				 unsigned long start, unsigned long len)
3860{
3861	const int unit_size = eb->folio_size;
3862	unsigned long cur = start;
3863
3864	if (eb->addr) {
3865		memset(eb->addr + start, c, len);
3866		return;
3867	}
3868
3869	while (cur < start + len) {
3870		unsigned long index = get_eb_folio_index(eb, cur);
3871		unsigned int offset = get_eb_offset_in_folio(eb, cur);
3872		unsigned int cur_len = min(start + len - cur, unit_size - offset);
3873
3874		assert_eb_folio_uptodate(eb, index);
3875		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3876
3877		cur += cur_len;
3878	}
3879}
3880
3881void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3882			   unsigned long len)
3883{
3884	if (check_eb_range(eb, start, len))
3885		return;
3886	return memset_extent_buffer(eb, 0, start, len);
3887}
3888
3889void copy_extent_buffer_full(const struct extent_buffer *dst,
3890			     const struct extent_buffer *src)
3891{
3892	const int unit_size = src->folio_size;
3893	unsigned long cur = 0;
3894
3895	ASSERT(dst->len == src->len);
3896
3897	while (cur < src->len) {
3898		unsigned long index = get_eb_folio_index(src, cur);
3899		unsigned long offset = get_eb_offset_in_folio(src, cur);
3900		unsigned long cur_len = min(src->len, unit_size - offset);
3901		void *addr = folio_address(src->folios[index]) + offset;
3902
3903		write_extent_buffer(dst, addr, cur, cur_len);
3904
3905		cur += cur_len;
3906	}
3907}
3908
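/*
 * Copy @len bytes from @src at @src_offset into @dst at @dst_offset.
 *
 * Both ranges are bounds checked first; the two extent buffers are expected
 * to have the same length (a mismatch only triggers a warning).
 */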
3909void copy_extent_buffer(const struct extent_buffer *dst,
3910			const struct extent_buffer *src,
3911			unsigned long dst_offset, unsigned long src_offset,
3912			unsigned long len)
3913{
3914	const int unit_size = dst->folio_size;
3915	u64 dst_len = dst->len;
3916	size_t cur;
3917	size_t offset;
3918	char *kaddr;
3919	unsigned long i = get_eb_folio_index(dst, dst_offset);
3920
3921	if (check_eb_range(dst, dst_offset, len) ||
3922	    check_eb_range(src, src_offset, len))
3923		return;
3924
3925	WARN_ON(src->len != dst_len);
3926
3927	offset = get_eb_offset_in_folio(dst, dst_offset);
3928
3929	while (len > 0) {
3930		assert_eb_folio_uptodate(dst, i);
3931
3932		cur = min(len, (unsigned long)(unit_size - offset));
3933
3934		kaddr = folio_address(dst->folios[i]);
3935		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3936
3937		src_offset += cur;
3938		len -= cur;
3939		offset = 0;
3940		i++;
3941	}
3942}
3943
3944/*
3945 * Calculate the folio and offset of the byte containing the given bit number.
3946 *
3947 * @eb:           the extent buffer
3948 * @start:        offset of the bitmap item in the extent buffer
3949 * @nr:           bit number
3950 * @folio_index:  return index of the folio in the extent buffer that contains
3951 *                the given bit number
3952 * @folio_offset: return offset into the folio given by folio_index
3953 *
3954 * This helper hides the ugliness of finding the byte in an extent buffer which
3955 * contains a given bit.
3956 */
3957static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3958				    unsigned long start, unsigned long nr,
3959				    unsigned long *folio_index,
3960				    size_t *folio_offset)
3961{
3962	size_t byte_offset = BIT_BYTE(nr);
3963	size_t offset;
3964
3965	/*
3966	 * The byte we want is the offset of the extent buffer + the offset of
3967	 * the bitmap item in the extent buffer + the offset of the byte in the
3968	 * bitmap item.
3969	 */
3970	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3971
3972	*folio_index = offset >> eb->folio_shift;
3973	*folio_offset = offset_in_eb_folio(eb, offset);
3974}
3975
3976/*
3977 * Determine whether a bit in a bitmap item is set.
3978 *
3979 * @eb:     the extent buffer
3980 * @start:  offset of the bitmap item in the extent buffer
3981 * @nr:     bit number to test
3982 */
3983int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3984			   unsigned long nr)
3985{
3986	unsigned long i;
3987	size_t offset;
3988	u8 *kaddr;
3989
3990	eb_bitmap_offset(eb, start, nr, &i, &offset);
3991	assert_eb_folio_uptodate(eb, i);
3992	kaddr = folio_address(eb->folios[i]);
3993	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3994}
3995
3996static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3997{
3998	unsigned long index = get_eb_folio_index(eb, bytenr);
3999
4000	if (check_eb_range(eb, bytenr, 1))
4001		return NULL;
4002	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4003}
4004
4005/*
4006 * Set an area of a bitmap to 1.
4007 *
4008 * @eb:     the extent buffer
4009 * @start:  offset of the bitmap item in the extent buffer
4010 * @pos:    bit number of the first bit
4011 * @len:    number of bits to set
4012 */
4013void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4014			      unsigned long pos, unsigned long len)
4015{
4016	unsigned int first_byte = start + BIT_BYTE(pos);
4017	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4018	const bool same_byte = (first_byte == last_byte);
4019	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4020	u8 *kaddr;
4021
4022	if (same_byte)
4023		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4024
4025	/* Handle the first byte. */
4026	kaddr = extent_buffer_get_byte(eb, first_byte);
4027	*kaddr |= mask;
4028	if (same_byte)
4029		return;
4030
4031	/* Handle the byte aligned part. */
4032	ASSERT(first_byte + 1 <= last_byte);
4033	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4034
4035	/* Handle the last byte. */
4036	kaddr = extent_buffer_get_byte(eb, last_byte);
4037	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4038}
4039
4040
4041/*
4042 * Clear an area of a bitmap.
4043 *
4044 * @eb:     the extent buffer
4045 * @start:  offset of the bitmap item in the extent buffer
4046 * @pos:    bit number of the first bit
4047 * @len:    number of bits to clear
4048 */
4049void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4050				unsigned long start, unsigned long pos,
4051				unsigned long len)
4052{
4053	unsigned int first_byte = start + BIT_BYTE(pos);
4054	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4055	const bool same_byte = (first_byte == last_byte);
4056	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4057	u8 *kaddr;
4058
4059	if (same_byte)
4060		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4061
4062	/* Handle the first byte. */
4063	kaddr = extent_buffer_get_byte(eb, first_byte);
4064	*kaddr &= ~mask;
4065	if (same_byte)
4066		return;
4067
4068	/* Handle the byte aligned part. */
4069	ASSERT(first_byte + 1 <= last_byte);
4070	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4071
4072	/* Handle the last byte. */
4073	kaddr = extent_buffer_get_byte(eb, last_byte);
4074	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4075}
4076
4077static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4078{
4079	unsigned long distance = (src > dst) ? src - dst : dst - src;
4080	return distance < len;
4081}
4082
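/*
 * Copy @len bytes within @dst from offset @src_offset to offset @dst_offset.
 *
 * Overlapping ranges are detected per chunk and copied with memmove()
 * instead of memcpy() so the result stays correct.
 */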
4083void memcpy_extent_buffer(const struct extent_buffer *dst,
4084			  unsigned long dst_offset, unsigned long src_offset,
4085			  unsigned long len)
4086{
4087	const int unit_size = dst->folio_size;
4088	unsigned long cur_off = 0;
4089
4090	if (check_eb_range(dst, dst_offset, len) ||
4091	    check_eb_range(dst, src_offset, len))
4092		return;
4093
4094	if (dst->addr) {
4095		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4096
4097		if (use_memmove)
4098			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4099		else
4100			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4101		return;
4102	}
4103
4104	while (cur_off < len) {
4105		unsigned long cur_src = cur_off + src_offset;
4106		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4107		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4108		unsigned long cur_len = min(src_offset + len - cur_src,
4109					    unit_size - folio_off);
4110		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4111		const bool use_memmove = areas_overlap(src_offset + cur_off,
4112						       dst_offset + cur_off, cur_len);
4113
4114		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4115				      use_memmove);
4116		cur_off += cur_len;
4117	}
4118}
4119
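/*
 * Move @len bytes within @dst from offset @src_offset to offset @dst_offset
 * with memmove() semantics.
 *
 * When the destination is below the source this is delegated to
 * memcpy_extent_buffer(); otherwise the copy is done chunk by chunk from the
 * end so overlapping data is not clobbered.
 */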
4120void memmove_extent_buffer(const struct extent_buffer *dst,
4121			   unsigned long dst_offset, unsigned long src_offset,
4122			   unsigned long len)
4123{
4124	unsigned long dst_end = dst_offset + len - 1;
4125	unsigned long src_end = src_offset + len - 1;
4126
4127	if (check_eb_range(dst, dst_offset, len) ||
4128	    check_eb_range(dst, src_offset, len))
4129		return;
4130
4131	if (dst_offset < src_offset) {
4132		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4133		return;
4134	}
4135
4136	if (dst->addr) {
4137		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4138		return;
4139	}
4140
4141	while (len > 0) {
4142		unsigned long src_i;
4143		size_t cur;
4144		size_t dst_off_in_folio;
4145		size_t src_off_in_folio;
4146		void *src_addr;
4147		bool use_memmove;
4148
4149		src_i = get_eb_folio_index(dst, src_end);
4150
4151		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4152		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4153
4154		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4155		cur = min(cur, dst_off_in_folio + 1);
4156
4157		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4158					 cur + 1;
4159		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4160					    cur);
4161
4162		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4163				      use_memmove);
4164
4165		dst_end -= cur;
4166		src_end -= cur;
4167		len -= cur;
4168	}
4169}
4170
4171#define GANG_LOOKUP_SIZE	16
4172static struct extent_buffer *get_next_extent_buffer(
4173		const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4174{
4175	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4176	struct extent_buffer *found = NULL;
4177	u64 folio_start = folio_pos(folio);
4178	u64 cur = folio_start;
4179
4180	ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4181	lockdep_assert_held(&fs_info->buffer_lock);
4182
4183	while (cur < folio_start + PAGE_SIZE) {
4184		int ret;
4185		int i;
4186
4187		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4188				(void **)gang, cur >> fs_info->sectorsize_bits,
4189				min_t(unsigned int, GANG_LOOKUP_SIZE,
4190				      PAGE_SIZE / fs_info->nodesize));
4191		if (ret == 0)
4192			goto out;
4193		for (i = 0; i < ret; i++) {
4194			/* Already beyond page end */
4195			if (gang[i]->start >= folio_start + PAGE_SIZE)
4196				goto out;
4197			/* Found one */
4198			if (gang[i]->start >= bytenr) {
4199				found = gang[i];
4200				goto out;
4201			}
4202		}
4203		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4204	}
4205out:
4206	return found;
4207}
4208
4209static int try_release_subpage_extent_buffer(struct folio *folio)
4210{
4211	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4212	u64 cur = folio_pos(folio);
4213	const u64 end = cur + PAGE_SIZE;
4214	int ret;
4215
4216	while (cur < end) {
4217		struct extent_buffer *eb = NULL;
4218
4219		/*
4220		 * Unlike try_release_extent_buffer() which uses folio private
4221		 * to grab the buffer, for the subpage case we rely on the radix
4222		 * tree, thus we need to ensure radix tree consistency.
4223		 *
4224		 * We also want an atomic snapshot of the radix tree, thus we go
4225		 * with the spinlock rather than RCU.
4226		 */
4227		spin_lock(&fs_info->buffer_lock);
4228		eb = get_next_extent_buffer(fs_info, folio, cur);
4229		if (!eb) {
4230			/* No more eb in the page range after or at cur */
4231			spin_unlock(&fs_info->buffer_lock);
4232			break;
4233		}
4234		cur = eb->start + eb->len;
4235
4236		/*
4237		 * The same as try_release_extent_buffer(), to ensure the eb
4238		 * won't disappear out from under us.
4239		 */
4240		spin_lock(&eb->refs_lock);
4241		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4242			spin_unlock(&eb->refs_lock);
4243			spin_unlock(&fs_info->buffer_lock);
4244			break;
4245		}
4246		spin_unlock(&fs_info->buffer_lock);
4247
4248		/*
4249		 * If the tree ref isn't set then we know the ref on this eb is a
4250		 * real ref, so just return; this eb will likely be freed soon
4251		 * anyway.
4252		 */
4253		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4254			spin_unlock(&eb->refs_lock);
4255			break;
4256		}
4257
4258		/*
4259		 * Here we don't care about the return value, as we will always
4260		 * check the folio private at the end.  And
4261		 * release_extent_buffer() will release the refs_lock.
4262		 */
4263		release_extent_buffer(eb);
4264	}
4265	/*
4266	 * Finally, check if we have cleared folio private: if we have released
4267	 * all ebs in the page, folio private should be cleared by now.
4268	 */
4269	spin_lock(&folio->mapping->i_private_lock);
4270	if (!folio_test_private(folio))
4271		ret = 1;
4272	else
4273		ret = 0;
4274	spin_unlock(&folio->mapping->i_private_lock);
4275	return ret;
4276
4277}
4278
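/*
 * Try to release the extent buffer attached to @folio so the folio can be
 * freed.
 *
 * Returns 1 if the buffer (and thus folio private) could be released, 0 if
 * it is still referenced or under IO.
 */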
4279int try_release_extent_buffer(struct folio *folio)
4280{
4281	struct extent_buffer *eb;
4282
4283	if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4284		return try_release_subpage_extent_buffer(folio);
4285
4286	/*
4287	 * We need to make sure nobody is changing folio private, as we rely on
4288	 * folio private as the pointer to the extent buffer.
4289	 */
4290	spin_lock(&folio->mapping->i_private_lock);
4291	if (!folio_test_private(folio)) {
4292		spin_unlock(&folio->mapping->i_private_lock);
4293		return 1;
4294	}
4295
4296	eb = folio_get_private(folio);
4297	BUG_ON(!eb);
4298
4299	/*
4300	 * This is a little awful but should be ok; we need to make sure that
4301	 * the eb doesn't disappear out from under us while we're looking at
4302	 * this page.
4303	 */
4304	spin_lock(&eb->refs_lock);
4305	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4306		spin_unlock(&eb->refs_lock);
4307		spin_unlock(&folio->mapping->i_private_lock);
4308		return 0;
4309	}
4310	spin_unlock(&folio->mapping->i_private_lock);
4311
4312	/*
4313	 * If the tree ref isn't set then we know the ref on this eb is a real ref,
4314	 * so just return; this page will likely be freed soon anyway.
4315	 */
4316	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4317		spin_unlock(&eb->refs_lock);
4318		return 0;
4319	}
4320
4321	return release_extent_buffer(eb);
4322}
4323
4324/*
4325 * Attempt to readahead a child block.
4326 *
4327 * @fs_info:	the fs_info
4328 * @bytenr:	bytenr to read
4329 * @owner_root: objectid of the root that owns this eb
4330 * @gen:	generation for the uptodate check, can be 0
4331 * @level:	level for the eb
4332 *
4333 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4334 * normal uptodate check of the eb, without checking the generation.  If we have
4335 * to read the block we will not block on anything.
4336 */
4337void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4338				u64 bytenr, u64 owner_root, u64 gen, int level)
4339{
4340	struct btrfs_tree_parent_check check = {
4341		.level = level,
4342		.transid = gen
4343	};
4344	struct extent_buffer *eb;
4345	int ret;
4346
4347	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4348	if (IS_ERR(eb))
4349		return;
4350
4351	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4352		free_extent_buffer(eb);
4353		return;
4354	}
4355
4356	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4357	if (ret < 0)
4358		free_extent_buffer_stale(eb);
4359	else
4360		free_extent_buffer(eb);
4361}
4362
4363/*
4364 * Readahead a node's child block.
4365 *
4366 * @node:	parent node we're reading from
4367 * @slot:	slot in the parent node for the child we want to read
4368 *
4369 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4370 * at by the slot in the node provided.
4371 */
4372void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4373{
4374	btrfs_readahead_tree_block(node->fs_info,
4375				   btrfs_node_blockptr(node, slot),
4376				   btrfs_header_owner(node),
4377				   btrfs_node_ptr_generation(node, slot),
4378				   btrfs_header_level(node) - 1);
4379}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
 
   9#include <linux/spinlock.h>
  10#include <linux/blkdev.h>
  11#include <linux/swap.h>
  12#include <linux/writeback.h>
  13#include <linux/pagevec.h>
  14#include <linux/prefetch.h>
  15#include <linux/cleancache.h>
  16#include "extent_io.h"
 
  17#include "extent_map.h"
  18#include "ctree.h"
  19#include "btrfs_inode.h"
  20#include "volumes.h"
  21#include "check-integrity.h"
  22#include "locking.h"
  23#include "rcu-string.h"
  24#include "backref.h"
  25#include "disk-io.h"
 
 
 
 
 
 
 
 
 
 
 
  26
  27static struct kmem_cache *extent_state_cache;
  28static struct kmem_cache *extent_buffer_cache;
  29static struct bio_set *btrfs_bioset;
  30
  31static inline bool extent_state_in_tree(const struct extent_state *state)
  32{
  33	return !RB_EMPTY_NODE(&state->rb_node);
  34}
  35
  36#ifdef CONFIG_BTRFS_DEBUG
  37static LIST_HEAD(buffers);
  38static LIST_HEAD(states);
  39
  40static DEFINE_SPINLOCK(leak_lock);
  41
  42static inline
  43void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
  44{
 
  45	unsigned long flags;
  46
  47	spin_lock_irqsave(&leak_lock, flags);
  48	list_add(new, head);
  49	spin_unlock_irqrestore(&leak_lock, flags);
  50}
  51
  52static inline
  53void btrfs_leak_debug_del(struct list_head *entry)
  54{
 
  55	unsigned long flags;
  56
  57	spin_lock_irqsave(&leak_lock, flags);
  58	list_del(entry);
  59	spin_unlock_irqrestore(&leak_lock, flags);
  60}
  61
  62static inline
  63void btrfs_leak_debug_check(void)
  64{
  65	struct extent_state *state;
  66	struct extent_buffer *eb;
 
  67
  68	while (!list_empty(&states)) {
  69		state = list_entry(states.next, struct extent_state, leak_list);
  70		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
  71		       state->start, state->end, state->state,
  72		       extent_state_in_tree(state),
  73		       refcount_read(&state->refs));
  74		list_del(&state->leak_list);
  75		kmem_cache_free(extent_state_cache, state);
  76	}
  77
  78	while (!list_empty(&buffers)) {
  79		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
  80		pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
  81		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
 
 
 
 
 
  82		list_del(&eb->leak_list);
 
  83		kmem_cache_free(extent_buffer_cache, eb);
  84	}
  85}
  86
  87#define btrfs_debug_check_extent_io_range(tree, start, end)		\
  88	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
  89static inline void __btrfs_debug_check_extent_io_range(const char *caller,
  90		struct extent_io_tree *tree, u64 start, u64 end)
  91{
  92	if (tree->ops && tree->ops->check_extent_io_range)
  93		tree->ops->check_extent_io_range(tree->private_data, caller,
  94						 start, end);
  95}
  96#else
  97#define btrfs_leak_debug_add(new, head)	do {} while (0)
  98#define btrfs_leak_debug_del(entry)	do {} while (0)
  99#define btrfs_leak_debug_check()	do {} while (0)
 100#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 101#endif
 102
 103#define BUFFER_LRU_MAX 64
 104
 105struct tree_entry {
 106	u64 start;
 107	u64 end;
 108	struct rb_node rb_node;
 109};
 110
 111struct extent_page_data {
 112	struct bio *bio;
 113	struct extent_io_tree *tree;
 114	/* tells writepage not to lock the state bits for this range
 115	 * it still does the unlocking
 
 
 
 116	 */
 117	unsigned int extent_locked:1;
 118
 119	/* tells the submit_bio code to use REQ_SYNC */
 120	unsigned int sync_io:1;
 121};
 122
 123static int add_extent_changeset(struct extent_state *state, unsigned bits,
 124				 struct extent_changeset *changeset,
 125				 int set)
 126{
 127	int ret;
 128
 129	if (!changeset)
 130		return 0;
 131	if (set && (state->state & bits) == bits)
 132		return 0;
 133	if (!set && (state->state & bits) == 0)
 134		return 0;
 135	changeset->bytes_changed += state->end - state->start + 1;
 136	ret = ulist_add(&changeset->range_changed, state->start, state->end,
 137			GFP_ATOMIC);
 138	return ret;
 139}
 140
 141static void flush_write_bio(struct extent_page_data *epd);
 142
 143static inline struct btrfs_fs_info *
 144tree_fs_info(struct extent_io_tree *tree)
 145{
 146	if (tree->ops)
 147		return tree->ops->tree_fs_info(tree->private_data);
 148	return NULL;
 149}
 150
 151int __init extent_io_init(void)
 152{
 153	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 154			sizeof(struct extent_state), 0,
 155			SLAB_MEM_SPREAD, NULL);
 156	if (!extent_state_cache)
 157		return -ENOMEM;
 158
 159	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 160			sizeof(struct extent_buffer), 0,
 161			SLAB_MEM_SPREAD, NULL);
 162	if (!extent_buffer_cache)
 163		goto free_state_cache;
 164
 165	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
 166				     offsetof(struct btrfs_io_bio, bio),
 167				     BIOSET_NEED_BVECS);
 168	if (!btrfs_bioset)
 169		goto free_buffer_cache;
 170
 171	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
 172		goto free_bioset;
 173
 174	return 0;
 175
 176free_bioset:
 177	bioset_free(btrfs_bioset);
 178	btrfs_bioset = NULL;
 179
 180free_buffer_cache:
 181	kmem_cache_destroy(extent_buffer_cache);
 182	extent_buffer_cache = NULL;
 183
 184free_state_cache:
 185	kmem_cache_destroy(extent_state_cache);
 186	extent_state_cache = NULL;
 187	return -ENOMEM;
 188}
 189
 190void __cold extent_io_exit(void)
 191{
 192	btrfs_leak_debug_check();
 193
 194	/*
 195	 * Make sure all delayed rcu free are flushed before we
 196	 * destroy caches.
 197	 */
 198	rcu_barrier();
 199	kmem_cache_destroy(extent_state_cache);
 200	kmem_cache_destroy(extent_buffer_cache);
 201	if (btrfs_bioset)
 202		bioset_free(btrfs_bioset);
 203}
 204
 205void extent_io_tree_init(struct extent_io_tree *tree,
 206			 void *private_data)
 207{
 208	tree->state = RB_ROOT;
 209	tree->ops = NULL;
 210	tree->dirty_bytes = 0;
 211	spin_lock_init(&tree->lock);
 212	tree->private_data = private_data;
 213}
 214
 215static struct extent_state *alloc_extent_state(gfp_t mask)
 216{
 217	struct extent_state *state;
 218
 219	/*
 220	 * The given mask might be not appropriate for the slab allocator,
 221	 * drop the unsupported bits
 222	 */
 223	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
 224	state = kmem_cache_alloc(extent_state_cache, mask);
 225	if (!state)
 226		return state;
 227	state->state = 0;
 228	state->failrec = NULL;
 229	RB_CLEAR_NODE(&state->rb_node);
 230	btrfs_leak_debug_add(&state->leak_list, &states);
 231	refcount_set(&state->refs, 1);
 232	init_waitqueue_head(&state->wq);
 233	trace_alloc_extent_state(state, mask, _RET_IP_);
 234	return state;
 235}
 236
 237void free_extent_state(struct extent_state *state)
 238{
 239	if (!state)
 240		return;
 241	if (refcount_dec_and_test(&state->refs)) {
 242		WARN_ON(extent_state_in_tree(state));
 243		btrfs_leak_debug_del(&state->leak_list);
 244		trace_free_extent_state(state, _RET_IP_);
 245		kmem_cache_free(extent_state_cache, state);
 246	}
 247}
 248
 249static struct rb_node *tree_insert(struct rb_root *root,
 250				   struct rb_node *search_start,
 251				   u64 offset,
 252				   struct rb_node *node,
 253				   struct rb_node ***p_in,
 254				   struct rb_node **parent_in)
 255{
 256	struct rb_node **p;
 257	struct rb_node *parent = NULL;
 258	struct tree_entry *entry;
 259
 260	if (p_in && parent_in) {
 261		p = *p_in;
 262		parent = *parent_in;
 263		goto do_insert;
 264	}
 265
 266	p = search_start ? &search_start : &root->rb_node;
 267	while (*p) {
 268		parent = *p;
 269		entry = rb_entry(parent, struct tree_entry, rb_node);
 270
 271		if (offset < entry->start)
 272			p = &(*p)->rb_left;
 273		else if (offset > entry->end)
 274			p = &(*p)->rb_right;
 275		else
 276			return parent;
 277	}
 278
 279do_insert:
 280	rb_link_node(node, parent, p);
 281	rb_insert_color(node, root);
 282	return NULL;
 283}
 284
 285static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 286				      struct rb_node **prev_ret,
 287				      struct rb_node **next_ret,
 288				      struct rb_node ***p_ret,
 289				      struct rb_node **parent_ret)
 290{
 291	struct rb_root *root = &tree->state;
 292	struct rb_node **n = &root->rb_node;
 293	struct rb_node *prev = NULL;
 294	struct rb_node *orig_prev = NULL;
 295	struct tree_entry *entry;
 296	struct tree_entry *prev_entry = NULL;
 297
 298	while (*n) {
 299		prev = *n;
 300		entry = rb_entry(prev, struct tree_entry, rb_node);
 301		prev_entry = entry;
 302
 303		if (offset < entry->start)
 304			n = &(*n)->rb_left;
 305		else if (offset > entry->end)
 306			n = &(*n)->rb_right;
 307		else
 308			return *n;
 309	}
 310
 311	if (p_ret)
 312		*p_ret = n;
 313	if (parent_ret)
 314		*parent_ret = prev;
 
 315
 316	if (prev_ret) {
 317		orig_prev = prev;
 318		while (prev && offset > prev_entry->end) {
 319			prev = rb_next(prev);
 320			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 321		}
 322		*prev_ret = prev;
 323		prev = orig_prev;
 324	}
 325
 326	if (next_ret) {
 327		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 328		while (prev && offset < prev_entry->start) {
 329			prev = rb_prev(prev);
 330			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 331		}
 332		*next_ret = prev;
 333	}
 334	return NULL;
 335}
 336
 337static inline struct rb_node *
 338tree_search_for_insert(struct extent_io_tree *tree,
 339		       u64 offset,
 340		       struct rb_node ***p_ret,
 341		       struct rb_node **parent_ret)
 342{
 343	struct rb_node *prev = NULL;
 344	struct rb_node *ret;
 345
 346	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 347	if (!ret)
 348		return prev;
 349	return ret;
 350}
 351
 352static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 353					  u64 offset)
 354{
 355	return tree_search_for_insert(tree, offset, NULL, NULL);
 356}
 357
 358static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 359		     struct extent_state *other)
 360{
 361	if (tree->ops && tree->ops->merge_extent_hook)
 362		tree->ops->merge_extent_hook(tree->private_data, new, other);
 363}
 364
 365/*
 366 * utility function to look for merge candidates inside a given range.
 367 * Any extents with matching state are merged together into a single
 368 * extent in the tree.  Extents with EXTENT_IO in their state field
 369 * are not merged because the end_io handlers need to be able to do
 370 * operations on them without sleeping (or doing allocations/splits).
 371 *
 372 * This should be called with the tree lock held.
 373 */
 374static void merge_state(struct extent_io_tree *tree,
 375		        struct extent_state *state)
 376{
 377	struct extent_state *other;
 378	struct rb_node *other_node;
 379
 380	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 381		return;
 382
 383	other_node = rb_prev(&state->rb_node);
 384	if (other_node) {
 385		other = rb_entry(other_node, struct extent_state, rb_node);
 386		if (other->end == state->start - 1 &&
 387		    other->state == state->state) {
 388			merge_cb(tree, state, other);
 389			state->start = other->start;
 390			rb_erase(&other->rb_node, &tree->state);
 391			RB_CLEAR_NODE(&other->rb_node);
 392			free_extent_state(other);
 393		}
 394	}
 395	other_node = rb_next(&state->rb_node);
 396	if (other_node) {
 397		other = rb_entry(other_node, struct extent_state, rb_node);
 398		if (other->start == state->end + 1 &&
 399		    other->state == state->state) {
 400			merge_cb(tree, state, other);
 401			state->end = other->end;
 402			rb_erase(&other->rb_node, &tree->state);
 403			RB_CLEAR_NODE(&other->rb_node);
 404			free_extent_state(other);
 405		}
 406	}
 407}
 408
 409static void set_state_cb(struct extent_io_tree *tree,
 410			 struct extent_state *state, unsigned *bits)
 411{
 412	if (tree->ops && tree->ops->set_bit_hook)
 413		tree->ops->set_bit_hook(tree->private_data, state, bits);
 414}
 415
 416static void clear_state_cb(struct extent_io_tree *tree,
 417			   struct extent_state *state, unsigned *bits)
 418{
 419	if (tree->ops && tree->ops->clear_bit_hook)
 420		tree->ops->clear_bit_hook(tree->private_data, state, bits);
 421}
 422
 423static void set_state_bits(struct extent_io_tree *tree,
 424			   struct extent_state *state, unsigned *bits,
 425			   struct extent_changeset *changeset);
 426
 427/*
 428 * insert an extent_state struct into the tree.  'bits' are set on the
 429 * struct before it is inserted.
 430 *
 431 * This may return -EEXIST if the extent is already there, in which case the
 432 * state struct is freed.
 433 *
 434 * The tree lock is not taken internally.  This is a utility function and
 435 * probably isn't what you want to call (see set/clear_extent_bit).
 436 */
 437static int insert_state(struct extent_io_tree *tree,
 438			struct extent_state *state, u64 start, u64 end,
 439			struct rb_node ***p,
 440			struct rb_node **parent,
 441			unsigned *bits, struct extent_changeset *changeset)
 442{
 443	struct rb_node *node;
 444
 445	if (end < start)
 446		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 447		       end, start);
 448	state->start = start;
 449	state->end = end;
 450
 451	set_state_bits(tree, state, bits, changeset);
 452
 453	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
 454	if (node) {
 455		struct extent_state *found;
 456		found = rb_entry(node, struct extent_state, rb_node);
 457		pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
 458		       found->start, found->end, start, end);
 459		return -EEXIST;
 460	}
 461	merge_state(tree, state);
 462	return 0;
 463}
 464
 465static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 466		     u64 split)
 467{
 468	if (tree->ops && tree->ops->split_extent_hook)
 469		tree->ops->split_extent_hook(tree->private_data, orig, split);
 470}
 471
 472/*
 473 * split a given extent state struct in two, inserting the preallocated
 474 * struct 'prealloc' as the newly created second half.  'split' indicates an
 475 * offset inside 'orig' where it should be split.
 476 *
 477 * Before calling,
 478 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 479 * are two extent state structs in the tree:
 480 * prealloc: [orig->start, split - 1]
 481 * orig: [ split, orig->end ]
 482 *
 483 * The tree locks are not taken by this function. They need to be held
 484 * by the caller.
 485 */
 486static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 487		       struct extent_state *prealloc, u64 split)
 488{
 489	struct rb_node *node;
 490
 491	split_cb(tree, orig, split);
 492
 493	prealloc->start = orig->start;
 494	prealloc->end = split - 1;
 495	prealloc->state = orig->state;
 496	orig->start = split;
 497
 498	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
 499			   &prealloc->rb_node, NULL, NULL);
 500	if (node) {
 501		free_extent_state(prealloc);
 502		return -EEXIST;
 503	}
 504	return 0;
 505}
 506
 507static struct extent_state *next_state(struct extent_state *state)
 
 
 508{
 509	struct rb_node *next = rb_next(&state->rb_node);
 510	if (next)
 511		return rb_entry(next, struct extent_state, rb_node);
 512	else
 513		return NULL;
 514}
 515
 516/*
 517 * utility function to clear some bits in an extent state struct.
 518 * it will optionally wake up any one waiting on this state (wake == 1).
 519 *
 520 * If no bits are set on the state struct after clearing things, the
 521 * struct is freed and removed from the tree
 522 */
 523static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 524					    struct extent_state *state,
 525					    unsigned *bits, int wake,
 526					    struct extent_changeset *changeset)
 527{
 528	struct extent_state *next;
 529	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 530	int ret;
 531
 532	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 533		u64 range = state->end - state->start + 1;
 534		WARN_ON(range > tree->dirty_bytes);
 535		tree->dirty_bytes -= range;
 536	}
 537	clear_state_cb(tree, state, bits);
 538	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
 539	BUG_ON(ret < 0);
 540	state->state &= ~bits_to_clear;
 541	if (wake)
 542		wake_up(&state->wq);
 543	if (state->state == 0) {
 544		next = next_state(state);
 545		if (extent_state_in_tree(state)) {
 546			rb_erase(&state->rb_node, &tree->state);
 547			RB_CLEAR_NODE(&state->rb_node);
 548			free_extent_state(state);
 549		} else {
 550			WARN_ON(1);
 551		}
 552	} else {
 553		merge_state(tree, state);
 554		next = next_state(state);
 555	}
 556	return next;
 557}
 558
 559static struct extent_state *
 560alloc_extent_state_atomic(struct extent_state *prealloc)
 561{
 562	if (!prealloc)
 563		prealloc = alloc_extent_state(GFP_ATOMIC);
 564
 565	return prealloc;
 
 566}
 567
 568static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 
 
 569{
 570	btrfs_panic(tree_fs_info(tree), err,
 571		    "Locking error: Extent tree was modified by another thread while locked.");
 572}
 
 
 
 573
 574/*
 575 * clear some bits on a range in the tree.  This may require splitting
 576 * or inserting elements in the tree, so the gfp mask is used to
 577 * indicate which allocations or sleeping are allowed.
 578 *
 579 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 580 * the given range from the tree regardless of state (ie for truncate).
 581 *
 582 * the range [start, end] is inclusive.
 583 *
 584 * This takes the tree lock, and returns 0 on success and < 0 on error.
 585 */
 586int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 587			      unsigned bits, int wake, int delete,
 588			      struct extent_state **cached_state,
 589			      gfp_t mask, struct extent_changeset *changeset)
 590{
 591	struct extent_state *state;
 592	struct extent_state *cached;
 593	struct extent_state *prealloc = NULL;
 594	struct rb_node *node;
 595	u64 last_end;
 596	int err;
 597	int clear = 0;
 598
 599	btrfs_debug_check_extent_io_range(tree, start, end);
 600
 601	if (bits & EXTENT_DELALLOC)
 602		bits |= EXTENT_NORESERVE;
 603
 604	if (delete)
 605		bits |= ~EXTENT_CTLBITS;
 606	bits |= EXTENT_FIRST_DELALLOC;
 607
 608	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 609		clear = 1;
 610again:
 611	if (!prealloc && gfpflags_allow_blocking(mask)) {
 612		/*
 613		 * Don't care for allocation failure here because we might end
 614		 * up not needing the pre-allocated extent state at all, which
 615		 * is the case if we only have in the tree extent states that
 616		 * cover our input range and don't cover too any other range.
 617		 * If we end up needing a new extent state we allocate it later.
 618		 */
 619		prealloc = alloc_extent_state(mask);
 620	}
 621
 622	spin_lock(&tree->lock);
 623	if (cached_state) {
 624		cached = *cached_state;
 625
 626		if (clear) {
 627			*cached_state = NULL;
 628			cached_state = NULL;
 629		}
 630
 631		if (cached && extent_state_in_tree(cached) &&
 632		    cached->start <= start && cached->end > start) {
 633			if (clear)
 634				refcount_dec(&cached->refs);
 635			state = cached;
 636			goto hit_next;
 637		}
 638		if (clear)
 639			free_extent_state(cached);
 640	}
 641	/*
 642	 * this search will find the extents that end after
 643	 * our range starts
 644	 */
 645	node = tree_search(tree, start);
 646	if (!node)
 647		goto out;
 648	state = rb_entry(node, struct extent_state, rb_node);
 649hit_next:
 650	if (state->start > end)
 651		goto out;
 652	WARN_ON(state->end < start);
 653	last_end = state->end;
 654
 655	/* the state doesn't have the wanted bits, go ahead */
 656	if (!(state->state & bits)) {
 657		state = next_state(state);
 658		goto next;
 659	}
 660
 661	/*
 662	 *     | ---- desired range ---- |
 663	 *  | state | or
 664	 *  | ------------- state -------------- |
 665	 *
 666	 * We need to split the extent we found, and may flip
 667	 * bits on second half.
 668	 *
 669	 * If the extent we found extends past our range, we
 670	 * just split and search again.  It'll get split again
 671	 * the next time though.
 672	 *
 673	 * If the extent we found is inside our range, we clear
 674	 * the desired bit on it.
 675	 */
 676
 677	if (state->start < start) {
 678		prealloc = alloc_extent_state_atomic(prealloc);
 679		BUG_ON(!prealloc);
 680		err = split_state(tree, state, prealloc, start);
 681		if (err)
 682			extent_io_tree_panic(tree, err);
 683
 684		prealloc = NULL;
 685		if (err)
 686			goto out;
 687		if (state->end <= end) {
 688			state = clear_state_bit(tree, state, &bits, wake,
 689						changeset);
 690			goto next;
 691		}
 692		goto search_again;
 693	}
 694	/*
 695	 * | ---- desired range ---- |
 696	 *                        | state |
 697	 * We need to split the extent, and clear the bit
 698	 * on the first half
 699	 */
 700	if (state->start <= end && state->end > end) {
 701		prealloc = alloc_extent_state_atomic(prealloc);
 702		BUG_ON(!prealloc);
 703		err = split_state(tree, state, prealloc, end + 1);
 704		if (err)
 705			extent_io_tree_panic(tree, err);
 706
 707		if (wake)
 708			wake_up(&state->wq);
 709
 710		clear_state_bit(tree, prealloc, &bits, wake, changeset);
 711
 712		prealloc = NULL;
 713		goto out;
 714	}
 715
 716	state = clear_state_bit(tree, state, &bits, wake, changeset);
 717next:
 718	if (last_end == (u64)-1)
 719		goto out;
 720	start = last_end + 1;
 721	if (start <= end && state && !need_resched())
 722		goto hit_next;
 723
 724search_again:
 725	if (start > end)
 726		goto out;
 727	spin_unlock(&tree->lock);
 728	if (gfpflags_allow_blocking(mask))
 729		cond_resched();
 730	goto again;
 731
 732out:
 733	spin_unlock(&tree->lock);
 734	if (prealloc)
 735		free_extent_state(prealloc);
 736
 737	return 0;
 738
 739}
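/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Callers normally go through the clear_extent_bit() wrapper defined
 * further below instead of calling __clear_extent_bit() directly.  A
 * hypothetical caller clearing delalloc-related bits on an inclusive
 * byte range, waking any waiters but leaving unrelated bits in place,
 * might look like:
 *
 *	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
 *			 EXTENT_DELALLOC | EXTENT_DIRTY, 1, 0, NULL);
 *
 * 'inode', 'start' and 'end' are assumed to be supplied by that caller.
 */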
 740
 741static void wait_on_state(struct extent_io_tree *tree,
 742			  struct extent_state *state)
 743		__releases(tree->lock)
 744		__acquires(tree->lock)
 745{
 746	DEFINE_WAIT(wait);
 747	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 748	spin_unlock(&tree->lock);
 749	schedule();
 750	spin_lock(&tree->lock);
 751	finish_wait(&state->wq, &wait);
 752}
 753
 754/*
 755 * waits for one or more bits to clear on a range in the state tree.
 756 * The range [start, end] is inclusive.
 757 * The tree lock is taken by this function
 758 */
 759static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 760			    unsigned long bits)
 761{
 762	struct extent_state *state;
 763	struct rb_node *node;
 764
 765	btrfs_debug_check_extent_io_range(tree, start, end);
 766
 767	spin_lock(&tree->lock);
 768again:
 769	while (1) {
 770		/*
 771		 * this search will find all the extents that end after
 772		 * our range starts
 773		 */
 774		node = tree_search(tree, start);
 775process_node:
 776		if (!node)
 777			break;
 778
 779		state = rb_entry(node, struct extent_state, rb_node);
 780
 781		if (state->start > end)
 782			goto out;
 783
 784		if (state->state & bits) {
 785			start = state->start;
 786			refcount_inc(&state->refs);
 787			wait_on_state(tree, state);
 788			free_extent_state(state);
 789			goto again;
 790		}
 791		start = state->end + 1;
 792
 793		if (start > end)
 794			break;
 795
 796		if (!cond_resched_lock(&tree->lock)) {
 797			node = rb_next(node);
 798			goto process_node;
 799		}
 800	}
 801out:
 802	spin_unlock(&tree->lock);
 803}
 804
 805static void set_state_bits(struct extent_io_tree *tree,
 806			   struct extent_state *state,
 807			   unsigned *bits, struct extent_changeset *changeset)
 808{
 809	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 810	int ret;
 811
 812	set_state_cb(tree, state, bits);
 813	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 814		u64 range = state->end - state->start + 1;
 815		tree->dirty_bytes += range;
 816	}
 817	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
 818	BUG_ON(ret < 0);
 819	state->state |= bits_to_set;
 820}
 821
 822static void cache_state_if_flags(struct extent_state *state,
 823				 struct extent_state **cached_ptr,
 824				 unsigned flags)
 825{
 826	if (cached_ptr && !(*cached_ptr)) {
 827		if (!flags || (state->state & flags)) {
 828			*cached_ptr = state;
 829			refcount_inc(&state->refs);
 830		}
 831	}
 832}
 833
 834static void cache_state(struct extent_state *state,
 835			struct extent_state **cached_ptr)
 836{
 837	return cache_state_if_flags(state, cached_ptr,
 838				    EXTENT_IOBITS | EXTENT_BOUNDARY);
 839}
 840
 841/*
 842 * set some bits on a range in the tree.  This may require allocations or
 843 * sleeping, so the gfp mask is used to indicate what is allowed.
 844 *
 845 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 846 * part of the range already has the desired bits set.  The start of the
 847 * existing range is returned in failed_start in this case.
 848 *
 849 * [start, end] is inclusive. This takes the tree lock.
 850 */
 851
 852static int __must_check
 853__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 854		 unsigned bits, unsigned exclusive_bits,
 855		 u64 *failed_start, struct extent_state **cached_state,
 856		 gfp_t mask, struct extent_changeset *changeset)
 857{
 858	struct extent_state *state;
 859	struct extent_state *prealloc = NULL;
 860	struct rb_node *node;
 861	struct rb_node **p;
 862	struct rb_node *parent;
 863	int err = 0;
 864	u64 last_start;
 865	u64 last_end;
 866
 867	btrfs_debug_check_extent_io_range(tree, start, end);
 868
 869	bits |= EXTENT_FIRST_DELALLOC;
 870again:
 871	if (!prealloc && gfpflags_allow_blocking(mask)) {
 872		/*
 873		 * Don't care for allocation failure here because we might end
 874		 * up not needing the pre-allocated extent state at all, which
 875		 * is the case if we only have in the tree extent states that
 876		 * cover our input range and don't cover any other range.
 877		 * If we end up needing a new extent state we allocate it later.
 878		 */
 879		prealloc = alloc_extent_state(mask);
 880	}
 881
 882	spin_lock(&tree->lock);
 883	if (cached_state && *cached_state) {
 884		state = *cached_state;
 885		if (state->start <= start && state->end > start &&
 886		    extent_state_in_tree(state)) {
 887			node = &state->rb_node;
 888			goto hit_next;
 889		}
 890	}
 891	/*
 892	 * this search will find all the extents that end after
 893	 * our range starts.
 894	 */
 895	node = tree_search_for_insert(tree, start, &p, &parent);
 896	if (!node) {
 897		prealloc = alloc_extent_state_atomic(prealloc);
 898		BUG_ON(!prealloc);
 899		err = insert_state(tree, prealloc, start, end,
 900				   &p, &parent, &bits, changeset);
 901		if (err)
 902			extent_io_tree_panic(tree, err);
 903
 904		cache_state(prealloc, cached_state);
 905		prealloc = NULL;
 906		goto out;
 907	}
 908	state = rb_entry(node, struct extent_state, rb_node);
 909hit_next:
 910	last_start = state->start;
 911	last_end = state->end;
 912
 913	/*
 914	 * | ---- desired range ---- |
 915	 * | state |
 916	 *
 917	 * Just lock what we found and keep going
 918	 */
 919	if (state->start == start && state->end <= end) {
 920		if (state->state & exclusive_bits) {
 921			*failed_start = state->start;
 922			err = -EEXIST;
 923			goto out;
 924		}
 925
 926		set_state_bits(tree, state, &bits, changeset);
 927		cache_state(state, cached_state);
 928		merge_state(tree, state);
 929		if (last_end == (u64)-1)
 930			goto out;
 931		start = last_end + 1;
 932		state = next_state(state);
 933		if (start < end && state && state->start == start &&
 934		    !need_resched())
 935			goto hit_next;
 936		goto search_again;
 937	}
 938
 939	/*
 940	 *     | ---- desired range ---- |
 941	 * | state |
 942	 *   or
 943	 * | ------------- state -------------- |
 944	 *
 945	 * We need to split the extent we found, and may flip bits on
 946	 * second half.
 947	 *
 948	 * If the extent we found extends past our
 949	 * range, we just split and search again.  It'll get split
 950	 * again the next time though.
 951	 *
 952	 * If the extent we found is inside our range, we set the
 953	 * desired bit on it.
 954	 */
 955	if (state->start < start) {
 956		if (state->state & exclusive_bits) {
 957			*failed_start = start;
 958			err = -EEXIST;
 959			goto out;
 960		}
 961
 962		prealloc = alloc_extent_state_atomic(prealloc);
 963		BUG_ON(!prealloc);
 964		err = split_state(tree, state, prealloc, start);
 965		if (err)
 966			extent_io_tree_panic(tree, err);
 967
 968		prealloc = NULL;
 969		if (err)
 970			goto out;
 971		if (state->end <= end) {
 972			set_state_bits(tree, state, &bits, changeset);
 973			cache_state(state, cached_state);
 974			merge_state(tree, state);
 975			if (last_end == (u64)-1)
 976				goto out;
 977			start = last_end + 1;
 978			state = next_state(state);
 979			if (start < end && state && state->start == start &&
 980			    !need_resched())
 981				goto hit_next;
 982		}
 983		goto search_again;
 984	}
 985	/*
 986	 * | ---- desired range ---- |
 987	 *     | state | or               | state |
 988	 *
 989	 * There's a hole, we need to insert something in it and
 990	 * ignore the extent we found.
 991	 */
 992	if (state->start > start) {
 993		u64 this_end;
 994		if (end < last_start)
 995			this_end = end;
 996		else
 997			this_end = last_start - 1;
 998
 999		prealloc = alloc_extent_state_atomic(prealloc);
1000		BUG_ON(!prealloc);
1001
1002		/*
1003		 * Avoid freeing 'prealloc' if it can be merged with
1004		 * the later extent.
1005		 */
1006		err = insert_state(tree, prealloc, start, this_end,
1007				   NULL, NULL, &bits, changeset);
1008		if (err)
1009			extent_io_tree_panic(tree, err);
1010
1011		cache_state(prealloc, cached_state);
1012		prealloc = NULL;
1013		start = this_end + 1;
1014		goto search_again;
1015	}
1016	/*
1017	 * | ---- desired range ---- |
1018	 *                        | state |
1019	 * We need to split the extent, and set the bit
1020	 * on the first half
1021	 */
1022	if (state->start <= end && state->end > end) {
1023		if (state->state & exclusive_bits) {
1024			*failed_start = start;
1025			err = -EEXIST;
1026			goto out;
1027		}
1028
1029		prealloc = alloc_extent_state_atomic(prealloc);
1030		BUG_ON(!prealloc);
1031		err = split_state(tree, state, prealloc, end + 1);
1032		if (err)
1033			extent_io_tree_panic(tree, err);
1034
1035		set_state_bits(tree, prealloc, &bits, changeset);
1036		cache_state(prealloc, cached_state);
1037		merge_state(tree, prealloc);
1038		prealloc = NULL;
1039		goto out;
1040	}
1041
1042search_again:
1043	if (start > end)
1044		goto out;
1045	spin_unlock(&tree->lock);
1046	if (gfpflags_allow_blocking(mask))
1047		cond_resched();
1048	goto again;
1049
1050out:
1051	spin_unlock(&tree->lock);
1052	if (prealloc)
1053		free_extent_state(prealloc);
1054
1055	return err;
1056
1057}
1058
1059int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1060		   unsigned bits, u64 * failed_start,
1061		   struct extent_state **cached_state, gfp_t mask)
1062{
1063	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1064				cached_state, mask, NULL);
1065}
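/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A hypothetical caller marking a range as delalloc and caching the
 * resulting state for later lookups might use the wrapper above as:
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_bit(tree, start, end, EXTENT_DELALLOC,
 *		       NULL, &cached, GFP_NOFS);
 *
 * failed_start may be NULL here because no exclusive bits are passed,
 * so it is never written; 'tree', 'start' and 'end' come from the caller.
 */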
1066
1067
1068/**
1069 * convert_extent_bit - convert all bits in a given range from one bit to
1070 * 			another
1071 * @tree:	the io tree to search
1072 * @start:	the start offset in bytes
1073 * @end:	the end offset in bytes (inclusive)
1074 * @bits:	the bits to set in this range
1075 * @clear_bits:	the bits to clear in this range
1076 * @cached_state:	state that we're going to cache
1077 *
1078 * This will go through and set bits for the given range.  If any states exist
1079 * already in this range they are set with the given bit and cleared of the
1080 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1081 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1082 * boundary bits like LOCK.
1083 *
1084 * All allocations are done with GFP_NOFS.
1085 */
1086int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1087		       unsigned bits, unsigned clear_bits,
1088		       struct extent_state **cached_state)
1089{
1090	struct extent_state *state;
1091	struct extent_state *prealloc = NULL;
1092	struct rb_node *node;
1093	struct rb_node **p;
1094	struct rb_node *parent;
1095	int err = 0;
1096	u64 last_start;
1097	u64 last_end;
1098	bool first_iteration = true;
1099
1100	btrfs_debug_check_extent_io_range(tree, start, end);
1101
1102again:
1103	if (!prealloc) {
1104		/*
1105		 * Best effort, don't worry if extent state allocation fails
1106		 * here for the first iteration. We might have a cached state
1107		 * that matches exactly the target range, in which case no
1108		 * extent state allocations are needed. We'll only know this
1109		 * after locking the tree.
1110		 */
1111		prealloc = alloc_extent_state(GFP_NOFS);
1112		if (!prealloc && !first_iteration)
1113			return -ENOMEM;
1114	}
1115
1116	spin_lock(&tree->lock);
1117	if (cached_state && *cached_state) {
1118		state = *cached_state;
1119		if (state->start <= start && state->end > start &&
1120		    extent_state_in_tree(state)) {
1121			node = &state->rb_node;
1122			goto hit_next;
1123		}
1124	}
1125
1126	/*
1127	 * this search will find all the extents that end after
1128	 * our range starts.
1129	 */
1130	node = tree_search_for_insert(tree, start, &p, &parent);
1131	if (!node) {
1132		prealloc = alloc_extent_state_atomic(prealloc);
1133		if (!prealloc) {
1134			err = -ENOMEM;
1135			goto out;
1136		}
1137		err = insert_state(tree, prealloc, start, end,
1138				   &p, &parent, &bits, NULL);
1139		if (err)
1140			extent_io_tree_panic(tree, err);
1141		cache_state(prealloc, cached_state);
1142		prealloc = NULL;
1143		goto out;
1144	}
1145	state = rb_entry(node, struct extent_state, rb_node);
1146hit_next:
1147	last_start = state->start;
1148	last_end = state->end;
1149
1150	/*
1151	 * | ---- desired range ---- |
1152	 * | state |
1153	 *
1154	 * Just lock what we found and keep going
1155	 */
1156	if (state->start == start && state->end <= end) {
1157		set_state_bits(tree, state, &bits, NULL);
1158		cache_state(state, cached_state);
1159		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1160		if (last_end == (u64)-1)
1161			goto out;
1162		start = last_end + 1;
1163		if (start < end && state && state->start == start &&
1164		    !need_resched())
1165			goto hit_next;
1166		goto search_again;
1167	}
1168
1169	/*
1170	 *     | ---- desired range ---- |
1171	 * | state |
1172	 *   or
1173	 * | ------------- state -------------- |
1174	 *
1175	 * We need to split the extent we found, and may flip bits on
1176	 * second half.
1177	 *
1178	 * If the extent we found extends past our
1179	 * range, we just split and search again.  It'll get split
1180	 * again the next time though.
1181	 *
1182	 * If the extent we found is inside our range, we set the
1183	 * desired bit on it.
1184	 */
1185	if (state->start < start) {
1186		prealloc = alloc_extent_state_atomic(prealloc);
1187		if (!prealloc) {
1188			err = -ENOMEM;
1189			goto out;
1190		}
1191		err = split_state(tree, state, prealloc, start);
1192		if (err)
1193			extent_io_tree_panic(tree, err);
1194		prealloc = NULL;
1195		if (err)
1196			goto out;
1197		if (state->end <= end) {
1198			set_state_bits(tree, state, &bits, NULL);
1199			cache_state(state, cached_state);
1200			state = clear_state_bit(tree, state, &clear_bits, 0,
1201						NULL);
1202			if (last_end == (u64)-1)
1203				goto out;
1204			start = last_end + 1;
1205			if (start < end && state && state->start == start &&
1206			    !need_resched())
1207				goto hit_next;
1208		}
1209		goto search_again;
1210	}
1211	/*
1212	 * | ---- desired range ---- |
1213	 *     | state | or               | state |
1214	 *
1215	 * There's a hole, we need to insert something in it and
1216	 * ignore the extent we found.
1217	 */
1218	if (state->start > start) {
1219		u64 this_end;
1220		if (end < last_start)
1221			this_end = end;
1222		else
1223			this_end = last_start - 1;
1224
1225		prealloc = alloc_extent_state_atomic(prealloc);
1226		if (!prealloc) {
1227			err = -ENOMEM;
1228			goto out;
1229		}
1230
1231		/*
1232		 * Avoid freeing 'prealloc' if it can be merged with
1233		 * the later extent.
1234		 */
1235		err = insert_state(tree, prealloc, start, this_end,
1236				   NULL, NULL, &bits, NULL);
1237		if (err)
1238			extent_io_tree_panic(tree, err);
1239		cache_state(prealloc, cached_state);
1240		prealloc = NULL;
1241		start = this_end + 1;
1242		goto search_again;
1243	}
1244	/*
1245	 * | ---- desired range ---- |
1246	 *                        | state |
1247	 * We need to split the extent, and set the bit
1248	 * on the first half
1249	 */
1250	if (state->start <= end && state->end > end) {
1251		prealloc = alloc_extent_state_atomic(prealloc);
1252		if (!prealloc) {
1253			err = -ENOMEM;
1254			goto out;
1255		}
1256
1257		err = split_state(tree, state, prealloc, end + 1);
1258		if (err)
1259			extent_io_tree_panic(tree, err);
1260
1261		set_state_bits(tree, prealloc, &bits, NULL);
1262		cache_state(prealloc, cached_state);
1263		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1264		prealloc = NULL;
1265		goto out;
1266	}
1267
1268search_again:
1269	if (start > end)
1270		goto out;
1271	spin_unlock(&tree->lock);
1272	cond_resched();
1273	first_iteration = false;
1274	goto again;
1275
1276out:
1277	spin_unlock(&tree->lock);
1278	if (prealloc)
1279		free_extent_state(prealloc);
1280
1281	return err;
1282}
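/*
 * Editor's note -- hypothetical example only.  Converting a range from
 * delalloc to dirty, as the comment above suggests, would look roughly
 * like:
 *
 *	convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *			   EXTENT_DELALLOC, &cached_state);
 *
 * All allocations inside use GFP_NOFS, per the comment above, so callers
 * must tolerate sleeping.
 */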
1283
1284/* wrappers around set/clear extent bit */
1285int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1286			   unsigned bits, struct extent_changeset *changeset)
1287{
1288	/*
1289	 * We don't support EXTENT_LOCKED yet, as current changeset will
1290	 * record any bits changed, so for EXTENT_LOCKED case, it will
1291	 * either fail with -EEXIST or changeset will record the whole
1292	 * range.
1293	 */
1294	BUG_ON(bits & EXTENT_LOCKED);
1295
1296	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1297				changeset);
1298}
1299
1300int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1301		     unsigned bits, int wake, int delete,
1302		     struct extent_state **cached)
1303{
1304	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1305				  cached, GFP_NOFS, NULL);
1306}
1307
1308int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1309		unsigned bits, struct extent_changeset *changeset)
1310{
1311	/*
1312	 * Don't support EXTENT_LOCKED case, same reason as
1313	 * set_record_extent_bits().
1314	 */
1315	BUG_ON(bits & EXTENT_LOCKED);
1316
1317	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1318				  changeset);
1319}
1320
1321/*
1322 * either insert or lock the state struct between start and end; use mask to
1323 * tell us if waiting is desired.
1324 */
1325int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1326		     struct extent_state **cached_state)
1327{
1328	int err;
1329	u64 failed_start;
1330
1331	while (1) {
1332		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1333				       EXTENT_LOCKED, &failed_start,
1334				       cached_state, GFP_NOFS, NULL);
1335		if (err == -EEXIST) {
1336			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1337			start = failed_start;
1338		} else
1339			break;
1340		WARN_ON(start > end);
1341	}
1342	return err;
1343}
1344
1345int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1346{
1347	int err;
1348	u64 failed_start;
1349
1350	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1351			       &failed_start, NULL, GFP_NOFS, NULL);
1352	if (err == -EEXIST) {
1353		if (failed_start > start)
1354			clear_extent_bit(tree, start, failed_start - 1,
1355					 EXTENT_LOCKED, 1, 0, NULL);
1356		return 0;
1357	}
1358	return 1;
1359}
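/*
 * Editor's note -- illustrative pattern, based on the usage visible in
 * find_lock_delalloc_range() further below.  A typical caller locks a
 * range, works on it while holding the cached state, then releases both:
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached_state);
 *	... operate on [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached_state);
 *
 * unlock_extent_cached() is assumed to be declared in extent_io.h; it is
 * not defined in this excerpt.
 */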
1360
1361void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1362{
1363	unsigned long index = start >> PAGE_SHIFT;
1364	unsigned long end_index = end >> PAGE_SHIFT;
1365	struct page *page;
1366
1367	while (index <= end_index) {
1368		page = find_get_page(inode->i_mapping, index);
1369		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1370		clear_page_dirty_for_io(page);
1371		put_page(page);
1372		index++;
1373	}
1374}
1375
1376void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1377{
1378	unsigned long index = start >> PAGE_SHIFT;
1379	unsigned long end_index = end >> PAGE_SHIFT;
1380	struct page *page;
1381
1382	while (index <= end_index) {
1383		page = find_get_page(inode->i_mapping, index);
1384		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1385		__set_page_dirty_nobuffers(page);
1386		account_page_redirty(page);
1387		put_page(page);
1388		index++;
1389	}
1390}
1391
1392/*
1393 * helper function to set both pages and extents in the tree writeback
1394 */
1395static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1396{
1397	tree->ops->set_range_writeback(tree->private_data, start, end);
1398}
1399
1400/* find the first state struct with 'bits' set after 'start', and
1401 * return it.  tree->lock must be held.  NULL will be returned if
1402 * nothing was found after 'start'
1403 */
1404static struct extent_state *
1405find_first_extent_bit_state(struct extent_io_tree *tree,
1406			    u64 start, unsigned bits)
1407{
1408	struct rb_node *node;
1409	struct extent_state *state;
1410
1411	/*
1412	 * this search will find all the extents that end after
1413	 * our range starts.
1414	 */
1415	node = tree_search(tree, start);
1416	if (!node)
1417		goto out;
1418
1419	while (1) {
1420		state = rb_entry(node, struct extent_state, rb_node);
1421		if (state->end >= start && (state->state & bits))
1422			return state;
1423
1424		node = rb_next(node);
1425		if (!node)
1426			break;
1427	}
1428out:
1429	return NULL;
1430}
1431
1432/*
1433 * find the first offset in the io tree with 'bits' set. zero is
1434 * returned if we find something, and *start_ret and *end_ret are
1435 * set to reflect the state struct that was found.
1436 *
1437 * If nothing was found, 1 is returned; if something was found, 0 is returned.
1438 */
1439int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1440			  u64 *start_ret, u64 *end_ret, unsigned bits,
1441			  struct extent_state **cached_state)
1442{
1443	struct extent_state *state;
1444	struct rb_node *n;
1445	int ret = 1;
1446
1447	spin_lock(&tree->lock);
1448	if (cached_state && *cached_state) {
1449		state = *cached_state;
1450		if (state->end == start - 1 && extent_state_in_tree(state)) {
1451			n = rb_next(&state->rb_node);
1452			while (n) {
1453				state = rb_entry(n, struct extent_state,
1454						 rb_node);
1455				if (state->state & bits)
1456					goto got_it;
1457				n = rb_next(n);
1458			}
1459			free_extent_state(*cached_state);
1460			*cached_state = NULL;
1461			goto out;
1462		}
1463		free_extent_state(*cached_state);
1464		*cached_state = NULL;
1465	}
1466
1467	state = find_first_extent_bit_state(tree, start, bits);
1468got_it:
1469	if (state) {
1470		cache_state_if_flags(state, cached_state, 0);
1471		*start_ret = state->start;
1472		*end_ret = state->end;
1473		ret = 0;
1474	}
1475out:
1476	spin_unlock(&tree->lock);
1477	return ret;
1478}
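/*
 * Editor's note -- hypothetical caller, for illustration only.  Because
 * the function returns 0 on a match and 1 when nothing more is found, an
 * io tree can be scanned for every range carrying a bit by advancing
 * past each hit:
 *
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */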
1479
1480/*
1481 * find a contiguous range of bytes in the file marked as delalloc, not
1482 * more than 'max_bytes'.  start and end are used to return the range,
1483 *
1484 * 1 is returned if we find something, 0 if nothing was in the tree
1485 */
1486static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1487					u64 *start, u64 *end, u64 max_bytes,
1488					struct extent_state **cached_state)
1489{
1490	struct rb_node *node;
1491	struct extent_state *state;
1492	u64 cur_start = *start;
1493	u64 found = 0;
1494	u64 total_bytes = 0;
1495
1496	spin_lock(&tree->lock);
1497
1498	/*
1499	 * this search will find all the extents that end after
1500	 * our range starts.
1501	 */
1502	node = tree_search(tree, cur_start);
1503	if (!node) {
1504		if (!found)
1505			*end = (u64)-1;
1506		goto out;
1507	}
1508
1509	while (1) {
1510		state = rb_entry(node, struct extent_state, rb_node);
1511		if (found && (state->start != cur_start ||
1512			      (state->state & EXTENT_BOUNDARY))) {
1513			goto out;
1514		}
1515		if (!(state->state & EXTENT_DELALLOC)) {
1516			if (!found)
1517				*end = state->end;
1518			goto out;
1519		}
1520		if (!found) {
1521			*start = state->start;
1522			*cached_state = state;
1523			refcount_inc(&state->refs);
1524		}
1525		found++;
1526		*end = state->end;
1527		cur_start = state->end + 1;
1528		node = rb_next(node);
1529		total_bytes += state->end - state->start + 1;
1530		if (total_bytes >= max_bytes)
1531			break;
1532		if (!node)
1533			break;
1534	}
1535out:
1536	spin_unlock(&tree->lock);
1537	return found;
1538}
1539
1540static int __process_pages_contig(struct address_space *mapping,
1541				  struct page *locked_page,
1542				  pgoff_t start_index, pgoff_t end_index,
1543				  unsigned long page_ops, pgoff_t *index_ret);
1544
1545static noinline void __unlock_for_delalloc(struct inode *inode,
1546					   struct page *locked_page,
1547					   u64 start, u64 end)
1548{
1549	unsigned long index = start >> PAGE_SHIFT;
1550	unsigned long end_index = end >> PAGE_SHIFT;
1551
1552	ASSERT(locked_page);
1553	if (index == locked_page->index && end_index == index)
1554		return;
1555
1556	__process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1557			       PAGE_UNLOCK, NULL);
1558}
1559
1560static noinline int lock_delalloc_pages(struct inode *inode,
1561					struct page *locked_page,
1562					u64 delalloc_start,
1563					u64 delalloc_end)
1564{
1565	unsigned long index = delalloc_start >> PAGE_SHIFT;
1566	unsigned long index_ret = index;
1567	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1568	int ret;
1569
1570	ASSERT(locked_page);
1571	if (index == locked_page->index && index == end_index)
1572		return 0;
1573
1574	ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1575				     end_index, PAGE_LOCK, &index_ret);
1576	if (ret == -EAGAIN)
1577		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1578				      (u64)index_ret << PAGE_SHIFT);
1579	return ret;
1580}
1581
1582/*
1583 * find a contiguous range of bytes in the file marked as delalloc, not
1584 * more than 'max_bytes'.  start and end are used to return the range,
1585 *
1586 * 1 is returned if we find something, 0 if nothing was in the tree
1587 */
1588STATIC u64 find_lock_delalloc_range(struct inode *inode,
1589				    struct extent_io_tree *tree,
1590				    struct page *locked_page, u64 *start,
1591				    u64 *end, u64 max_bytes)
1592{
1593	u64 delalloc_start;
1594	u64 delalloc_end;
1595	u64 found;
1596	struct extent_state *cached_state = NULL;
1597	int ret;
1598	int loops = 0;
1599
1600again:
1601	/* step one, find a bunch of delalloc bytes starting at start */
1602	delalloc_start = *start;
1603	delalloc_end = 0;
1604	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1605				    max_bytes, &cached_state);
1606	if (!found || delalloc_end <= *start) {
1607		*start = delalloc_start;
1608		*end = delalloc_end;
1609		free_extent_state(cached_state);
1610		return 0;
1611	}
1612
1613	/*
1614	 * start comes from the offset of locked_page.  We have to lock
1615	 * pages in order, so we can't process delalloc bytes before
1616	 * locked_page
1617	 */
1618	if (delalloc_start < *start)
1619		delalloc_start = *start;
1620
1621	/*
1622	 * make sure to limit the number of pages we try to lock down
1623	 */
1624	if (delalloc_end + 1 - delalloc_start > max_bytes)
1625		delalloc_end = delalloc_start + max_bytes - 1;
1626
1627	/* step two, lock all the pages after the page that has start */
1628	ret = lock_delalloc_pages(inode, locked_page,
1629				  delalloc_start, delalloc_end);
1630	if (ret == -EAGAIN) {
1631		/* some of the pages are gone, lets avoid looping by
1632		 * shortening the size of the delalloc range we're searching
1633		 */
1634		free_extent_state(cached_state);
1635		cached_state = NULL;
1636		if (!loops) {
1637			max_bytes = PAGE_SIZE;
1638			loops = 1;
1639			goto again;
1640		} else {
1641			found = 0;
1642			goto out_failed;
1643		}
1644	}
1645	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1646
1647	/* step three, lock the state bits for the whole range */
1648	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1649
1650	/* then test to make sure it is all still delalloc */
1651	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1652			     EXTENT_DELALLOC, 1, cached_state);
1653	if (!ret) {
1654		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1655				     &cached_state);
1656		__unlock_for_delalloc(inode, locked_page,
1657			      delalloc_start, delalloc_end);
1658		cond_resched();
1659		goto again;
1660	}
1661	free_extent_state(cached_state);
1662	*start = delalloc_start;
1663	*end = delalloc_end;
1664out_failed:
1665	return found;
1666}
1667
1668static int __process_pages_contig(struct address_space *mapping,
1669				  struct page *locked_page,
1670				  pgoff_t start_index, pgoff_t end_index,
1671				  unsigned long page_ops, pgoff_t *index_ret)
1672{
1673	unsigned long nr_pages = end_index - start_index + 1;
1674	unsigned long pages_locked = 0;
1675	pgoff_t index = start_index;
1676	struct page *pages[16];
1677	unsigned ret;
1678	int err = 0;
1679	int i;
1680
1681	if (page_ops & PAGE_LOCK) {
1682		ASSERT(page_ops == PAGE_LOCK);
1683		ASSERT(index_ret && *index_ret == start_index);
1684	}
1685
1686	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1687		mapping_set_error(mapping, -EIO);
1688
1689	while (nr_pages > 0) {
1690		ret = find_get_pages_contig(mapping, index,
1691				     min_t(unsigned long,
1692				     nr_pages, ARRAY_SIZE(pages)), pages);
1693		if (ret == 0) {
1694			/*
1695			 * Only if we're going to lock these pages,
1696			 * can we find nothing at @index.
1697			 */
1698			ASSERT(page_ops & PAGE_LOCK);
1699			err = -EAGAIN;
1700			goto out;
1701		}
1702
1703		for (i = 0; i < ret; i++) {
1704			if (page_ops & PAGE_SET_PRIVATE2)
1705				SetPagePrivate2(pages[i]);
1706
1707			if (pages[i] == locked_page) {
1708				put_page(pages[i]);
1709				pages_locked++;
1710				continue;
1711			}
1712			if (page_ops & PAGE_CLEAR_DIRTY)
1713				clear_page_dirty_for_io(pages[i]);
1714			if (page_ops & PAGE_SET_WRITEBACK)
1715				set_page_writeback(pages[i]);
1716			if (page_ops & PAGE_SET_ERROR)
1717				SetPageError(pages[i]);
1718			if (page_ops & PAGE_END_WRITEBACK)
1719				end_page_writeback(pages[i]);
1720			if (page_ops & PAGE_UNLOCK)
1721				unlock_page(pages[i]);
1722			if (page_ops & PAGE_LOCK) {
1723				lock_page(pages[i]);
1724				if (!PageDirty(pages[i]) ||
1725				    pages[i]->mapping != mapping) {
1726					unlock_page(pages[i]);
1727					put_page(pages[i]);
1728					err = -EAGAIN;
1729					goto out;
1730				}
1731			}
1732			put_page(pages[i]);
1733			pages_locked++;
1734		}
1735		nr_pages -= ret;
1736		index += ret;
1737		cond_resched();
1738	}
1739out:
1740	if (err && index_ret)
1741		*index_ret = start_index + pages_locked - 1;
1742	return err;
1743}
1744
1745void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1746				 u64 delalloc_end, struct page *locked_page,
1747				 unsigned clear_bits,
1748				 unsigned long page_ops)
1749{
1750	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
1751			 NULL);
1752
1753	__process_pages_contig(inode->i_mapping, locked_page,
1754			       start >> PAGE_SHIFT, end >> PAGE_SHIFT,
1755			       page_ops, NULL);
1756}
1757
1758/*
1759 * count the number of bytes in the tree that have a given bit(s)
1760 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1761 * cached.  The total number found is returned.
1762 */
1763u64 count_range_bits(struct extent_io_tree *tree,
1764		     u64 *start, u64 search_end, u64 max_bytes,
1765		     unsigned bits, int contig)
1766{
1767	struct rb_node *node;
1768	struct extent_state *state;
1769	u64 cur_start = *start;
1770	u64 total_bytes = 0;
1771	u64 last = 0;
1772	int found = 0;
1773
1774	if (WARN_ON(search_end <= cur_start))
1775		return 0;
1776
1777	spin_lock(&tree->lock);
1778	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1779		total_bytes = tree->dirty_bytes;
1780		goto out;
1781	}
1782	/*
1783	 * this search will find all the extents that end after
1784	 * our range starts.
1785	 */
1786	node = tree_search(tree, cur_start);
1787	if (!node)
1788		goto out;
1789
1790	while (1) {
1791		state = rb_entry(node, struct extent_state, rb_node);
1792		if (state->start > search_end)
1793			break;
1794		if (contig && found && state->start > last + 1)
1795			break;
1796		if (state->end >= cur_start && (state->state & bits) == bits) {
1797			total_bytes += min(search_end, state->end) + 1 -
1798				       max(cur_start, state->start);
1799			if (total_bytes >= max_bytes)
1800				break;
1801			if (!found) {
1802				*start = max(cur_start, state->start);
1803				found = 1;
1804			}
1805			last = state->end;
1806		} else if (contig && found) {
1807			break;
1808		}
1809		node = rb_next(node);
1810		if (!node)
1811			break;
1812	}
1813out:
1814	spin_unlock(&tree->lock);
1815	return total_bytes;
1816}
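/*
 * Editor's note -- illustration mirroring the call in clean_io_failure()
 * further below.  Asking whether a failure tree records anything at all
 * can be done by searching the whole offset space for a single byte:
 *
 *	u64 first = 0;
 *
 *	if (!count_range_bits(failure_tree, &first, (u64)-1, 1,
 *			      EXTENT_DIRTY, 0))
 *		return 0;
 *
 * A zero return here means no EXTENT_DIRTY bytes are tracked in the tree.
 */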
1817
1818/*
1819 * set the private field for a given byte offset in the tree.  If there isn't
1820 * an extent_state there already, this does nothing.
1821 */
1822static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1823		struct io_failure_record *failrec)
1824{
1825	struct rb_node *node;
1826	struct extent_state *state;
1827	int ret = 0;
1828
1829	spin_lock(&tree->lock);
1830	/*
1831	 * this search will find all the extents that end after
1832	 * our range starts.
1833	 */
1834	node = tree_search(tree, start);
1835	if (!node) {
1836		ret = -ENOENT;
1837		goto out;
1838	}
1839	state = rb_entry(node, struct extent_state, rb_node);
1840	if (state->start != start) {
1841		ret = -ENOENT;
1842		goto out;
1843	}
1844	state->failrec = failrec;
1845out:
1846	spin_unlock(&tree->lock);
1847	return ret;
1848}
1849
1850static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1851		struct io_failure_record **failrec)
1852{
1853	struct rb_node *node;
1854	struct extent_state *state;
1855	int ret = 0;
1856
1857	spin_lock(&tree->lock);
1858	/*
1859	 * this search will find all the extents that end after
1860	 * our range starts.
1861	 */
1862	node = tree_search(tree, start);
1863	if (!node) {
1864		ret = -ENOENT;
1865		goto out;
1866	}
1867	state = rb_entry(node, struct extent_state, rb_node);
1868	if (state->start != start) {
1869		ret = -ENOENT;
1870		goto out;
1871	}
1872	*failrec = state->failrec;
1873out:
1874	spin_unlock(&tree->lock);
1875	return ret;
1876}
1877
1878/*
1879 * searches a range in the state tree for a given mask.
1880 * If 'filled' == 1, this returns 1 only if every extent in the range
1881 * has the bits set.  Otherwise, 1 is returned if any bit in the
1882 * range is found set.
1883 */
1884int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1885		   unsigned bits, int filled, struct extent_state *cached)
1886{
1887	struct extent_state *state = NULL;
1888	struct rb_node *node;
1889	int bitset = 0;
1890
1891	spin_lock(&tree->lock);
1892	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1893	    cached->end > start)
1894		node = &cached->rb_node;
1895	else
1896		node = tree_search(tree, start);
1897	while (node && start <= end) {
1898		state = rb_entry(node, struct extent_state, rb_node);
1899
1900		if (filled && state->start > start) {
1901			bitset = 0;
1902			break;
1903		}
1904
1905		if (state->start > end)
1906			break;
1907
1908		if (state->state & bits) {
1909			bitset = 1;
1910			if (!filled)
1911				break;
1912		} else if (filled) {
1913			bitset = 0;
1914			break;
1915		}
1916
1917		if (state->end == (u64)-1)
1918			break;
1919
1920		start = state->end + 1;
1921		if (start > end)
1922			break;
1923		node = rb_next(node);
1924		if (!node) {
1925			if (filled)
1926				bitset = 0;
1927			break;
1928		}
1929	}
1930	spin_unlock(&tree->lock);
1931	return bitset;
1932}
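/*
 * Editor's note -- illustrative only, mirroring the check in
 * find_lock_delalloc_range() above.  Passing filled == 1 asks whether
 * every byte of the range still carries the bit:
 *
 *	if (test_range_bit(tree, delalloc_start, delalloc_end,
 *			   EXTENT_DELALLOC, 1, cached_state))
 *		... the whole range is still delalloc ...
 */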
1933
1934/*
1935 * helper function to set a given page up to date if all the
1936 * extents in the tree for that page are up to date
1937 */
1938static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1939{
1940	u64 start = page_offset(page);
1941	u64 end = start + PAGE_SIZE - 1;
1942	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1943		SetPageUptodate(page);
1944}
1945
1946int free_io_failure(struct extent_io_tree *failure_tree,
1947		    struct extent_io_tree *io_tree,
1948		    struct io_failure_record *rec)
1949{
1950	int ret;
1951	int err = 0;
1952
1953	set_state_failrec(failure_tree, rec->start, NULL);
1954	ret = clear_extent_bits(failure_tree, rec->start,
1955				rec->start + rec->len - 1,
1956				EXTENT_LOCKED | EXTENT_DIRTY);
1957	if (ret)
1958		err = ret;
1959
1960	ret = clear_extent_bits(io_tree, rec->start,
1961				rec->start + rec->len - 1,
1962				EXTENT_DAMAGED);
1963	if (ret && !err)
1964		err = ret;
1965
1966	kfree(rec);
1967	return err;
1968}
1969
1970/*
1971 * this bypasses the standard btrfs submit functions deliberately, as
1972 * the standard behavior is to write all copies in a raid setup. here we only
1973 * want to write the one bad copy. so we do the mapping for ourselves and issue
1974 * submit_bio directly.
1975 * to avoid any synchronization issues, wait for the data after writing, which
1976 * actually prevents the read that triggered the error from finishing.
1977 * currently, there can be no more than two copies of every data bit. thus,
1978 * exactly one rewrite is required.
1979 */
1980int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
1981		      u64 length, u64 logical, struct page *page,
1982		      unsigned int pg_offset, int mirror_num)
1983{
1984	struct bio *bio;
1985	struct btrfs_device *dev;
1986	u64 map_length = 0;
1987	u64 sector;
1988	struct btrfs_bio *bbio = NULL;
1989	int ret;
1990
1991	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
1992	BUG_ON(!mirror_num);
1993
1994	bio = btrfs_io_bio_alloc(1);
1995	bio->bi_iter.bi_size = 0;
1996	map_length = length;
1997
1998	/*
1999	 * Avoid races with device replace and make sure our bbio has devices
2000	 * associated to its stripes that don't go away while we are doing the
2001	 * read repair operation.
2002	 */
2003	btrfs_bio_counter_inc_blocked(fs_info);
2004	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2005		/*
2006		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2007		 * to update all raid stripes, but here we just want to correct
2008		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2009		 * stripe's dev and sector.
2010		 */
2011		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2012				      &map_length, &bbio, 0);
2013		if (ret) {
2014			btrfs_bio_counter_dec(fs_info);
2015			bio_put(bio);
2016			return -EIO;
2017		}
2018		ASSERT(bbio->mirror_num == 1);
2019	} else {
2020		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2021				      &map_length, &bbio, mirror_num);
2022		if (ret) {
2023			btrfs_bio_counter_dec(fs_info);
2024			bio_put(bio);
2025			return -EIO;
2026		}
2027		BUG_ON(mirror_num != bbio->mirror_num);
2028	}
2029
2030	sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2031	bio->bi_iter.bi_sector = sector;
2032	dev = bbio->stripes[bbio->mirror_num - 1].dev;
2033	btrfs_put_bbio(bbio);
2034	if (!dev || !dev->bdev ||
2035	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2036		btrfs_bio_counter_dec(fs_info);
2037		bio_put(bio);
2038		return -EIO;
2039	}
2040	bio_set_dev(bio, dev->bdev);
2041	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2042	bio_add_page(bio, page, length, pg_offset);
2043
2044	if (btrfsic_submit_bio_wait(bio)) {
2045		/* try to remap that extent elsewhere? */
2046		btrfs_bio_counter_dec(fs_info);
2047		bio_put(bio);
2048		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2049		return -EIO;
2050	}
2051
2052	btrfs_info_rl_in_rcu(fs_info,
2053		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2054				  ino, start,
2055				  rcu_str_deref(dev->name), sector);
2056	btrfs_bio_counter_dec(fs_info);
2057	bio_put(bio);
2058	return 0;
2059}
2060
2061int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
2062			 struct extent_buffer *eb, int mirror_num)
2063{
2064	u64 start = eb->start;
2065	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2066	int ret = 0;
2067
2068	if (sb_rdonly(fs_info->sb))
2069		return -EROFS;
2070
2071	for (i = 0; i < num_pages; i++) {
2072		struct page *p = eb->pages[i];
2073
2074		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2075					start - page_offset(p), mirror_num);
2076		if (ret)
2077			break;
2078		start += PAGE_SIZE;
2079	}
2080
2081	return ret;
2082}
2083
2084/*
2085 * each time an IO finishes, we do a fast check in the IO failure tree
2086 * to see if we need to process or clean up an io_failure_record
2087 */
2088int clean_io_failure(struct btrfs_fs_info *fs_info,
2089		     struct extent_io_tree *failure_tree,
2090		     struct extent_io_tree *io_tree, u64 start,
2091		     struct page *page, u64 ino, unsigned int pg_offset)
2092{
2093	u64 private;
2094	struct io_failure_record *failrec;
2095	struct extent_state *state;
2096	int num_copies;
2097	int ret;
2098
2099	private = 0;
2100	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2101			       EXTENT_DIRTY, 0);
2102	if (!ret)
2103		return 0;
2104
2105	ret = get_state_failrec(failure_tree, start, &failrec);
2106	if (ret)
2107		return 0;
2108
2109	BUG_ON(!failrec->this_mirror);
2110
2111	if (failrec->in_validation) {
2112		/* there was no real error, just free the record */
2113		btrfs_debug(fs_info,
2114			"clean_io_failure: freeing dummy error at %llu",
2115			failrec->start);
2116		goto out;
2117	}
2118	if (sb_rdonly(fs_info->sb))
2119		goto out;
2120
2121	spin_lock(&io_tree->lock);
2122	state = find_first_extent_bit_state(io_tree,
2123					    failrec->start,
2124					    EXTENT_LOCKED);
2125	spin_unlock(&io_tree->lock);
2126
2127	if (state && state->start <= failrec->start &&
2128	    state->end >= failrec->start + failrec->len - 1) {
2129		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2130					      failrec->len);
2131		if (num_copies > 1)  {
2132			repair_io_failure(fs_info, ino, start, failrec->len,
2133					  failrec->logical, page, pg_offset,
2134					  failrec->failed_mirror);
2135		}
2136	}
2137
2138out:
2139	free_io_failure(failure_tree, io_tree, failrec);
2140
2141	return 0;
2142}
2143
2144/*
2145 * Can be called when
2146 * - hold extent lock
2147 * - under ordered extent
2148 * - the inode is freeing
2149 */
2150void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2151{
2152	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2153	struct io_failure_record *failrec;
2154	struct extent_state *state, *next;
2155
2156	if (RB_EMPTY_ROOT(&failure_tree->state))
2157		return;
2158
2159	spin_lock(&failure_tree->lock);
2160	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2161	while (state) {
2162		if (state->start > end)
2163			break;
2164
2165		ASSERT(state->end <= end);
2166
2167		next = next_state(state);
2168
2169		failrec = state->failrec;
2170		free_extent_state(state);
2171		kfree(failrec);
2172
2173		state = next;
2174	}
2175	spin_unlock(&failure_tree->lock);
2176}
2177
2178int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2179		struct io_failure_record **failrec_ret)
2180{
2181	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2182	struct io_failure_record *failrec;
2183	struct extent_map *em;
2184	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2185	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2186	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2187	int ret;
2188	u64 logical;
2189
2190	ret = get_state_failrec(failure_tree, start, &failrec);
2191	if (ret) {
2192		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2193		if (!failrec)
2194			return -ENOMEM;
2195
2196		failrec->start = start;
2197		failrec->len = end - start + 1;
2198		failrec->this_mirror = 0;
2199		failrec->bio_flags = 0;
2200		failrec->in_validation = 0;
2201
2202		read_lock(&em_tree->lock);
2203		em = lookup_extent_mapping(em_tree, start, failrec->len);
2204		if (!em) {
2205			read_unlock(&em_tree->lock);
2206			kfree(failrec);
2207			return -EIO;
2208		}
2209
2210		if (em->start > start || em->start + em->len <= start) {
2211			free_extent_map(em);
2212			em = NULL;
2213		}
2214		read_unlock(&em_tree->lock);
2215		if (!em) {
2216			kfree(failrec);
2217			return -EIO;
2218		}
2219
2220		logical = start - em->start;
2221		logical = em->block_start + logical;
2222		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2223			logical = em->block_start;
2224			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2225			extent_set_compress_type(&failrec->bio_flags,
2226						 em->compress_type);
2227		}
2228
2229		btrfs_debug(fs_info,
2230			"Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2231			logical, start, failrec->len);
2232
2233		failrec->logical = logical;
2234		free_extent_map(em);
2235
2236		/* set the bits in the private failure tree */
2237		ret = set_extent_bits(failure_tree, start, end,
2238					EXTENT_LOCKED | EXTENT_DIRTY);
2239		if (ret >= 0)
2240			ret = set_state_failrec(failure_tree, start, failrec);
2241		/* set the bits in the inode's tree */
2242		if (ret >= 0)
2243			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2244		if (ret < 0) {
2245			kfree(failrec);
2246			return ret;
2247		}
2248	} else {
2249		btrfs_debug(fs_info,
2250			"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2251			failrec->logical, failrec->start, failrec->len,
2252			failrec->in_validation);
2253		/*
2254		 * when data can be on disk more than twice, add to failrec here
2255		 * (e.g. with a list for failed_mirror) to make
2256		 * clean_io_failure() clean all those errors at once.
2257		 */
2258	}
2259
2260	*failrec_ret = failrec;
2261
2262	return 0;
2263}
2264
2265bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
2266			   struct io_failure_record *failrec, int failed_mirror)
2267{
2268	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2269	int num_copies;
2270
2271	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2272	if (num_copies == 1) {
2273		/*
2274		 * we only have a single copy of the data, so don't bother with
2275		 * all the retry and error correction code that follows. no
2276		 * matter what the error is, it is very likely to persist.
2277		 */
2278		btrfs_debug(fs_info,
2279			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2280			num_copies, failrec->this_mirror, failed_mirror);
2281		return false;
2282	}
2283
2284	/*
2285	 * there are two premises:
2286	 *	a) deliver good data to the caller
2287	 *	b) correct the bad sectors on disk
2288	 */
2289	if (failed_bio_pages > 1) {
2290		/*
2291		 * to fulfill b), we need to know the exact failing sectors, as
2292		 * we don't want to rewrite any more than the failed ones. thus,
2293		 * we need separate read requests for the failed bio
2294		 *
2295		 * if the following BUG_ON triggers, our validation request got
2296		 * merged. we need separate requests for our algorithm to work.
2297		 */
2298		BUG_ON(failrec->in_validation);
2299		failrec->in_validation = 1;
2300		failrec->this_mirror = failed_mirror;
2301	} else {
2302		/*
2303		 * we're ready to fulfill a) and b) alongside. get a good copy
2304		 * of the failed sector and if we succeed, we have setup
2305		 * everything for repair_io_failure to do the rest for us.
2306		 */
2307		if (failrec->in_validation) {
2308			BUG_ON(failrec->this_mirror != failed_mirror);
2309			failrec->in_validation = 0;
2310			failrec->this_mirror = 0;
2311		}
2312		failrec->failed_mirror = failed_mirror;
2313		failrec->this_mirror++;
2314		if (failrec->this_mirror == failed_mirror)
2315			failrec->this_mirror++;
2316	}
2317
2318	if (failrec->this_mirror > num_copies) {
2319		btrfs_debug(fs_info,
2320			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2321			num_copies, failrec->this_mirror, failed_mirror);
2322		return false;
2323	}
2324
2325	return true;
2326}
2327
2328
2329struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2330				    struct io_failure_record *failrec,
2331				    struct page *page, int pg_offset, int icsum,
2332				    bio_end_io_t *endio_func, void *data)
2333{
2334	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2335	struct bio *bio;
2336	struct btrfs_io_bio *btrfs_failed_bio;
2337	struct btrfs_io_bio *btrfs_bio;
2338
2339	bio = btrfs_io_bio_alloc(1);
2340	bio->bi_end_io = endio_func;
2341	bio->bi_iter.bi_sector = failrec->logical >> 9;
2342	bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
2343	bio->bi_iter.bi_size = 0;
2344	bio->bi_private = data;
2345
2346	btrfs_failed_bio = btrfs_io_bio(failed_bio);
2347	if (btrfs_failed_bio->csum) {
2348		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2349
2350		btrfs_bio = btrfs_io_bio(bio);
2351		btrfs_bio->csum = btrfs_bio->csum_inline;
2352		icsum *= csum_size;
2353		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2354		       csum_size);
2355	}
2356
2357	bio_add_page(bio, page, failrec->len, pg_offset);
2358
2359	return bio;
2360}
2361
2362/*
2363 * this is a generic handler for readpage errors (default
2364 * readpage_io_failed_hook). if other copies exist, read those and write back
2365 * good data to the failed position. It does not attempt to remap the failed
2366 * extent elsewhere, hoping the device will be smart enough to do this as
2367 * needed.
2368 */
2369
2370static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2371			      struct page *page, u64 start, u64 end,
2372			      int failed_mirror)
2373{
2374	struct io_failure_record *failrec;
2375	struct inode *inode = page->mapping->host;
2376	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2377	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2378	struct bio *bio;
2379	int read_mode = 0;
2380	blk_status_t status;
2381	int ret;
2382	unsigned failed_bio_pages = bio_pages_all(failed_bio);
2383
2384	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2385
2386	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2387	if (ret)
2388		return ret;
2389
2390	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
2391				    failed_mirror)) {
2392		free_io_failure(failure_tree, tree, failrec);
2393		return -EIO;
2394	}
2395
2396	if (failed_bio_pages > 1)
2397		read_mode |= REQ_FAILFAST_DEV;
2398
2399	phy_offset >>= inode->i_sb->s_blocksize_bits;
2400	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2401				      start - page_offset(page),
2402				      (int)phy_offset, failed_bio->bi_end_io,
2403				      NULL);
2404	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
2405
2406	btrfs_debug(btrfs_sb(inode->i_sb),
2407		"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2408		read_mode, failrec->this_mirror, failrec->in_validation);
2409
2410	status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
2411					 failrec->bio_flags, 0);
2412	if (status) {
2413		free_io_failure(failure_tree, tree, failrec);
2414		bio_put(bio);
2415		ret = blk_status_to_errno(status);
2416	}
2417
2418	return ret;
2419}
2420
2421/* lots and lots of room for performance fixes in the end_bio funcs */
2422
2423void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2424{
2425	int uptodate = (err == 0);
2426	struct extent_io_tree *tree;
2427	int ret = 0;
2428
2429	tree = &BTRFS_I(page->mapping->host)->io_tree;
2430
2431	if (tree->ops && tree->ops->writepage_end_io_hook)
2432		tree->ops->writepage_end_io_hook(page, start, end, NULL,
2433				uptodate);
2434
2435	if (!uptodate) {
2436		ClearPageUptodate(page);
2437		SetPageError(page);
2438		ret = err < 0 ? err : -EIO;
2439		mapping_set_error(page->mapping, ret);
2440	}
2441}
2442
2443/*
2444 * after a writepage IO is done, we need to:
2445 * clear the uptodate bits on error
2446 * clear the writeback bits in the extent tree for this IO
2447 * end_page_writeback if the page has no more pending IO
2448 *
2449 * Scheduling is not allowed, so the extent state tree is expected
2450 * to have one and only one object corresponding to this IO.
2451 */
2452static void end_bio_extent_writepage(struct bio *bio)
2453{
2454	int error = blk_status_to_errno(bio->bi_status);
2455	struct bio_vec *bvec;
2456	u64 start;
2457	u64 end;
2458	int i;
2459
2460	ASSERT(!bio_flagged(bio, BIO_CLONED));
2461	bio_for_each_segment_all(bvec, bio, i) {
2462		struct page *page = bvec->bv_page;
2463		struct inode *inode = page->mapping->host;
2464		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2465
2466		/* We always issue full-page reads, but if some block
2467		 * in a page fails to read, blk_update_request() will
2468		 * advance bv_offset and adjust bv_len to compensate.
2469		 * Print a warning for nonzero offsets, and an error
2470		 * if they don't add up to a full page.  */
2471		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2472			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2473				btrfs_err(fs_info,
2474				   "partial page write in btrfs with offset %u and length %u",
2475					bvec->bv_offset, bvec->bv_len);
2476			else
2477				btrfs_info(fs_info,
2478				   "incomplete page write in btrfs with offset %u and length %u",
2479					bvec->bv_offset, bvec->bv_len);
2480		}
2481
2482		start = page_offset(page);
2483		end = start + bvec->bv_offset + bvec->bv_len - 1;
2484
2485		end_extent_writepage(page, error, start, end);
2486		end_page_writeback(page);
2487	}
2488
2489	bio_put(bio);
2490}
2491
2492static void
2493endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2494			      int uptodate)
2495{
2496	struct extent_state *cached = NULL;
2497	u64 end = start + len - 1;
2498
2499	if (uptodate && tree->track_uptodate)
2500		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2501	unlock_extent_cached_atomic(tree, start, end, &cached);
2502}
2503
2504/*
2505 * after a readpage IO is done, we need to:
2506 * clear the uptodate bits on error
2507 * set the uptodate bits if things worked
2508 * set the page up to date if all extents in the tree are uptodate
2509 * clear the lock bit in the extent tree
2510 * unlock the page if there are no other extents locked for it
2511 *
2512 * Scheduling is not allowed, so the extent state tree is expected
2513 * to have one and only one object corresponding to this IO.
2514 */
2515static void end_bio_extent_readpage(struct bio *bio)
2516{
2517	struct bio_vec *bvec;
2518	int uptodate = !bio->bi_status;
2519	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2520	struct extent_io_tree *tree, *failure_tree;
2521	u64 offset = 0;
2522	u64 start;
2523	u64 end;
2524	u64 len;
2525	u64 extent_start = 0;
2526	u64 extent_len = 0;
2527	int mirror;
2528	int ret;
2529	int i;
2530
2531	ASSERT(!bio_flagged(bio, BIO_CLONED));
2532	bio_for_each_segment_all(bvec, bio, i) {
2533		struct page *page = bvec->bv_page;
2534		struct inode *inode = page->mapping->host;
2535		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2536
2537		btrfs_debug(fs_info,
2538			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2539			(u64)bio->bi_iter.bi_sector, bio->bi_status,
2540			io_bio->mirror_num);
2541		tree = &BTRFS_I(inode)->io_tree;
2542		failure_tree = &BTRFS_I(inode)->io_failure_tree;
2543
2544		/* We always issue full-page reads, but if some block
2545		 * in a page fails to read, blk_update_request() will
2546		 * advance bv_offset and adjust bv_len to compensate.
2547		 * Print a warning for nonzero offsets, and an error
2548		 * if they don't add up to a full page.  */
2549		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2550			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2551				btrfs_err(fs_info,
2552					"partial page read in btrfs with offset %u and length %u",
2553					bvec->bv_offset, bvec->bv_len);
2554			else
2555				btrfs_info(fs_info,
2556					"incomplete page read in btrfs with offset %u and length %u",
2557					bvec->bv_offset, bvec->bv_len);
2558		}
2559
2560		start = page_offset(page);
2561		end = start + bvec->bv_offset + bvec->bv_len - 1;
2562		len = bvec->bv_len;
2563
2564		mirror = io_bio->mirror_num;
2565		if (likely(uptodate && tree->ops)) {
2566			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2567							      page, start, end,
2568							      mirror);
2569			if (ret)
2570				uptodate = 0;
2571			else
2572				clean_io_failure(BTRFS_I(inode)->root->fs_info,
2573						 failure_tree, tree, start,
2574						 page,
2575						 btrfs_ino(BTRFS_I(inode)), 0);
2576		}
2577
2578		if (likely(uptodate))
2579			goto readpage_ok;
2580
2581		if (tree->ops) {
2582			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2583			if (ret == -EAGAIN) {
2584				/*
2585				 * Data inode's readpage_io_failed_hook() always
2586				 * returns -EAGAIN.
2587				 *
2588				 * The generic bio_readpage_error handles errors
2589				 * the following way: If possible, new read
2590				 * requests are created and submitted and will
2591				 * end up in end_bio_extent_readpage as well (if
2592				 * we're lucky, not in the !uptodate case). In
2593				 * that case it returns 0 and we just go on with
2594				 * the next page in our bio. If it can't handle
2595				 * the error it will return -EIO and we remain
2596				 * responsible for that page.
2597				 */
2598				ret = bio_readpage_error(bio, offset, page,
2599							 start, end, mirror);
2600				if (ret == 0) {
2601					uptodate = !bio->bi_status;
2602					offset += len;
2603					continue;
2604				}
2605			}
2606
2607			/*
2608			 * metadata's readpage_io_failed_hook() always returns
2609			 * -EIO and fixes nothing.  -EIO is also returned if
2610			 * data inode error could not be fixed.
2611			 */
2612			ASSERT(ret == -EIO);
2613		}
2614readpage_ok:
2615		if (likely(uptodate)) {
2616			loff_t i_size = i_size_read(inode);
2617			pgoff_t end_index = i_size >> PAGE_SHIFT;
2618			unsigned off;
2619
2620			/* Zero out the end if this page straddles i_size */
2621			off = i_size & (PAGE_SIZE-1);
2622			if (page->index == end_index && off)
2623				zero_user_segment(page, off, PAGE_SIZE);
2624			SetPageUptodate(page);
2625		} else {
2626			ClearPageUptodate(page);
2627			SetPageError(page);
2628		}
2629		unlock_page(page);
2630		offset += len;
2631
2632		if (unlikely(!uptodate)) {
2633			if (extent_len) {
2634				endio_readpage_release_extent(tree,
2635							      extent_start,
2636							      extent_len, 1);
2637				extent_start = 0;
2638				extent_len = 0;
2639			}
2640			endio_readpage_release_extent(tree, start,
2641						      end - start + 1, 0);
2642		} else if (!extent_len) {
2643			extent_start = start;
2644			extent_len = end + 1 - start;
2645		} else if (extent_start + extent_len == start) {
2646			extent_len += end + 1 - start;
2647		} else {
2648			endio_readpage_release_extent(tree, extent_start,
2649						      extent_len, uptodate);
2650			extent_start = start;
2651			extent_len = end + 1 - start;
2652		}
2653	}
2654
2655	if (extent_len)
2656		endio_readpage_release_extent(tree, extent_start, extent_len,
2657					      uptodate);
2658	if (io_bio->end_io)
2659		io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
2660	bio_put(bio);
2661}
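
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * range-coalescing pattern used by end_bio_extent_readpage() above, reduced
 * to its core.  Contiguous [start, end] ranges are merged into one pending
 * span and only flushed when a discontiguity (or the end of the loop) is
 * hit, so the extent tree is unlocked with as few calls as possible.  All
 * names here are hypothetical.
 */
#if 0
static void example_flush_range(unsigned long long start,
				unsigned long long len);

static void example_coalesce_ranges(const unsigned long long *starts,
				    const unsigned long long *ends, int nr)
{
	unsigned long long span_start = 0;
	unsigned long long span_len = 0;
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long long len = ends[i] + 1 - starts[i];

		if (!span_len) {
			span_start = starts[i];
			span_len = len;
		} else if (span_start + span_len == starts[i]) {
			/* Contiguous with the pending span: just extend it. */
			span_len += len;
		} else {
			/* Discontiguous: flush the pending span, start over. */
			example_flush_range(span_start, span_len);
			span_start = starts[i];
			span_len = len;
		}
	}
	if (span_len)
		example_flush_range(span_start, span_len);
}
#endif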
2662
2663/*
2664 * Initialize the members up to but not including 'bio'. Use after allocating a
2665 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
2666 * 'bio' because use of __GFP_ZERO is not supported.
2667 */
2668static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2669{
2670	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2671}
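
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the same
 * offsetof() trick as btrfs_io_bio_init() above, shown on a made-up wrapper
 * struct - zero only the private members that precede an embedded member the
 * allocator has already set up.  All names are hypothetical.
 */
#if 0
#include <stddef.h>
#include <string.h>

struct example_wrapper {
	unsigned int mirror_num;	/* private state, must be zeroed */
	unsigned long flags;		/* private state, must be zeroed */
	struct { int placeholder; } embedded;	/* initialized elsewhere */
};

static void example_wrapper_init(struct example_wrapper *w)
{
	/* Zero everything up to, but not including, 'embedded'. */
	memset(w, 0, offsetof(struct example_wrapper, embedded));
}
#endif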
2672
2673/*
2674 * The following helpers allocate a bio. As they are backed by a bioset, they
2675 * never fail.  We return a plain bio; use btrfs_io_bio() for the appropriate
2676 * container_of magic.
2677 */
2678struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
2679{
2680	struct bio *bio;
2681
2682	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
2683	bio_set_dev(bio, bdev);
2684	bio->bi_iter.bi_sector = first_byte >> 9;
2685	btrfs_io_bio_init(btrfs_io_bio(bio));
2686	return bio;
2687}
2688
2689struct bio *btrfs_bio_clone(struct bio *bio)
2690{
2691	struct btrfs_io_bio *btrfs_bio;
2692	struct bio *new;
2693
2694	/* Bio allocation backed by a bioset does not fail */
2695	new = bio_clone_fast(bio, GFP_NOFS, btrfs_bioset);
2696	btrfs_bio = btrfs_io_bio(new);
2697	btrfs_io_bio_init(btrfs_bio);
2698	btrfs_bio->iter = bio->bi_iter;
2699	return new;
2700}
2701
2702struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2703{
2704	struct bio *bio;
2705
2706	/* Bio allocation backed by a bioset does not fail */
2707	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
2708	btrfs_io_bio_init(btrfs_io_bio(bio));
2709	return bio;
2710}
2711
2712struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2713{
2714	struct bio *bio;
2715	struct btrfs_io_bio *btrfs_bio;
2716
2717	/* this will never fail when it's backed by a bioset */
2718	bio = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
2719	ASSERT(bio);
2720
2721	btrfs_bio = btrfs_io_bio(bio);
2722	btrfs_io_bio_init(btrfs_bio);
2723
2724	bio_trim(bio, offset >> 9, size >> 9);
2725	btrfs_bio->iter = bio->bi_iter;
2726	return bio;
2727}
2728
2729static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
2730				       unsigned long bio_flags)
2731{
2732	blk_status_t ret = 0;
2733	struct bio_vec *bvec = bio_last_bvec_all(bio);
2734	struct page *page = bvec->bv_page;
2735	struct extent_io_tree *tree = bio->bi_private;
2736	u64 start;
2737
2738	start = page_offset(page) + bvec->bv_offset;
2739
2740	bio->bi_private = NULL;
2741
2742	if (tree->ops)
2743		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
2744					   mirror_num, bio_flags, start);
2745	else
2746		btrfsic_submit_bio(bio);
2747
2748	return blk_status_to_errno(ret);
2749}
2750
2751/*
2752 * @opf:	bio REQ_OP_* and REQ_* flags as one value
2753 * @tree:	tree so we can call our merge_bio hook
2754 * @wbc:	optional writeback control for io accounting
2755 * @page:	page to add to the bio
2756 * @pg_offset:	offset within the page, also used to check whether we are
2757 *              adding a contiguous page to the previous one
2758 * @size:	portion of the page that we want to write
2759 * @offset:	disk byte offset of the data (determines the bio sector)
2760 * @bdev:	attach newly created bios to this bdev
2761 * @bio_ret:	must be valid pointer, newly allocated bio will be stored there
2762 * @end_io_func:     end_io callback for new bio
2763 * @mirror_num:	     desired mirror to read/write
2764 * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
2765 * @bio_flags:	flags of the current bio to see if we can merge them
2766 */
2767static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
2768			      struct writeback_control *wbc,
2769			      struct page *page, u64 offset,
2770			      size_t size, unsigned long pg_offset,
2771			      struct block_device *bdev,
2772			      struct bio **bio_ret,
2773			      bio_end_io_t end_io_func,
2774			      int mirror_num,
2775			      unsigned long prev_bio_flags,
2776			      unsigned long bio_flags,
2777			      bool force_bio_submit)
2778{
2779	int ret = 0;
2780	struct bio *bio;
2781	size_t page_size = min_t(size_t, size, PAGE_SIZE);
2782	sector_t sector = offset >> 9;
2783
2784	ASSERT(bio_ret);
2785
2786	if (*bio_ret) {
2787		bool contig;
2788		bool can_merge = true;
2789
2790		bio = *bio_ret;
2791		if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
2792			contig = bio->bi_iter.bi_sector == sector;
2793		else
2794			contig = bio_end_sector(bio) == sector;
2795
2796		if (tree->ops && tree->ops->merge_bio_hook(page, offset,
2797					page_size, bio, bio_flags))
2798			can_merge = false;
2799
2800		if (prev_bio_flags != bio_flags || !contig || !can_merge ||
2801		    force_bio_submit ||
2802		    bio_add_page(bio, page, page_size, pg_offset) < page_size) {
2803			ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
2804			if (ret < 0) {
2805				*bio_ret = NULL;
2806				return ret;
2807			}
2808			bio = NULL;
2809		} else {
2810			if (wbc)
2811				wbc_account_io(wbc, page, page_size);
2812			return 0;
2813		}
2814	}
2815
2816	bio = btrfs_bio_alloc(bdev, offset);
2817	bio_add_page(bio, page, page_size, pg_offset);
2818	bio->bi_end_io = end_io_func;
2819	bio->bi_private = tree;
2820	bio->bi_write_hint = page->mapping->host->i_write_hint;
2821	bio->bi_opf = opf;
2822	if (wbc) {
2823		wbc_init_bio(wbc, bio);
2824		wbc_account_io(wbc, page, page_size);
2825	}
2826
2827	*bio_ret = bio;
2828
2829	return ret;
2830}
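
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * "append to the current bio or submit it and start a new one" decision that
 * submit_extent_page() makes above, reduced to its core.  A segment is merged
 * only if it is physically contiguous with the batch, nothing forces a
 * submit, and the batch still has room.  All names are hypothetical.
 */
#if 0
struct example_batch {
	unsigned long long next_sector;	/* sector where the batch ends */
	int nr_segs;
	int max_segs;
};

static void example_submit_batch(struct example_batch *b);
static void example_start_batch(struct example_batch *b,
				unsigned long long sector);

static void example_add_segment(struct example_batch *b,
				unsigned long long sector,
				unsigned int nr_sectors, int force_submit)
{
	int contig = b->nr_segs && b->next_sector == sector;

	/* Submit the pending batch if the new segment cannot be merged. */
	if (b->nr_segs &&
	    (!contig || force_submit || b->nr_segs >= b->max_segs)) {
		example_submit_batch(b);
		b->nr_segs = 0;
	}

	if (!b->nr_segs)
		example_start_batch(b, sector);

	b->next_sector = sector + nr_sectors;
	b->nr_segs++;
}
#endif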
2831
2832static void attach_extent_buffer_page(struct extent_buffer *eb,
2833				      struct page *page)
2834{
2835	if (!PagePrivate(page)) {
2836		SetPagePrivate(page);
2837		get_page(page);
2838		set_page_private(page, (unsigned long)eb);
2839	} else {
2840		WARN_ON(page->private != (unsigned long)eb);
2841	}
2842}
2843
2844void set_page_extent_mapped(struct page *page)
2845{
2846	if (!PagePrivate(page)) {
2847		SetPagePrivate(page);
2848		get_page(page);
2849		set_page_private(page, EXTENT_PAGE_PRIVATE);
2850	}
2851}
2852
2853static struct extent_map *
2854__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2855		 u64 start, u64 len, get_extent_t *get_extent,
2856		 struct extent_map **em_cached)
2857{
2858	struct extent_map *em;
2859
2860	if (em_cached && *em_cached) {
2861		em = *em_cached;
2862		if (extent_map_in_tree(em) && start >= em->start &&
2863		    start < extent_map_end(em)) {
2864			refcount_inc(&em->refs);
2865			return em;
2866		}
2867
2868		free_extent_map(em);
2869		*em_cached = NULL;
2870	}
2871
2872	em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
2873	if (em_cached && !IS_ERR_OR_NULL(em)) {
2874		BUG_ON(*em_cached);
2875		refcount_inc(&em->refs);
2876		*em_cached = em;
2877	}
2878	return em;
2879}
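
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * single-entry cache pattern of __get_extent_map() above.  A previous lookup
 * result is reused only while it still covers the offset being asked for;
 * otherwise the cached reference is dropped and replaced.  All names are
 * hypothetical.
 */
#if 0
struct example_map {
	unsigned long long start;
	unsigned long long end;		/* exclusive */
	int refs;
};

static struct example_map *example_lookup(unsigned long long offset);
static void example_put(struct example_map *m);

static struct example_map *example_cached_lookup(struct example_map **cached,
						 unsigned long long offset)
{
	struct example_map *m = *cached;

	if (m && offset >= m->start && offset < m->end) {
		m->refs++;			/* hand out an extra ref */
		return m;
	}
	if (m) {
		example_put(m);			/* stale: drop the cached ref */
		*cached = NULL;
	}
	m = example_lookup(offset);
	if (m) {
		m->refs++;			/* one ref stays in the cache */
		*cached = m;
	}
	return m;
}
#endif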
2880/*
2881 * basic readpage implementation.  Locked extent state structs are inserted
2882 * into the tree that are removed when the IO is done (by the end_io
2883 * handlers)
2884 * XXX JDM: This needs looking at to ensure proper page locking
2885 * return 0 on success, otherwise return error
2886 */
2887static int __do_readpage(struct extent_io_tree *tree,
2888			 struct page *page,
2889			 get_extent_t *get_extent,
2890			 struct extent_map **em_cached,
2891			 struct bio **bio, int mirror_num,
2892			 unsigned long *bio_flags, unsigned int read_flags,
2893			 u64 *prev_em_start)
2894{
2895	struct inode *inode = page->mapping->host;
2896	u64 start = page_offset(page);
2897	const u64 end = start + PAGE_SIZE - 1;
2898	u64 cur = start;
2899	u64 extent_offset;
2900	u64 last_byte = i_size_read(inode);
2901	u64 block_start;
2902	u64 cur_end;
2903	struct extent_map *em;
2904	struct block_device *bdev;
2905	int ret = 0;
2906	int nr = 0;
2907	size_t pg_offset = 0;
2908	size_t iosize;
2909	size_t disk_io_size;
2910	size_t blocksize = inode->i_sb->s_blocksize;
2911	unsigned long this_bio_flag = 0;
2912
2913	set_page_extent_mapped(page);
2914
2915	if (!PageUptodate(page)) {
2916		if (cleancache_get_page(page) == 0) {
2917			BUG_ON(blocksize != PAGE_SIZE);
2918			unlock_extent(tree, start, end);
2919			goto out;
2920		}
2921	}
2922
2923	if (page->index == last_byte >> PAGE_SHIFT) {
2924		char *userpage;
2925		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2926
2927		if (zero_offset) {
2928			iosize = PAGE_SIZE - zero_offset;
2929			userpage = kmap_atomic(page);
2930			memset(userpage + zero_offset, 0, iosize);
2931			flush_dcache_page(page);
2932			kunmap_atomic(userpage);
2933		}
2934	}
2935	while (cur <= end) {
2936		bool force_bio_submit = false;
2937		u64 offset;
2938
2939		if (cur >= last_byte) {
2940			char *userpage;
2941			struct extent_state *cached = NULL;
2942
2943			iosize = PAGE_SIZE - pg_offset;
2944			userpage = kmap_atomic(page);
2945			memset(userpage + pg_offset, 0, iosize);
2946			flush_dcache_page(page);
2947			kunmap_atomic(userpage);
2948			set_extent_uptodate(tree, cur, cur + iosize - 1,
2949					    &cached, GFP_NOFS);
2950			unlock_extent_cached(tree, cur,
2951					     cur + iosize - 1, &cached);
2952			break;
2953		}
2954		em = __get_extent_map(inode, page, pg_offset, cur,
2955				      end - cur + 1, get_extent, em_cached);
2956		if (IS_ERR_OR_NULL(em)) {
2957			SetPageError(page);
2958			unlock_extent(tree, cur, end);
2959			break;
2960		}
2961		extent_offset = cur - em->start;
2962		BUG_ON(extent_map_end(em) <= cur);
2963		BUG_ON(end < cur);
2964
2965		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2966			this_bio_flag |= EXTENT_BIO_COMPRESSED;
2967			extent_set_compress_type(&this_bio_flag,
2968						 em->compress_type);
2969		}
2970
2971		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2972		cur_end = min(extent_map_end(em) - 1, end);
2973		iosize = ALIGN(iosize, blocksize);
2974		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2975			disk_io_size = em->block_len;
2976			offset = em->block_start;
2977		} else {
2978			offset = em->block_start + extent_offset;
2979			disk_io_size = iosize;
2980		}
2981		bdev = em->bdev;
2982		block_start = em->block_start;
2983		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2984			block_start = EXTENT_MAP_HOLE;
2985
2986		/*
2987		 * If we have a file range that points to a compressed extent
2988		 * and it's followed by a consecutive file range that points
2989		 * to the same compressed extent (possibly with a different
2990		 * offset and/or length, so it either points to the whole extent
2991		 * or only part of it), we must make sure we do not submit a
2992		 * single bio to populate the pages for the 2 ranges because
2993		 * this makes the compressed extent read zero out the pages
2994		 * belonging to the 2nd range. Imagine the following scenario:
2995		 *
2996		 *  File layout
2997		 *  [0 - 8K]                     [8K - 24K]
2998		 *    |                               |
2999		 *    |                               |
3000		 * points to extent X,         points to extent X,
3001		 * offset 4K, length of 8K     offset 0, length 16K
3002		 *
3003		 * [extent X, compressed length = 4K uncompressed length = 16K]
3004		 *
3005		 * If the bio to read the compressed extent covers both ranges,
3006		 * it will decompress extent X into the pages belonging to the
3007		 * first range and then it will stop, zeroing out the remaining
3008		 * pages that belong to the other range that points to extent X.
3009		 * So here we make sure we submit 2 bios, one for the first
3010		 * range and another one for the second range. Both will target
3011		 * the same physical extent from disk, but we can't currently
3012		 * make the compressed bio endio callback populate the pages
3013		 * for both ranges because each compressed bio is tightly
3014		 * coupled with a single extent map, and each range can have
3015		 * an extent map with a different offset value relative to the
3016		 * uncompressed data of our extent and different lengths. This
3017		 * is a corner case so we prioritize correctness over
3018		 * non-optimal behavior (submitting 2 bios for the same extent).
3019		 */
3020		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3021		    prev_em_start && *prev_em_start != (u64)-1 &&
3022		    *prev_em_start != em->orig_start)
3023			force_bio_submit = true;
3024
3025		if (prev_em_start)
3026			*prev_em_start = em->orig_start;
3027
3028		free_extent_map(em);
3029		em = NULL;
3030
3031		/* we've found a hole, just zero and go on */
3032		if (block_start == EXTENT_MAP_HOLE) {
3033			char *userpage;
3034			struct extent_state *cached = NULL;
3035
3036			userpage = kmap_atomic(page);
3037			memset(userpage + pg_offset, 0, iosize);
3038			flush_dcache_page(page);
3039			kunmap_atomic(userpage);
3040
3041			set_extent_uptodate(tree, cur, cur + iosize - 1,
3042					    &cached, GFP_NOFS);
3043			unlock_extent_cached(tree, cur,
3044					     cur + iosize - 1, &cached);
3045			cur = cur + iosize;
3046			pg_offset += iosize;
3047			continue;
3048		}
3049		/* the get_extent function already copied into the page */
3050		if (test_range_bit(tree, cur, cur_end,
3051				   EXTENT_UPTODATE, 1, NULL)) {
3052			check_page_uptodate(tree, page);
3053			unlock_extent(tree, cur, cur + iosize - 1);
3054			cur = cur + iosize;
3055			pg_offset += iosize;
3056			continue;
3057		}
3058		/* we have an inline extent but it didn't get marked up
3059		 * to date.  Error out
3060		 */
3061		if (block_start == EXTENT_MAP_INLINE) {
3062			SetPageError(page);
3063			unlock_extent(tree, cur, cur + iosize - 1);
3064			cur = cur + iosize;
3065			pg_offset += iosize;
3066			continue;
3067		}
3068
3069		ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
3070					 page, offset, disk_io_size,
3071					 pg_offset, bdev, bio,
3072					 end_bio_extent_readpage, mirror_num,
3073					 *bio_flags,
3074					 this_bio_flag,
3075					 force_bio_submit);
3076		if (!ret) {
3077			nr++;
3078			*bio_flags = this_bio_flag;
3079		} else {
3080			SetPageError(page);
3081			unlock_extent(tree, cur, cur + iosize - 1);
3082			goto out;
3083		}
3084		cur = cur + iosize;
3085		pg_offset += iosize;
3086	}
3087out:
3088	if (!nr) {
3089		if (!PageError(page))
3090			SetPageUptodate(page);
3091		unlock_page(page);
3092	}
3093	return ret;
3094}
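
/*
 * Illustrative sketch (editor's addition, guarded out of the build): how the
 * tail of the page that straddles i_size is zeroed, as done both in the read
 * completion handler and at the top of __do_readpage() above.  A 4K page
 * size is assumed here; all names are hypothetical.
 */
#if 0
#include <string.h>

#define EXAMPLE_PAGE_SHIFT	12
#define EXAMPLE_PAGE_SIZE	(1UL << EXAMPLE_PAGE_SHIFT)

static void example_zero_page_tail(unsigned char *page_data,
				   unsigned long long page_index,
				   unsigned long long i_size)
{
	unsigned long long end_index = i_size >> EXAMPLE_PAGE_SHIFT;
	unsigned long off = i_size & (EXAMPLE_PAGE_SIZE - 1);

	/* Only the page containing i_size needs its tail cleared. */
	if (page_index == end_index && off)
		memset(page_data + off, 0, EXAMPLE_PAGE_SIZE - off);
}
#endif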
3095
3096static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3097					     struct page *pages[], int nr_pages,
3098					     u64 start, u64 end,
3099					     struct extent_map **em_cached,
3100					     struct bio **bio,
3101					     unsigned long *bio_flags,
3102					     u64 *prev_em_start)
3103{
3104	struct inode *inode;
3105	struct btrfs_ordered_extent *ordered;
3106	int index;
3107
3108	inode = pages[0]->mapping->host;
3109	while (1) {
3110		lock_extent(tree, start, end);
3111		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3112						     end - start + 1);
3113		if (!ordered)
3114			break;
3115		unlock_extent(tree, start, end);
3116		btrfs_start_ordered_extent(inode, ordered, 1);
3117		btrfs_put_ordered_extent(ordered);
3118	}
3119
3120	for (index = 0; index < nr_pages; index++) {
3121		__do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
3122				bio, 0, bio_flags, 0, prev_em_start);
3123		put_page(pages[index]);
3124	}
3125}
3126
3127static void __extent_readpages(struct extent_io_tree *tree,
3128			       struct page *pages[],
3129			       int nr_pages,
3130			       struct extent_map **em_cached,
3131			       struct bio **bio, unsigned long *bio_flags,
3132			       u64 *prev_em_start)
3133{
3134	u64 start = 0;
3135	u64 end = 0;
3136	u64 page_start;
3137	int index;
3138	int first_index = 0;
3139
3140	for (index = 0; index < nr_pages; index++) {
3141		page_start = page_offset(pages[index]);
3142		if (!end) {
3143			start = page_start;
3144			end = start + PAGE_SIZE - 1;
3145			first_index = index;
3146		} else if (end + 1 == page_start) {
3147			end += PAGE_SIZE;
3148		} else {
3149			__do_contiguous_readpages(tree, &pages[first_index],
3150						  index - first_index, start,
3151						  end, em_cached,
3152						  bio, bio_flags,
3153						  prev_em_start);
3154			start = page_start;
3155			end = start + PAGE_SIZE - 1;
3156			first_index = index;
3157		}
3158	}
3159
3160	if (end)
3161		__do_contiguous_readpages(tree, &pages[first_index],
3162					  index - first_index, start,
3163					  end, em_cached, bio,
3164					  bio_flags, prev_em_start);
3165}
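
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * batching done by __extent_readpages() above - walk an array of page
 * offsets and hand every maximal run of contiguous pages to one callback,
 * so each run can be read under a single extent lock.  A 4K page size is
 * assumed; all names are hypothetical.
 */
#if 0
#define EXAMPLE_PAGE_SIZE	4096ULL

static void example_read_run(int first, int nr,
			     unsigned long long start, unsigned long long end);

static void example_batch_contiguous(const unsigned long long *page_offsets,
				     int nr_pages)
{
	unsigned long long start = 0, end = 0;
	int first = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		unsigned long long page_start = page_offsets[i];

		if (!end) {
			start = page_start;
			end = start + EXAMPLE_PAGE_SIZE - 1;
			first = i;
		} else if (end + 1 == page_start) {
			end += EXAMPLE_PAGE_SIZE;	/* still contiguous */
		} else {
			example_read_run(first, i - first, start, end);
			start = page_start;
			end = start + EXAMPLE_PAGE_SIZE - 1;
			first = i;
		}
	}
	if (end)
		example_read_run(first, i - first, start, end);
}
#endif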
3166
3167static int __extent_read_full_page(struct extent_io_tree *tree,
3168				   struct page *page,
3169				   get_extent_t *get_extent,
3170				   struct bio **bio, int mirror_num,
3171				   unsigned long *bio_flags,
3172				   unsigned int read_flags)
3173{
3174	struct inode *inode = page->mapping->host;
3175	struct btrfs_ordered_extent *ordered;
3176	u64 start = page_offset(page);
3177	u64 end = start + PAGE_SIZE - 1;
3178	int ret;
3179
3180	while (1) {
3181		lock_extent(tree, start, end);
3182		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3183						PAGE_SIZE);
3184		if (!ordered)
3185			break;
3186		unlock_extent(tree, start, end);
3187		btrfs_start_ordered_extent(inode, ordered, 1);
3188		btrfs_put_ordered_extent(ordered);
3189	}
3190
3191	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3192			    bio_flags, read_flags, NULL);
3193	return ret;
3194}
3195
3196int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3197			    get_extent_t *get_extent, int mirror_num)
3198{
3199	struct bio *bio = NULL;
3200	unsigned long bio_flags = 0;
3201	int ret;
3202
3203	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3204				      &bio_flags, 0);
3205	if (bio)
3206		ret = submit_one_bio(bio, mirror_num, bio_flags);
3207	return ret;
3208}
3209
3210static void update_nr_written(struct writeback_control *wbc,
3211			      unsigned long nr_written)
3212{
3213	wbc->nr_to_write -= nr_written;
3214}
3215
3216/*
3217 * helper for __extent_writepage, doing all of the delayed allocation setup.
3218 *
3219 * This returns 1 if our fill_delalloc function did all the work required
3220 * to write the page (copy into inline extent).  In this case the IO has
3221 * been started and the page is already unlocked.
3222 *
3223 * This returns 0 if all went well (page still locked)
3224 * This returns < 0 if there were errors (page still locked)
3225 */
3226static noinline_for_stack int writepage_delalloc(struct inode *inode,
3227			      struct page *page, struct writeback_control *wbc,
3228			      struct extent_page_data *epd,
3229			      u64 delalloc_start,
3230			      unsigned long *nr_written)
3231{
3232	struct extent_io_tree *tree = epd->tree;
3233	u64 page_end = delalloc_start + PAGE_SIZE - 1;
3234	u64 nr_delalloc;
3235	u64 delalloc_to_write = 0;
3236	u64 delalloc_end = 0;
3237	int ret;
3238	int page_started = 0;
3239
3240	if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3241		return 0;
3242
3243	while (delalloc_end < page_end) {
3244		nr_delalloc = find_lock_delalloc_range(inode, tree,
3245					       page,
3246					       &delalloc_start,
3247					       &delalloc_end,
3248					       BTRFS_MAX_EXTENT_SIZE);
3249		if (nr_delalloc == 0) {
3250			delalloc_start = delalloc_end + 1;
3251			continue;
3252		}
3253		ret = tree->ops->fill_delalloc(inode, page,
3254					       delalloc_start,
3255					       delalloc_end,
3256					       &page_started,
3257					       nr_written, wbc);
3258		/* File system has been set read-only */
3259		if (ret) {
3260			SetPageError(page);
3261			/* fill_delalloc should return < 0 on error, but
3262			 * just in case: > 0 would mean the IO was started,
3263			 * so we don't want to return > 0 here unless things
3264			 * are going well.
3265			 */
3266			ret = ret < 0 ? ret : -EIO;
3267			goto done;
3268		}
3269		/*
3270		 * delalloc_end is already one less than the total length, so
3271		 * we don't subtract one from PAGE_SIZE
3272		 */
3273		delalloc_to_write += (delalloc_end - delalloc_start +
3274				      PAGE_SIZE) >> PAGE_SHIFT;
3275		delalloc_start = delalloc_end + 1;
3276	}
3277	if (wbc->nr_to_write < delalloc_to_write) {
3278		int thresh = 8192;
3279
3280		if (delalloc_to_write < thresh * 2)
3281			thresh = delalloc_to_write;
3282		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3283					 thresh);
3284	}
3285
3286	/* did the fill delalloc function already unlock and start
3287	 * the IO?
3288	 */
3289	if (page_started) {
3290		/*
3291		 * we've unlocked the page, so we can't update
3292		 * the mapping's writeback index, just update
3293		 * nr_to_write.
3294		 */
3295		wbc->nr_to_write -= *nr_written;
3296		return 1;
3297	}
3298
3299	ret = 0;
3300
3301done:
3302	return ret;
3303}
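
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * nr_to_write adjustment at the end of writepage_delalloc() above.  The
 * delalloc byte count is rounded up to whole pages and nr_to_write is raised
 * so the caller's writeback loop does not stop before the range we just
 * started IO for is accounted, capped by a threshold so one call cannot
 * monopolize writeback.  Types are simplified; all names are hypothetical.
 */
#if 0
#define EXAMPLE_PAGE_SHIFT	12
#define EXAMPLE_PAGE_SIZE	(1ULL << EXAMPLE_PAGE_SHIFT)

static long long example_adjust_nr_to_write(long long nr_to_write,
					    unsigned long long delalloc_bytes)
{
	/* Round the delalloc byte count up to whole pages. */
	long long to_write = (delalloc_bytes + EXAMPLE_PAGE_SIZE - 1) >>
			     EXAMPLE_PAGE_SHIFT;
	long long thresh = 8192;

	if (to_write < thresh * 2)
		thresh = to_write;
	if (nr_to_write < to_write)
		nr_to_write = thresh;
	return nr_to_write;
}
#endif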
3304
3305/*
3306 * helper for __extent_writepage.  This calls the writepage start hooks,
3307 * and does the loop to map the page into extents and bios.
3308 *
3309 * We return 1 if the IO is started and the page is unlocked,
3310 * 0 if all went well (page still locked)
3311 * < 0 if there were errors (page still locked)
3312 */
3313static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3314				 struct page *page,
3315				 struct writeback_control *wbc,
3316				 struct extent_page_data *epd,
3317				 loff_t i_size,
3318				 unsigned long nr_written,
3319				 unsigned int write_flags, int *nr_ret)
3320{
3321	struct extent_io_tree *tree = epd->tree;
3322	u64 start = page_offset(page);
3323	u64 page_end = start + PAGE_SIZE - 1;
3324	u64 end;
3325	u64 cur = start;
3326	u64 extent_offset;
3327	u64 block_start;
3328	u64 iosize;
3329	struct extent_map *em;
3330	struct block_device *bdev;
3331	size_t pg_offset = 0;
3332	size_t blocksize;
3333	int ret = 0;
3334	int nr = 0;
3335	bool compressed;
3336
3337	if (tree->ops && tree->ops->writepage_start_hook) {
3338		ret = tree->ops->writepage_start_hook(page, start,
3339						      page_end);
3340		if (ret) {
3341			/* Fixup worker will requeue */
3342			if (ret == -EBUSY)
3343				wbc->pages_skipped++;
3344			else
3345				redirty_page_for_writepage(wbc, page);
3346
3347			update_nr_written(wbc, nr_written);
3348			unlock_page(page);
3349			return 1;
3350		}
3351	}
3352
3353	/*
3354	 * we don't want to touch the inode after unlocking the page,
3355	 * so we update the mapping writeback index now
3356	 */
3357	update_nr_written(wbc, nr_written + 1);
3358
3359	end = page_end;
3360	if (i_size <= start) {
3361		if (tree->ops && tree->ops->writepage_end_io_hook)
3362			tree->ops->writepage_end_io_hook(page, start,
3363							 page_end, NULL, 1);
3364		goto done;
3365	}
3366
3367	blocksize = inode->i_sb->s_blocksize;
3368
3369	while (cur <= end) {
3370		u64 em_end;
3371		u64 offset;
3372
3373		if (cur >= i_size) {
3374			if (tree->ops && tree->ops->writepage_end_io_hook)
3375				tree->ops->writepage_end_io_hook(page, cur,
3376							 page_end, NULL, 1);
3377			break;
3378		}
3379		em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
3380				     end - cur + 1, 1);
3381		if (IS_ERR_OR_NULL(em)) {
3382			SetPageError(page);
3383			ret = PTR_ERR_OR_ZERO(em);
3384			break;
3385		}
3386
3387		extent_offset = cur - em->start;
3388		em_end = extent_map_end(em);
3389		BUG_ON(em_end <= cur);
3390		BUG_ON(end < cur);
3391		iosize = min(em_end - cur, end - cur + 1);
3392		iosize = ALIGN(iosize, blocksize);
3393		offset = em->block_start + extent_offset;
3394		bdev = em->bdev;
3395		block_start = em->block_start;
3396		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3397		free_extent_map(em);
3398		em = NULL;
3399
3400		/*
3401		 * compressed and inline extents are written through other
3402		 * paths in the FS
3403		 */
3404		if (compressed || block_start == EXTENT_MAP_HOLE ||
3405		    block_start == EXTENT_MAP_INLINE) {
3406			/*
3407			 * end_io notification does not happen here for
3408			 * compressed extents
3409			 */
3410			if (!compressed && tree->ops &&
3411			    tree->ops->writepage_end_io_hook)
3412				tree->ops->writepage_end_io_hook(page, cur,
3413							 cur + iosize - 1,
3414							 NULL, 1);
3415			else if (compressed) {
3416				/* we don't want to end_page_writeback on
3417				 * a compressed extent.  this happens
3418				 * elsewhere
3419				 */
3420				nr++;
3421			}
3422
3423			cur += iosize;
3424			pg_offset += iosize;
3425			continue;
3426		}
3427
3428		set_range_writeback(tree, cur, cur + iosize - 1);
3429		if (!PageWriteback(page)) {
3430			btrfs_err(BTRFS_I(inode)->root->fs_info,
3431				   "page %lu not writeback, cur %llu end %llu",
3432			       page->index, cur, end);
3433		}
3434
3435		ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3436					 page, offset, iosize, pg_offset,
3437					 bdev, &epd->bio,
3438					 end_bio_extent_writepage,
3439					 0, 0, 0, false);
3440		if (ret) {
3441			SetPageError(page);
3442			if (PageWriteback(page))
3443				end_page_writeback(page);
3444		}
3445
3446		cur = cur + iosize;
3447		pg_offset += iosize;
3448		nr++;
3449	}
3450done:
3451	*nr_ret = nr;
3452	return ret;
3453}
3454
3455/*
3456 * the writepage semantics are similar to regular writepage.  extent
3457 * records are inserted to lock ranges in the tree, and as dirty areas
3458 * are found, they are marked writeback.  Then the lock bits are removed
3459 * and the end_io handler clears the writeback ranges
3460 */
3461static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3462			      struct extent_page_data *epd)
3463{
3464	struct inode *inode = page->mapping->host;
3465	u64 start = page_offset(page);
3466	u64 page_end = start + PAGE_SIZE - 1;
3467	int ret;
3468	int nr = 0;
3469	size_t pg_offset = 0;
3470	loff_t i_size = i_size_read(inode);
3471	unsigned long end_index = i_size >> PAGE_SHIFT;
3472	unsigned int write_flags = 0;
3473	unsigned long nr_written = 0;
3474
3475	write_flags = wbc_to_write_flags(wbc);
3476
3477	trace___extent_writepage(page, inode, wbc);
3478
3479	WARN_ON(!PageLocked(page));
3480
3481	ClearPageError(page);
3482
3483	pg_offset = i_size & (PAGE_SIZE - 1);
3484	if (page->index > end_index ||
3485	   (page->index == end_index && !pg_offset)) {
3486		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3487		unlock_page(page);
3488		return 0;
3489	}
3490
3491	if (page->index == end_index) {
3492		char *userpage;
3493
3494		userpage = kmap_atomic(page);
3495		memset(userpage + pg_offset, 0,
3496		       PAGE_SIZE - pg_offset);
3497		kunmap_atomic(userpage);
3498		flush_dcache_page(page);
3499	}
3500
3501	pg_offset = 0;
3502
3503	set_page_extent_mapped(page);
3504
3505	ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3506	if (ret == 1)
3507		goto done_unlocked;
3508	if (ret)
3509		goto done;
3510
3511	ret = __extent_writepage_io(inode, page, wbc, epd,
3512				    i_size, nr_written, write_flags, &nr);
3513	if (ret == 1)
3514		goto done_unlocked;
3515
3516done:
3517	if (nr == 0) {
3518		/* make sure the mapping tag for page dirty gets cleared */
3519		set_page_writeback(page);
3520		end_page_writeback(page);
3521	}
3522	if (PageError(page)) {
3523		ret = ret < 0 ? ret : -EIO;
3524		end_extent_writepage(page, ret, start, page_end);
3525	}
3526	unlock_page(page);
3527	return ret;
3528
3529done_unlocked:
3530	return 0;
3531}
3532
3533void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3534{
3535	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3536		       TASK_UNINTERRUPTIBLE);
3537}
3538
3539static noinline_for_stack int
3540lock_extent_buffer_for_io(struct extent_buffer *eb,
3541			  struct btrfs_fs_info *fs_info,
3542			  struct extent_page_data *epd)
3543{
3544	unsigned long i, num_pages;
3545	int flush = 0;
3546	int ret = 0;
3547
3548	if (!btrfs_try_tree_write_lock(eb)) {
3549		flush = 1;
3550		flush_write_bio(epd);
3551		btrfs_tree_lock(eb);
3552	}
3553
3554	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3555		btrfs_tree_unlock(eb);
3556		if (!epd->sync_io)
3557			return 0;
3558		if (!flush) {
3559			flush_write_bio(epd);
3560			flush = 1;
3561		}
3562		while (1) {
3563			wait_on_extent_buffer_writeback(eb);
3564			btrfs_tree_lock(eb);
3565			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3566				break;
3567			btrfs_tree_unlock(eb);
3568		}
3569	}
3570
3571	/*
3572	 * We need to do this to prevent races in people who check if the eb is
3573	 * under IO since we can end up having no IO bits set for a short period
3574	 * of time.
3575	 */
3576	spin_lock(&eb->refs_lock);
3577	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3578		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3579		spin_unlock(&eb->refs_lock);
3580		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3581		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3582					 -eb->len,
3583					 fs_info->dirty_metadata_batch);
3584		ret = 1;
3585	} else {
3586		spin_unlock(&eb->refs_lock);
3587	}
3588
3589	btrfs_tree_unlock(eb);
3590
3591	if (!ret)
3592		return ret;
3593
3594	num_pages = num_extent_pages(eb->start, eb->len);
3595	for (i = 0; i < num_pages; i++) {
3596		struct page *p = eb->pages[i];
3597
3598		if (!trylock_page(p)) {
3599			if (!flush) {
3600				flush_write_bio(epd);
3601				flush = 1;
3602			}
3603			lock_page(p);
3604		}
3605	}
3606
3607	return ret;
3608}
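
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * "try-lock, otherwise flush what we have batched and then block" pattern
 * used by lock_extent_buffer_for_io() above.  Flushing before blocking
 * matters because the bio we are still assembling may contain the very IO
 * the current lock holder is waiting on.  All names are hypothetical.
 */
#if 0
struct example_io_ctx {
	int have_pending_bio;
};

static int example_trylock(void *obj);
static void example_lock(void *obj);
static void example_flush(struct example_io_ctx *ctx);

static void example_lock_for_io(struct example_io_ctx *ctx, void *obj)
{
	if (!example_trylock(obj)) {
		/* Don't sleep on the lock while holding back submitted IO. */
		example_flush(ctx);
		example_lock(obj);
	}
}
#endif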
3609
3610static void end_extent_buffer_writeback(struct extent_buffer *eb)
3611{
3612	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3613	smp_mb__after_atomic();
3614	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3615}
3616
3617static void set_btree_ioerr(struct page *page)
3618{
3619	struct extent_buffer *eb = (struct extent_buffer *)page->private;
3620
3621	SetPageError(page);
3622	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3623		return;
3624
3625	/*
3626	 * If writeback for a btree extent that doesn't belong to a log tree
3627	 * failed, increment the counter transaction->eb_write_errors.
3628	 * We do this because while the transaction is running and before it's
3629	 * committing (when we call filemap_fdata[write|wait]_range against
3630	 * the btree inode), we might have
3631	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3632	 * returns an error or an error happens during writeback, when we're
3633	 * committing the transaction we wouldn't know about it, since the pages
3634	 * can be no longer dirty nor marked anymore for writeback (if a
3635	 * subsequent modification to the extent buffer didn't happen before the
3636	 * transaction commit), which makes filemap_fdata[write|wait]_range not
3637	 * able to find the pages tagged with SetPageError at transaction
3638	 * commit time. So if this happens we must abort the transaction,
3639	 * otherwise we commit a super block with btree roots that point to
3640	 * btree nodes/leafs whose content on disk is invalid - either garbage
3641	 * or the content of some node/leaf from a past generation that got
3642	 * cowed or deleted and is no longer valid.
3643	 *
3644	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3645	 * not be enough - we need to distinguish between log tree extents vs
3646	 * non-log tree extents, and the next filemap_fdatawait_range() call
3647	 * will catch and clear such errors in the mapping - and that call might
3648	 * be from a log sync and not from a transaction commit. Also, checking
3649	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3650	 * not done and would not be reliable - the eb might have been released
3651	 * from memory and reading it back again means that flag would not be
3652	 * set (since it's a runtime flag, not persisted on disk).
3653	 *
3654	 * Using the flags below in the btree inode also covers the case where
3655	 * writepages() returns success after having started writeback for all
3656	 * dirty pages, but that writeback finished with errors before
3657	 * filemap_fdatawait_range() was called.  Without these flags (and
3658	 * without AS_EIO/AS_ENOSPC), filemap_fdatawait_range() would return
3659	 * success, as it could not know that writeback errors happened (the
3660	 * pages were no longer tagged for writeback).
3662	 */
3663	switch (eb->log_index) {
3664	case -1:
3665		set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3666		break;
3667	case 0:
3668		set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3669		break;
3670	case 1:
3671		set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3672		break;
3673	default:
3674		BUG(); /* unexpected, logic error */
3675	}
3676}
3677
3678static void end_bio_extent_buffer_writepage(struct bio *bio)
3679{
3680	struct bio_vec *bvec;
3681	struct extent_buffer *eb;
3682	int i, done;
3683
3684	ASSERT(!bio_flagged(bio, BIO_CLONED));
3685	bio_for_each_segment_all(bvec, bio, i) {
3686		struct page *page = bvec->bv_page;
3687
3688		eb = (struct extent_buffer *)page->private;
3689		BUG_ON(!eb);
3690		done = atomic_dec_and_test(&eb->io_pages);
3691
3692		if (bio->bi_status ||
3693		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3694			ClearPageUptodate(page);
3695			set_btree_ioerr(page);
3696		}
3697
3698		end_page_writeback(page);
3699
3700		if (!done)
3701			continue;
3702
3703		end_extent_buffer_writeback(eb);
3704	}
3705
3706	bio_put(bio);
3707}
3708
3709static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3710			struct btrfs_fs_info *fs_info,
3711			struct writeback_control *wbc,
3712			struct extent_page_data *epd)
3713{
3714	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3715	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3716	u64 offset = eb->start;
3717	u32 nritems;
3718	unsigned long i, num_pages;
3719	unsigned long start, end;
3720	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3721	int ret = 0;
3722
3723	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3724	num_pages = num_extent_pages(eb->start, eb->len);
3725	atomic_set(&eb->io_pages, num_pages);
3726
3727	/* set btree blocks beyond nritems with 0 to avoid stale content. */
3728	nritems = btrfs_header_nritems(eb);
3729	if (btrfs_header_level(eb) > 0) {
3730		end = btrfs_node_key_ptr_offset(nritems);
3731
3732		memzero_extent_buffer(eb, end, eb->len - end);
3733	} else {
3734		/*
3735		 * leaf:
3736		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3737		 */
3738		start = btrfs_item_nr_offset(nritems);
3739		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, eb);
3740		memzero_extent_buffer(eb, start, end - start);
3741	}
3742
3743	for (i = 0; i < num_pages; i++) {
3744		struct page *p = eb->pages[i];
3745
3746		clear_page_dirty_for_io(p);
3747		set_page_writeback(p);
3748		ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3749					 p, offset, PAGE_SIZE, 0, bdev,
3750					 &epd->bio,
3751					 end_bio_extent_buffer_writepage,
3752					 0, 0, 0, false);
3753		if (ret) {
3754			set_btree_ioerr(p);
3755			if (PageWriteback(p))
3756				end_page_writeback(p);
3757			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3758				end_extent_buffer_writeback(eb);
3759			ret = -EIO;
3760			break;
3761		}
3762		offset += PAGE_SIZE;
3763		update_nr_written(wbc, 1);
3764		unlock_page(p);
3765	}
3766
3767	if (unlikely(ret)) {
3768		for (; i < num_pages; i++) {
3769			struct page *p = eb->pages[i];
3770			clear_page_dirty_for_io(p);
3771			unlock_page(p);
3772		}
3773	}
3774
3775	return ret;
3776}
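
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the idea
 * behind the memzero_extent_buffer() calls in write_one_eb() above.  Item
 * slots grow from the front of a leaf and item data grows from the back, so
 * the unused gap in the middle is cleared before the block hits disk, which
 * keeps stale bytes from a previous use of the buffer from leaking to disk.
 * The layout is simplified; all names are hypothetical.
 */
#if 0
#include <string.h>

static void example_scrub_unused(unsigned char *block,
				 unsigned int slots_end,
				 unsigned int data_start)
{
	/*
	 * slots_end: first byte past the header and the used item slots.
	 * data_start: first byte of used item data at the tail of the block.
	 */
	if (data_start > slots_end)
		memset(block + slots_end, 0, data_start - slots_end);
}
#endif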
3777
3778int btree_write_cache_pages(struct address_space *mapping,
3779				   struct writeback_control *wbc)
3780{
3781	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3782	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3783	struct extent_buffer *eb, *prev_eb = NULL;
3784	struct extent_page_data epd = {
3785		.bio = NULL,
3786		.tree = tree,
3787		.extent_locked = 0,
3788		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3789	};
3790	int ret = 0;
3791	int done = 0;
3792	int nr_to_write_done = 0;
3793	struct pagevec pvec;
3794	int nr_pages;
3795	pgoff_t index;
3796	pgoff_t end;		/* Inclusive */
3797	int scanned = 0;
3798	int tag;
3799
3800	pagevec_init(&pvec);
3801	if (wbc->range_cyclic) {
3802		index = mapping->writeback_index; /* Start from prev offset */
3803		end = -1;
3804	} else {
3805		index = wbc->range_start >> PAGE_SHIFT;
3806		end = wbc->range_end >> PAGE_SHIFT;
3807		scanned = 1;
3808	}
3809	if (wbc->sync_mode == WB_SYNC_ALL)
3810		tag = PAGECACHE_TAG_TOWRITE;
3811	else
3812		tag = PAGECACHE_TAG_DIRTY;
3813retry:
3814	if (wbc->sync_mode == WB_SYNC_ALL)
3815		tag_pages_for_writeback(mapping, index, end);
3816	while (!done && !nr_to_write_done && (index <= end) &&
3817	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3818			tag))) {
3819		unsigned i;
3820
3821		scanned = 1;
3822		for (i = 0; i < nr_pages; i++) {
3823			struct page *page = pvec.pages[i];
3824
3825			if (!PagePrivate(page))
3826				continue;
3827
3828			spin_lock(&mapping->private_lock);
3829			if (!PagePrivate(page)) {
3830				spin_unlock(&mapping->private_lock);
3831				continue;
3832			}
3833
3834			eb = (struct extent_buffer *)page->private;
3835
3836			/*
3837			 * Shouldn't happen and normally this would be a BUG_ON
3838			 * but no sense in crashing the users box for something
3839			 * we can survive anyway.
3840			 */
3841			if (WARN_ON(!eb)) {
3842				spin_unlock(&mapping->private_lock);
3843				continue;
3844			}
3845
3846			if (eb == prev_eb) {
3847				spin_unlock(&mapping->private_lock);
3848				continue;
3849			}
3850
3851			ret = atomic_inc_not_zero(&eb->refs);
3852			spin_unlock(&mapping->private_lock);
3853			if (!ret)
3854				continue;
3855
3856			prev_eb = eb;
3857			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3858			if (!ret) {
3859				free_extent_buffer(eb);
3860				continue;
3861			}
3862
3863			ret = write_one_eb(eb, fs_info, wbc, &epd);
3864			if (ret) {
3865				done = 1;
3866				free_extent_buffer(eb);
3867				break;
3868			}
3869			free_extent_buffer(eb);
3870
3871			/*
3872			 * the filesystem may choose to bump up nr_to_write.
3873			 * We have to make sure to honor the new nr_to_write
3874			 * at any time
3875			 */
3876			nr_to_write_done = wbc->nr_to_write <= 0;
3877		}
3878		pagevec_release(&pvec);
3879		cond_resched();
3880	}
3881	if (!scanned && !done) {
3882		/*
3883		 * We hit the last page and there is more work to be done: wrap
3884		 * back to the start of the file
3885		 */
3886		scanned = 1;
3887		index = 0;
3888		goto retry;
3889	}
3890	flush_write_bio(&epd);
3891	return ret;
3892}
3893
3894/**
3895 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3896 * @mapping: address space structure to write
3897 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3898 * @epd: holds context for the write, namely the bio being assembled
3899 *
3900 * If a page is already under I/O, extent_write_cache_pages() skips it, even
3901 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3902 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3903 * and msync() need to guarantee that all the data which was dirty at the time
3904 * the call was made get new I/O started against them.  If wbc->sync_mode is
3905 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3906 * existing IO to complete.
3907 */
3908static int extent_write_cache_pages(struct address_space *mapping,
3909			     struct writeback_control *wbc,
3910			     struct extent_page_data *epd)
3911{
3912	struct inode *inode = mapping->host;
3913	int ret = 0;
3914	int done = 0;
3915	int nr_to_write_done = 0;
3916	struct pagevec pvec;
3917	int nr_pages;
3918	pgoff_t index;
3919	pgoff_t end;		/* Inclusive */
3920	pgoff_t done_index;
3921	int range_whole = 0;
3922	int scanned = 0;
3923	int tag;
3924
3925	/*
3926	 * We have to hold onto the inode so that ordered extents can do their
3927	 * work when the IO finishes.  The alternative to this is failing to add
3928	 * an ordered extent if the igrab() fails there and that is a huge pain
3929	 * to deal with, so instead just hold onto the inode throughout the
3930	 * writepages operation.  If it fails here we are freeing up the inode
3931	 * anyway and we'd rather not waste our time writing out stuff that is
3932	 * going to be truncated anyway.
3933	 */
3934	if (!igrab(inode))
3935		return 0;
3936
3937	pagevec_init(&pvec);
3938	if (wbc->range_cyclic) {
3939		index = mapping->writeback_index; /* Start from prev offset */
3940		end = -1;
3941	} else {
3942		index = wbc->range_start >> PAGE_SHIFT;
3943		end = wbc->range_end >> PAGE_SHIFT;
3944		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3945			range_whole = 1;
3946		scanned = 1;
3947	}
3948	if (wbc->sync_mode == WB_SYNC_ALL)
3949		tag = PAGECACHE_TAG_TOWRITE;
3950	else
3951		tag = PAGECACHE_TAG_DIRTY;
3952retry:
3953	if (wbc->sync_mode == WB_SYNC_ALL)
3954		tag_pages_for_writeback(mapping, index, end);
3955	done_index = index;
3956	while (!done && !nr_to_write_done && (index <= end) &&
3957			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
3958						&index, end, tag))) {
3959		unsigned i;
3960
3961		scanned = 1;
3962		for (i = 0; i < nr_pages; i++) {
3963			struct page *page = pvec.pages[i];
3964
3965			done_index = page->index;
3966			/*
3967			 * At this point we hold neither the i_pages lock nor
3968			 * the page lock: the page may be truncated or
3969			 * invalidated (changing page->mapping to NULL),
3970			 * or even swizzled back from swapper_space to
3971			 * tmpfs file mapping
3972			 */
3973			if (!trylock_page(page)) {
3974				flush_write_bio(epd);
3975				lock_page(page);
3976			}
3977
3978			if (unlikely(page->mapping != mapping)) {
3979				unlock_page(page);
3980				continue;
3981			}
3982
3983			if (wbc->sync_mode != WB_SYNC_NONE) {
3984				if (PageWriteback(page))
3985					flush_write_bio(epd);
3986				wait_on_page_writeback(page);
3987			}
3988
3989			if (PageWriteback(page) ||
3990			    !clear_page_dirty_for_io(page)) {
3991				unlock_page(page);
3992				continue;
3993			}
3994
3995			ret = __extent_writepage(page, wbc, epd);
3996
3997			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3998				unlock_page(page);
3999				ret = 0;
4000			}
4001			if (ret < 0) {
4002				/*
4003				 * done_index is set past this page,
4004				 * so media errors will not choke
4005				 * background writeout for the entire
4006				 * file. This has consequences for
4007				 * range_cyclic semantics (ie. it may
4008				 * not be suitable for data integrity
4009				 * writeout).
4010				 */
4011				done_index = page->index + 1;
4012				done = 1;
4013				break;
4014			}
4015
4016			/*
4017			 * the filesystem may choose to bump up nr_to_write.
4018			 * We have to make sure to honor the new nr_to_write
4019			 * at any time
4020			 */
4021			nr_to_write_done = wbc->nr_to_write <= 0;
4022		}
4023		pagevec_release(&pvec);
4024		cond_resched();
4025	}
4026	if (!scanned && !done) {
4027		/*
4028		 * We hit the last page and there is more work to be done: wrap
4029		 * back to the start of the file
4030		 */
4031		scanned = 1;
4032		index = 0;
4033		goto retry;
4034	}
4035
4036	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4037		mapping->writeback_index = done_index;
4038
4039	btrfs_add_delayed_iput(inode);
4040	return ret;
4041}
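
/*
 * Illustrative sketch (editor's addition, guarded out of the build): the
 * skeleton shared by btree_write_cache_pages() and extent_write_cache_pages()
 * above - walk tagged pages in index order in batches and, for cyclic
 * writeback that started mid-file, wrap around to index 0 once if the first
 * pass found nothing to write.  All names are hypothetical.
 */
#if 0
static int example_lookup_tagged(unsigned long *index, unsigned long end,
				 unsigned long *pages, int max);
static int example_write_page(unsigned long page_index);

static int example_write_cache_pages(unsigned long start_index,
				     unsigned long end, int cyclic)
{
	unsigned long index = start_index;
	unsigned long pages[16];
	int scanned = !cyclic;	/* a bounded range never wraps */
	int ret = 0;
	int nr, i;

retry:
	while (index <= end &&
	       (nr = example_lookup_tagged(&index, end, pages, 16)) > 0) {
		scanned = 1;
		for (i = 0; i < nr; i++) {
			ret = example_write_page(pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	if (!scanned) {
		/* Started mid-file and hit the end: wrap back to the start. */
		scanned = 1;
		index = 0;
		goto retry;
	}
	return ret;
}
#endif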
4042
4043static void flush_write_bio(struct extent_page_data *epd)
4044{
4045	if (epd->bio) {
4046		int ret;
4047
4048		ret = submit_one_bio(epd->bio, 0, 0);
4049		BUG_ON(ret < 0); /* -ENOMEM */
4050		epd->bio = NULL;
4051	}
4052}
4053
4054int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4055{
4056	int ret;
4057	struct extent_page_data epd = {
4058		.bio = NULL,
4059		.tree = &BTRFS_I(page->mapping->host)->io_tree,
4060		.extent_locked = 0,
4061		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4062	};
4063
4064	ret = __extent_writepage(page, wbc, &epd);
4065
4066	flush_write_bio(&epd);
4067	return ret;
4068}
4069
4070int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4071			      int mode)
4072{
4073	int ret = 0;
4074	struct address_space *mapping = inode->i_mapping;
4075	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
4076	struct page *page;
4077	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4078		PAGE_SHIFT;
4079
4080	struct extent_page_data epd = {
4081		.bio = NULL,
4082		.tree = tree,
4083		.extent_locked = 1,
4084		.sync_io = mode == WB_SYNC_ALL,
4085	};
4086	struct writeback_control wbc_writepages = {
4087		.sync_mode	= mode,
4088		.nr_to_write	= nr_pages * 2,
4089		.range_start	= start,
4090		.range_end	= end + 1,
4091	};
4092
4093	while (start <= end) {
4094		page = find_get_page(mapping, start >> PAGE_SHIFT);
4095		if (clear_page_dirty_for_io(page))
4096			ret = __extent_writepage(page, &wbc_writepages, &epd);
4097		else {
4098			if (tree->ops && tree->ops->writepage_end_io_hook)
4099				tree->ops->writepage_end_io_hook(page, start,
4100						 start + PAGE_SIZE - 1,
4101						 NULL, 1);
4102			unlock_page(page);
4103		}
4104		put_page(page);
4105		start += PAGE_SIZE;
4106	}
4107
4108	flush_write_bio(&epd);
4109	return ret;
4110}
4111
4112int extent_writepages(struct extent_io_tree *tree,
4113		      struct address_space *mapping,
4114		      struct writeback_control *wbc)
4115{
4116	int ret = 0;
4117	struct extent_page_data epd = {
4118		.bio = NULL,
4119		.tree = tree,
4120		.extent_locked = 0,
4121		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4122	};
4123
4124	ret = extent_write_cache_pages(mapping, wbc, &epd);
4125	flush_write_bio(&epd);
4126	return ret;
4127}
4128
4129int extent_readpages(struct extent_io_tree *tree,
4130		     struct address_space *mapping,
4131		     struct list_head *pages, unsigned nr_pages)
4132{
4133	struct bio *bio = NULL;
4134	unsigned page_idx;
4135	unsigned long bio_flags = 0;
4136	struct page *pagepool[16];
4137	struct page *page;
4138	struct extent_map *em_cached = NULL;
4139	int nr = 0;
4140	u64 prev_em_start = (u64)-1;
4141
4142	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4143		page = list_entry(pages->prev, struct page, lru);
4144
4145		prefetchw(&page->flags);
4146		list_del(&page->lru);
4147		if (add_to_page_cache_lru(page, mapping,
4148					page->index,
4149					readahead_gfp_mask(mapping))) {
4150			put_page(page);
4151			continue;
4152		}
4153
4154		pagepool[nr++] = page;
4155		if (nr < ARRAY_SIZE(pagepool))
4156			continue;
4157		__extent_readpages(tree, pagepool, nr, &em_cached, &bio,
4158				&bio_flags, &prev_em_start);
4159		nr = 0;
4160	}
4161	if (nr)
4162		__extent_readpages(tree, pagepool, nr, &em_cached, &bio,
4163				&bio_flags, &prev_em_start);
4164
4165	if (em_cached)
4166		free_extent_map(em_cached);
4167
4168	BUG_ON(!list_empty(pages));
4169	if (bio)
4170		return submit_one_bio(bio, 0, bio_flags);
4171	return 0;
4172}
4173
4174/*
4175 * basic invalidatepage code, this waits on any locked or writeback
4176 * ranges corresponding to the page, and then deletes any extent state
4177 * records from the tree
4178 */
4179int extent_invalidatepage(struct extent_io_tree *tree,
4180			  struct page *page, unsigned long offset)
4181{
4182	struct extent_state *cached_state = NULL;
4183	u64 start = page_offset(page);
4184	u64 end = start + PAGE_SIZE - 1;
4185	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4186
4187	start += ALIGN(offset, blocksize);
4188	if (start > end)
4189		return 0;
4190
4191	lock_extent_bits(tree, start, end, &cached_state);
4192	wait_on_page_writeback(page);
4193	clear_extent_bit(tree, start, end,
4194			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4195			 EXTENT_DO_ACCOUNTING,
4196			 1, 1, &cached_state);
4197	return 0;
4198}
4199
4200/*
4201 * a helper for releasepage, this tests for areas of the page that
4202 * are locked or under IO and drops the related state bits if it is safe
4203 * to drop the page.
4204 */
4205static int try_release_extent_state(struct extent_map_tree *map,
4206				    struct extent_io_tree *tree,
4207				    struct page *page, gfp_t mask)
4208{
4209	u64 start = page_offset(page);
4210	u64 end = start + PAGE_SIZE - 1;
4211	int ret = 1;
4212
4213	if (test_range_bit(tree, start, end,
4214			   EXTENT_IOBITS, 0, NULL))
4215		ret = 0;
4216	else {
4217		/*
4218		 * at this point we can safely clear everything except the
4219		 * locked bit and the nodatasum bit
4220		 */
4221		ret = __clear_extent_bit(tree, start, end,
4222				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4223				 0, 0, NULL, mask, NULL);
4224
4225		/* if clear_extent_bit failed for enomem reasons,
4226		 * we can't allow the release to continue.
4227		 */
4228		if (ret < 0)
4229			ret = 0;
4230		else
4231			ret = 1;
4232	}
4233	return ret;
4234}
4235
4236/*
4237 * a helper for releasepage.  As long as there are no locked extents
4238 * in the range corresponding to the page, both state records and extent
4239 * map records are removed
4240 */
4241int try_release_extent_mapping(struct extent_map_tree *map,
4242			       struct extent_io_tree *tree, struct page *page,
4243			       gfp_t mask)
4244{
4245	struct extent_map *em;
4246	u64 start = page_offset(page);
4247	u64 end = start + PAGE_SIZE - 1;
4248
4249	if (gfpflags_allow_blocking(mask) &&
4250	    page->mapping->host->i_size > SZ_16M) {
4251		u64 len;
4252		while (start <= end) {
4253			len = end - start + 1;
4254			write_lock(&map->lock);
4255			em = lookup_extent_mapping(map, start, len);
4256			if (!em) {
4257				write_unlock(&map->lock);
4258				break;
4259			}
4260			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4261			    em->start != start) {
4262				write_unlock(&map->lock);
4263				free_extent_map(em);
4264				break;
4265			}
4266			if (!test_range_bit(tree, em->start,
4267					    extent_map_end(em) - 1,
4268					    EXTENT_LOCKED | EXTENT_WRITEBACK,
4269					    0, NULL)) {
4270				remove_extent_mapping(map, em);
4271				/* once for the rb tree */
4272				free_extent_map(em);
4273			}
4274			start = extent_map_end(em);
4275			write_unlock(&map->lock);
4276
4277			/* once for us */
4278			free_extent_map(em);
4279		}
4280	}
4281	return try_release_extent_state(map, tree, page, mask);
4282}
4283
4284/*
4285 * helper function for fiemap, which doesn't want to see any holes.
4286 * This maps until we find something past 'last'
4287 */
4288static struct extent_map *get_extent_skip_holes(struct inode *inode,
4289						u64 offset, u64 last)
4290{
4291	u64 sectorsize = btrfs_inode_sectorsize(inode);
4292	struct extent_map *em;
4293	u64 len;
4294
4295	if (offset >= last)
4296		return NULL;
4297
4298	while (1) {
4299		len = last - offset;
4300		if (len == 0)
4301			break;
4302		len = ALIGN(len, sectorsize);
4303		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, offset,
4304				len, 0);
4305		if (IS_ERR_OR_NULL(em))
4306			return em;
4307
4308		/* if this isn't a hole return it */
4309		if (em->block_start != EXTENT_MAP_HOLE)
4310			return em;
4311
4312		/* this is a hole, advance to the next extent */
4313		offset = extent_map_end(em);
4314		free_extent_map(em);
4315		if (offset >= last)
4316			break;
4317	}
4318	return NULL;
4319}
4320
4321/*
4322 * Cache for the previous fiemap extent.
4323 *
4324 * Used for merging fiemap extents.
4325 */
4326struct fiemap_cache {
4327	u64 offset;
4328	u64 phys;
4329	u64 len;
4330	u32 flags;
4331	bool cached;
4332};
4333
4334/*
4335 * Helper to submit fiemap extent.
4336 *
4337 * Will try to merge the current fiemap extent specified by @offset, @phys,
4338 * @len and @flags with the cached one.
4339 * Only when the merge fails will the cached one be submitted as a
4340 * fiemap extent.
4341 *
4342 * Return value is the same as fiemap_fill_next_extent().
4343 */
4344static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4345				struct fiemap_cache *cache,
4346				u64 offset, u64 phys, u64 len, u32 flags)
4347{
4348	int ret = 0;
4349
4350	if (!cache->cached)
4351		goto assign;
4352
4353	/*
4354	 * Sanity check, extent_fiemap() should have ensured that the new
4355	 * fiemap extent won't overlap with the cached one.
4356	 * Not recoverable.
4357	 *
4358	 * NOTE: Physical address can overlap, due to compression
4359	 */
4360	if (cache->offset + cache->len > offset) {
4361		WARN_ON(1);
4362		return -EINVAL;
4363	}
4364
4365	/*
4366	 * Only merges fiemap extents if
4367	 * 1) Their logical addresses are continuous
4368	 *
4369	 * 2) Their physical addresses are continuous
4370	 *    So truly compressed (physical size smaller than logical size)
4371	 *    extents won't get merged with each other
4372	 *
4373	 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4374	 *    So a regular extent won't get merged with a prealloc extent
4375	 */
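	/*
	 * A worked example with hypothetical values: a cached entry for
	 * logical [0, 64K) at physical 1M merges with a new extent at
	 * logical 64K, physical 1M + 64K and identical flags into a single
	 * 128K entry.  A prealloc extent following a regular one differs in
	 * FIEMAP_EXTENT_UNWRITTEN, so condition 3) keeps them separate.
	 */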
4376	if (cache->offset + cache->len == offset &&
4377	    cache->phys + cache->len == phys &&
4378	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4379			(flags & ~FIEMAP_EXTENT_LAST)) {
4380		cache->len += len;
4381		cache->flags |= flags;
4382		goto try_submit_last;
4383	}
4384
4385	/* Not mergeable, need to submit cached one */
4386	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4387				      cache->len, cache->flags);
4388	cache->cached = false;
4389	if (ret)
4390		return ret;
4391assign:
4392	cache->cached = true;
4393	cache->offset = offset;
4394	cache->phys = phys;
4395	cache->len = len;
4396	cache->flags = flags;
4397try_submit_last:
4398	if (cache->flags & FIEMAP_EXTENT_LAST) {
4399		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4400				cache->phys, cache->len, cache->flags);
4401		cache->cached = false;
4402	}
4403	return ret;
4404}
4405
4406/*
4407 * Emit last fiemap cache
4408 *
4409 * The last fiemap cache may still be cached in the following case:
4410 * 0		      4k		    8k
4411 * |<- Fiemap range ->|
4412 * |<------------  First extent ----------->|
4413 *
4414 * In this case, the first extent range will be cached but not emitted.
4415 * So we must emit it before ending extent_fiemap().
4416 */
4417static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
4418				  struct fiemap_extent_info *fieinfo,
4419				  struct fiemap_cache *cache)
4420{
4421	int ret;
4422
4423	if (!cache->cached)
4424		return 0;
4425
4426	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4427				      cache->len, cache->flags);
4428	cache->cached = false;
4429	if (ret > 0)
4430		ret = 0;
4431	return ret;
4432}
4433
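/*
 * Fill in fiemap info for the requested file range.
 *
 * Rough flow (see the code below for details): look up the last file extent
 * item so preallocation past i_size is honoured, lock the io_tree range, then
 * walk extent maps via get_extent_skip_holes(), translating each map into
 * fiemap flags and feeding it to emit_fiemap_extent() so adjacent entries can
 * be merged before they are reported to user space.
 */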
4434int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4435		__u64 start, __u64 len)
4436{
4437	int ret = 0;
4438	u64 off = start;
4439	u64 max = start + len;
4440	u32 flags = 0;
4441	u32 found_type;
4442	u64 last;
4443	u64 last_for_get_extent = 0;
4444	u64 disko = 0;
4445	u64 isize = i_size_read(inode);
4446	struct btrfs_key found_key;
4447	struct extent_map *em = NULL;
4448	struct extent_state *cached_state = NULL;
4449	struct btrfs_path *path;
4450	struct btrfs_root *root = BTRFS_I(inode)->root;
4451	struct fiemap_cache cache = { 0 };
4452	int end = 0;
4453	u64 em_start = 0;
4454	u64 em_len = 0;
4455	u64 em_end = 0;
4456
4457	if (len == 0)
4458		return -EINVAL;
4459
4460	path = btrfs_alloc_path();
4461	if (!path)
4462		return -ENOMEM;
4463	path->leave_spinning = 1;
4464
4465	start = round_down(start, btrfs_inode_sectorsize(inode));
4466	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4467
4468	/*
4469	 * lookup the last file extent.  We're not using i_size here
4470	 * because there might be preallocation past i_size
4471	 */
4472	ret = btrfs_lookup_file_extent(NULL, root, path,
4473			btrfs_ino(BTRFS_I(inode)), -1, 0);
4474	if (ret < 0) {
4475		btrfs_free_path(path);
4476		return ret;
4477	} else {
4478		WARN_ON(!ret);
4479		if (ret == 1)
4480			ret = 0;
4481	}
4482
4483	path->slots[0]--;
4484	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4485	found_type = found_key.type;
4486
4487	/* No extents, but there might be delalloc bits */
4488	if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4489	    found_type != BTRFS_EXTENT_DATA_KEY) {
4490		/* have to trust i_size as the end */
4491		last = (u64)-1;
4492		last_for_get_extent = isize;
4493	} else {
4494		/*
4495		 * remember the start of the last extent.  There are a
4496		 * bunch of different factors that go into the length of the
4497		 * extent, so it's much less complex to remember where it started
4498		 */
4499		last = found_key.offset;
4500		last_for_get_extent = last + 1;
4501	}
4502	btrfs_release_path(path);
4503
4504	/*
4505	 * we might have some extents allocated but more delalloc past those
4506	 * extents.  So we trust isize unless the start of the last extent is
4507	 * beyond isize
4508	 */
4509	if (last < isize) {
4510		last = (u64)-1;
4511		last_for_get_extent = isize;
4512	}
4513
4514	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4515			 &cached_state);
4516
4517	em = get_extent_skip_holes(inode, start, last_for_get_extent);
4518	if (!em)
4519		goto out;
4520	if (IS_ERR(em)) {
4521		ret = PTR_ERR(em);
4522		goto out;
4523	}
4524
4525	while (!end) {
4526		u64 offset_in_extent = 0;
4527
4528		/* break if the extent we found is outside the range */
4529		if (em->start >= max || extent_map_end(em) < off)
4530			break;
4531
4532		/*
4533		 * get_extent may return an extent that starts before our
4534		 * requested range.  We have to make sure the ranges
4535		 * we return to fiemap always move forward and don't
4536		 * overlap, so adjust the offsets here
4537		 */
4538		em_start = max(em->start, off);
4539
4540		/*
4541		 * record the offset from the start of the extent
4542		 * for adjusting the disk offset below.  Only do this if the
4543		 * extent isn't compressed since our in ram offset may be past
4544		 * what we have actually allocated on disk.
4545		 */
4546		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4547			offset_in_extent = em_start - em->start;
4548		em_end = extent_map_end(em);
4549		em_len = em_end - em_start;
4550		disko = 0;
4551		flags = 0;
4552
4553		/*
4554		 * bump off for our next call to get_extent
4555		 */
4556		off = extent_map_end(em);
4557		if (off >= max)
4558			end = 1;
4559
4560		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4561			end = 1;
4562			flags |= FIEMAP_EXTENT_LAST;
4563		} else if (em->block_start == EXTENT_MAP_INLINE) {
4564			flags |= (FIEMAP_EXTENT_DATA_INLINE |
4565				  FIEMAP_EXTENT_NOT_ALIGNED);
4566		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
4567			flags |= (FIEMAP_EXTENT_DELALLOC |
4568				  FIEMAP_EXTENT_UNKNOWN);
4569		} else if (fieinfo->fi_extents_max) {
4570			u64 bytenr = em->block_start -
4571				(em->start - em->orig_start);
4572
4573			disko = em->block_start + offset_in_extent;
4574
4575			/*
4576			 * As btrfs supports shared space, this information
4577			 * can be exported to userspace tools via
4578			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4579			 * then we're just getting a count and we can skip the
4580			 * lookup stuff.
4581			 */
4582			ret = btrfs_check_shared(root,
4583						 btrfs_ino(BTRFS_I(inode)),
4584						 bytenr);
4585			if (ret < 0)
4586				goto out_free;
4587			if (ret)
4588				flags |= FIEMAP_EXTENT_SHARED;
4589			ret = 0;
4590		}
4591		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4592			flags |= FIEMAP_EXTENT_ENCODED;
4593		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4594			flags |= FIEMAP_EXTENT_UNWRITTEN;
4595
4596		free_extent_map(em);
4597		em = NULL;
4598		if ((em_start >= last) || em_len == (u64)-1 ||
4599		   (last == (u64)-1 && isize <= em_end)) {
4600			flags |= FIEMAP_EXTENT_LAST;
4601			end = 1;
4602		}
4603
4604		/* now scan forward to see if this is really the last extent. */
4605		em = get_extent_skip_holes(inode, off, last_for_get_extent);
4606		if (IS_ERR(em)) {
4607			ret = PTR_ERR(em);
4608			goto out;
4609		}
4610		if (!em) {
4611			flags |= FIEMAP_EXTENT_LAST;
4612			end = 1;
4613		}
4614		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4615					   em_len, flags);
4616		if (ret) {
4617			if (ret == 1)
4618				ret = 0;
4619			goto out_free;
4620		}
4621	}
4622out_free:
4623	if (!ret)
4624		ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
4625	free_extent_map(em);
4626out:
4627	btrfs_free_path(path);
4628	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4629			     &cached_state);
4630	return ret;
4631}
4632
4633static void __free_extent_buffer(struct extent_buffer *eb)
 
4634{
4635	btrfs_leak_debug_del(&eb->leak_list);
4636	kmem_cache_free(extent_buffer_cache, eb);
4637}
4638
4639int extent_buffer_under_io(struct extent_buffer *eb)
4640{
4641	return (atomic_read(&eb->io_pages) ||
4642		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4643		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4644}
4645
4646/*
4647 * Helper for releasing the extent buffer's pages.
4648 */
4649static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4650{
4651	unsigned long index;
4652	struct page *page;
4653	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4654
4655	BUG_ON(extent_buffer_under_io(eb));
4656
4657	index = num_extent_pages(eb->start, eb->len);
4658	if (index == 0)
4659		return;
4660
4661	do {
4662		index--;
4663		page = eb->pages[index];
4664		if (!page)
4665			continue;
4666		if (mapped)
4667			spin_lock(&page->mapping->private_lock);
4668		/*
4669		 * We do this since we'll remove the pages after we've
4670		 * removed the eb from the radix tree, so we could race
4671		 * and have this page now attached to the new eb.  So
4672		 * only clear page_private if it's still connected to
4673		 * this eb.
4674		 */
4675		if (PagePrivate(page) &&
4676		    page->private == (unsigned long)eb) {
4677			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4678			BUG_ON(PageDirty(page));
4679			BUG_ON(PageWriteback(page));
4680			/*
4681			 * We need to make sure we haven't been attached
4682			 * to a new eb.
4683			 */
4684			ClearPagePrivate(page);
4685			set_page_private(page, 0);
4686			/* One for the page private */
4687			put_page(page);
4688		}
4689
4690		if (mapped)
4691			spin_unlock(&page->mapping->private_lock);
4692
4693		/* One for when we allocated the page */
4694		put_page(page);
4695	} while (index != 0);
4696}
4697
4698/*
4699 * Helper for releasing the extent buffer.
4700 */
4701static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4702{
4703	btrfs_release_extent_buffer_page(eb);
4704	__free_extent_buffer(eb);
4705}
4706
4707static struct extent_buffer *
4708__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4709		      unsigned long len)
4710{
4711	struct extent_buffer *eb = NULL;
4712
4713	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4714	eb->start = start;
4715	eb->len = len;
4716	eb->fs_info = fs_info;
4717	eb->bflags = 0;
4718	rwlock_init(&eb->lock);
4719	atomic_set(&eb->write_locks, 0);
4720	atomic_set(&eb->read_locks, 0);
4721	atomic_set(&eb->blocking_readers, 0);
4722	atomic_set(&eb->blocking_writers, 0);
4723	atomic_set(&eb->spinning_readers, 0);
4724	atomic_set(&eb->spinning_writers, 0);
4725	eb->lock_nested = 0;
4726	init_waitqueue_head(&eb->write_lock_wq);
4727	init_waitqueue_head(&eb->read_lock_wq);
4728
4729	btrfs_leak_debug_add(&eb->leak_list, &buffers);
4730
4731	spin_lock_init(&eb->refs_lock);
4732	atomic_set(&eb->refs, 1);
4733	atomic_set(&eb->io_pages, 0);
4734
4735	/*
4736	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4737	 */
4738	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4739		> MAX_INLINE_EXTENT_BUFFER_SIZE);
4740	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4741
4742	return eb;
4743}
4744
4745struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4746{
4747	unsigned long i;
4748	struct page *p;
4749	struct extent_buffer *new;
4750	unsigned long num_pages = num_extent_pages(src->start, src->len);
4751
4752	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4753	if (new == NULL)
4754		return NULL;
4755
4756	for (i = 0; i < num_pages; i++) {
4757		p = alloc_page(GFP_NOFS);
4758		if (!p) {
4759			btrfs_release_extent_buffer(new);
4760			return NULL;
4761		}
4762		attach_extent_buffer_page(new, p);
4763		WARN_ON(PageDirty(p));
4764		SetPageUptodate(p);
4765		new->pages[i] = p;
4766		copy_page(page_address(p), page_address(src->pages[i]));
4767	}
4768
4769	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4770	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4771
4772	return new;
4773}
4774
4775struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4776						  u64 start, unsigned long len)
4777{
4778	struct extent_buffer *eb;
4779	unsigned long num_pages;
4780	unsigned long i;
4781
4782	num_pages = num_extent_pages(start, len);
4783
4784	eb = __alloc_extent_buffer(fs_info, start, len);
4785	if (!eb)
4786		return NULL;
4787
4788	for (i = 0; i < num_pages; i++) {
4789		eb->pages[i] = alloc_page(GFP_NOFS);
4790		if (!eb->pages[i])
4791			goto err;
4792	}
4793	set_extent_buffer_uptodate(eb);
4794	btrfs_set_header_nritems(eb, 0);
4795	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4796
4797	return eb;
4798err:
4799	for (; i > 0; i--)
4800		__free_page(eb->pages[i - 1]);
4801	__free_extent_buffer(eb);
4802	return NULL;
4803}
4804
4805struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4806						u64 start)
4807{
4808	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
4809}
4810
4811static void check_buffer_tree_ref(struct extent_buffer *eb)
4812{
4813	int refs;
4814	/* the ref bit is tricky.  We have to make sure it is set
4815	 * if we have the buffer dirty.   Otherwise the
4816	 * code to free a buffer can end up dropping a dirty
4817	 * page
4818	 *
4819	 * Once the ref bit is set, it won't go away while the
4820	 * buffer is dirty or in writeback, and it also won't
4821	 * go away while we have the reference count on the
4822	 * eb bumped.
4823	 *
4824	 * We can't just set the ref bit without bumping the
4825	 * ref on the eb because free_extent_buffer might
4826	 * see the ref bit and try to clear it.  If this happens
4827	 * free_extent_buffer might end up dropping our original
4828	 * ref by mistake and freeing the page before we are able
4829	 * to add one more ref.
4830	 *
4831	 * So bump the ref count first, then set the bit.  If someone
4832	 * beat us to it, drop the ref we added.
4833	 */
4834	refs = atomic_read(&eb->refs);
4835	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4836		return;
4837
4838	spin_lock(&eb->refs_lock);
4839	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4840		atomic_inc(&eb->refs);
4841	spin_unlock(&eb->refs_lock);
4842}
4843
4844static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4845		struct page *accessed)
4846{
4847	unsigned long num_pages, i;
4848
4849	check_buffer_tree_ref(eb);
4850
4851	num_pages = num_extent_pages(eb->start, eb->len);
4852	for (i = 0; i < num_pages; i++) {
4853		struct page *p = eb->pages[i];
4854
4855		if (p != accessed)
4856			mark_page_accessed(p);
4857	}
4858}
4859
4860struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4861					 u64 start)
4862{
4863	struct extent_buffer *eb;
4864
4865	rcu_read_lock();
4866	eb = radix_tree_lookup(&fs_info->buffer_radix,
4867			       start >> PAGE_SHIFT);
4868	if (eb && atomic_inc_not_zero(&eb->refs)) {
4869		rcu_read_unlock();
4870		/*
4871		 * Lock our eb's refs_lock to avoid races with
4872		 * free_extent_buffer. When we get our eb it might be flagged
4873		 * with EXTENT_BUFFER_STALE and another task running
4874		 * free_extent_buffer might have seen that flag set,
4875		 * eb->refs == 2, that the buffer isn't under IO (dirty and
4876		 * writeback flags not set) and it's still in the tree (flag
4877		 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4878		 * of decrementing the extent buffer's reference count twice.
4879		 * So here we could race and increment the eb's reference count,
4880		 * clear its stale flag, mark it as dirty and drop our reference
4881		 * before the other task finishes executing free_extent_buffer,
4882		 * which would later result in an attempt to free an extent
4883		 * buffer that is dirty.
4884		 */
4885		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4886			spin_lock(&eb->refs_lock);
4887			spin_unlock(&eb->refs_lock);
4888		}
4889		mark_extent_buffer_accessed(eb, NULL);
4890		return eb;
4891	}
4892	rcu_read_unlock();
4893
4894	return NULL;
4895}
4896
4897#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4898struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4899					u64 start)
4900{
4901	struct extent_buffer *eb, *exists = NULL;
4902	int ret;
4903
4904	eb = find_extent_buffer(fs_info, start);
4905	if (eb)
4906		return eb;
4907	eb = alloc_dummy_extent_buffer(fs_info, start);
4908	if (!eb)
4909		return NULL;
4910	eb->fs_info = fs_info;
4911again:
4912	ret = radix_tree_preload(GFP_NOFS);
4913	if (ret)
4914		goto free_eb;
4915	spin_lock(&fs_info->buffer_lock);
4916	ret = radix_tree_insert(&fs_info->buffer_radix,
4917				start >> PAGE_SHIFT, eb);
4918	spin_unlock(&fs_info->buffer_lock);
4919	radix_tree_preload_end();
4920	if (ret == -EEXIST) {
4921		exists = find_extent_buffer(fs_info, start);
4922		if (exists)
4923			goto free_eb;
4924		else
4925			goto again;
4926	}
4927	check_buffer_tree_ref(eb);
4928	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4929
4930	/*
4931	 * We will free dummy extent buffers if they come into
4932	 * free_extent_buffer with a ref count of 2, but if we are using this we
4933	 * want the buffers to stay in memory until we're done with them, so
4934	 * bump the ref count again.
4935	 */
4936	atomic_inc(&eb->refs);
4937	return eb;
4938free_eb:
4939	btrfs_release_extent_buffer(eb);
4940	return exists;
4941}
4942#endif
4943
4944struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4945					  u64 start)
4946{
4947	unsigned long len = fs_info->nodesize;
4948	unsigned long num_pages = num_extent_pages(start, len);
4949	unsigned long i;
4950	unsigned long index = start >> PAGE_SHIFT;
4951	struct extent_buffer *eb;
4952	struct extent_buffer *exists = NULL;
4953	struct page *p;
4954	struct address_space *mapping = fs_info->btree_inode->i_mapping;
4955	int uptodate = 1;
4956	int ret;
4957
4958	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
4959		btrfs_err(fs_info, "bad tree block start %llu", start);
4960		return ERR_PTR(-EINVAL);
4961	}
4962
4963	eb = find_extent_buffer(fs_info, start);
4964	if (eb)
4965		return eb;
4966
4967	eb = __alloc_extent_buffer(fs_info, start, len);
4968	if (!eb)
4969		return ERR_PTR(-ENOMEM);
4970
4971	for (i = 0; i < num_pages; i++, index++) {
4972		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4973		if (!p) {
4974			exists = ERR_PTR(-ENOMEM);
4975			goto free_eb;
4976		}
4977
4978		spin_lock(&mapping->private_lock);
4979		if (PagePrivate(p)) {
4980			/*
4981			 * We could have already allocated an eb for this page
4982			 * and attached one so lets see if we can get a ref on
4983			 * the existing eb, and if we can we know it's good and
4984			 * we can just return that one, else we know we can just
4985			 * overwrite page->private.
4986			 */
4987			exists = (struct extent_buffer *)p->private;
4988			if (atomic_inc_not_zero(&exists->refs)) {
4989				spin_unlock(&mapping->private_lock);
4990				unlock_page(p);
4991				put_page(p);
4992				mark_extent_buffer_accessed(exists, p);
4993				goto free_eb;
4994			}
4995			exists = NULL;
4996
4997			/*
4998			 * Do this so attach doesn't complain and we need to
4999			 * drop the ref the old guy had.
5000			 */
5001			ClearPagePrivate(p);
5002			WARN_ON(PageDirty(p));
5003			put_page(p);
5004		}
5005		attach_extent_buffer_page(eb, p);
5006		spin_unlock(&mapping->private_lock);
5007		WARN_ON(PageDirty(p));
5008		eb->pages[i] = p;
5009		if (!PageUptodate(p))
5010			uptodate = 0;
5011
5012		/*
5013		 * see below about how we avoid a nasty race with release page
5014		 * and why we unlock later
5015		 */
5016	}
5017	if (uptodate)
5018		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5019again:
5020	ret = radix_tree_preload(GFP_NOFS);
5021	if (ret) {
5022		exists = ERR_PTR(ret);
5023		goto free_eb;
5024	}
5025
5026	spin_lock(&fs_info->buffer_lock);
5027	ret = radix_tree_insert(&fs_info->buffer_radix,
5028				start >> PAGE_SHIFT, eb);
5029	spin_unlock(&fs_info->buffer_lock);
5030	radix_tree_preload_end();
5031	if (ret == -EEXIST) {
5032		exists = find_extent_buffer(fs_info, start);
5033		if (exists)
5034			goto free_eb;
5035		else
5036			goto again;
5037	}
5038	/* add one reference for the tree */
5039	check_buffer_tree_ref(eb);
5040	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5041
5042	/*
5043	 * there is a race where release page may have
5044	 * tried to find this extent buffer in the radix tree
5045	 * but failed.  It will tell the VM it is safe to
5046	 * reclaim the page, and it will clear the page private bit.
5047	 * We must make sure to set the page private bit properly
5048	 * after the extent buffer is in the radix tree so
5049	 * it doesn't get lost
5050	 */
5051	SetPageChecked(eb->pages[0]);
5052	for (i = 1; i < num_pages; i++) {
5053		p = eb->pages[i];
5054		ClearPageChecked(p);
5055		unlock_page(p);
5056	}
5057	unlock_page(eb->pages[0]);
5058	return eb;
5059
5060free_eb:
5061	WARN_ON(!atomic_dec_and_test(&eb->refs));
5062	for (i = 0; i < num_pages; i++) {
5063		if (eb->pages[i])
5064			unlock_page(eb->pages[i]);
5065	}
5066
5067	btrfs_release_extent_buffer(eb);
5068	return exists;
5069}
5070
5071static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5072{
5073	struct extent_buffer *eb =
5074			container_of(head, struct extent_buffer, rcu_head);
5075
5076	__free_extent_buffer(eb);
5077}
5078
5079/* Expects to have eb->eb_lock already held */
5080static int release_extent_buffer(struct extent_buffer *eb)
5081{
5082	WARN_ON(atomic_read(&eb->refs) == 0);
5083	if (atomic_dec_and_test(&eb->refs)) {
5084		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5085			struct btrfs_fs_info *fs_info = eb->fs_info;
5086
5087			spin_unlock(&eb->refs_lock);
5088
5089			spin_lock(&fs_info->buffer_lock);
5090			radix_tree_delete(&fs_info->buffer_radix,
5091					  eb->start >> PAGE_SHIFT);
5092			spin_unlock(&fs_info->buffer_lock);
5093		} else {
5094			spin_unlock(&eb->refs_lock);
5095		}
5096
5097		/* Should be safe to release our pages at this point */
5098		btrfs_release_extent_buffer_page(eb);
5099#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5100		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
5101			__free_extent_buffer(eb);
5102			return 1;
5103		}
5104#endif
5105		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5106		return 1;
5107	}
5108	spin_unlock(&eb->refs_lock);
5109
5110	return 0;
5111}
5112
5113void free_extent_buffer(struct extent_buffer *eb)
5114{
5115	int refs;
5116	int old;
5117	if (!eb)
5118		return;
5119
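	/*
	 * Fast path sketch: while the reference count is clearly above the
	 * values the special cases below care about (dummy and stale buffers
	 * are handled at refs == 2), drop our reference with a plain cmpxchg
	 * and avoid taking refs_lock.
	 */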
5120	while (1) {
5121		refs = atomic_read(&eb->refs);
5122		if (refs <= 3)
5123			break;
5124		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5125		if (old == refs)
5126			return;
5127	}
5128
5129	spin_lock(&eb->refs_lock);
5130	if (atomic_read(&eb->refs) == 2 &&
5131	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
5132		atomic_dec(&eb->refs);
5133
5134	if (atomic_read(&eb->refs) == 2 &&
5135	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5136	    !extent_buffer_under_io(eb) &&
5137	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5138		atomic_dec(&eb->refs);
5139
5140	/*
5141	 * I know this is terrible, but it's temporary until we stop tracking
5142	 * the uptodate bits and such for the extent buffers.
5143	 */
5144	release_extent_buffer(eb);
5145}
5146
5147void free_extent_buffer_stale(struct extent_buffer *eb)
5148{
5149	if (!eb)
5150		return;
5151
5152	spin_lock(&eb->refs_lock);
5153	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5154
5155	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5156	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5157		atomic_dec(&eb->refs);
5158	release_extent_buffer(eb);
5159}
5160
5161void clear_extent_buffer_dirty(struct extent_buffer *eb)
5162{
5163	unsigned long i;
5164	unsigned long num_pages;
5165	struct page *page;
5166
5167	num_pages = num_extent_pages(eb->start, eb->len);
5168
5169	for (i = 0; i < num_pages; i++) {
5170		page = eb->pages[i];
5171		if (!PageDirty(page))
5172			continue;
5173
5174		lock_page(page);
5175		WARN_ON(!PagePrivate(page));
5176
5177		clear_page_dirty_for_io(page);
5178		xa_lock_irq(&page->mapping->i_pages);
5179		if (!PageDirty(page)) {
5180			radix_tree_tag_clear(&page->mapping->i_pages,
5181						page_index(page),
5182						PAGECACHE_TAG_DIRTY);
5183		}
5184		xa_unlock_irq(&page->mapping->i_pages);
5185		ClearPageError(page);
5186		unlock_page(page);
5187	}
5188	WARN_ON(atomic_read(&eb->refs) == 0);
5189}
5190
5191int set_extent_buffer_dirty(struct extent_buffer *eb)
5192{
5193	unsigned long i;
5194	unsigned long num_pages;
5195	int was_dirty = 0;
5196
5197	check_buffer_tree_ref(eb);
5198
5199	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5200
5201	num_pages = num_extent_pages(eb->start, eb->len);
5202	WARN_ON(atomic_read(&eb->refs) == 0);
5203	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5204
5205	for (i = 0; i < num_pages; i++)
5206		set_page_dirty(eb->pages[i]);
5207	return was_dirty;
5208}
5209
5210void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5211{
5212	unsigned long i;
5213	struct page *page;
5214	unsigned long num_pages;
5215
5216	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5217	num_pages = num_extent_pages(eb->start, eb->len);
5218	for (i = 0; i < num_pages; i++) {
5219		page = eb->pages[i];
5220		if (page)
5221			ClearPageUptodate(page);
5222	}
5223}
5224
5225void set_extent_buffer_uptodate(struct extent_buffer *eb)
5226{
5227	unsigned long i;
5228	struct page *page;
5229	unsigned long num_pages;
5230
5231	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5232	num_pages = num_extent_pages(eb->start, eb->len);
5233	for (i = 0; i < num_pages; i++) {
5234		page = eb->pages[i];
5235		SetPageUptodate(page);
5236	}
5237}
5238
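/*
 * Read the pages backing an extent buffer (summary of the logic below).
 *
 * With WAIT_NONE the call backs off as soon as a page cannot be locked
 * without blocking; with WAIT_COMPLETE it waits for every page and returns
 * -EIO if any of them ends up not uptodate.
 */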
5239int read_extent_buffer_pages(struct extent_io_tree *tree,
5240			     struct extent_buffer *eb, int wait, int mirror_num)
5241{
5242	unsigned long i;
5243	struct page *page;
5244	int err;
5245	int ret = 0;
5246	int locked_pages = 0;
5247	int all_uptodate = 1;
5248	unsigned long num_pages;
5249	unsigned long num_reads = 0;
5250	struct bio *bio = NULL;
5251	unsigned long bio_flags = 0;
5252
5253	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5254		return 0;
5255
5256	num_pages = num_extent_pages(eb->start, eb->len);
5257	for (i = 0; i < num_pages; i++) {
5258		page = eb->pages[i];
5259		if (wait == WAIT_NONE) {
5260			if (!trylock_page(page))
5261				goto unlock_exit;
5262		} else {
5263			lock_page(page);
5264		}
5265		locked_pages++;
5266	}
5267	/*
5268	 * We need to lock all pages first to make sure that
5269	 * the uptodate bit of our pages won't be affected by
5270	 * clear_extent_buffer_uptodate().
5271	 */
5272	for (i = 0; i < num_pages; i++) {
5273		page = eb->pages[i];
5274		if (!PageUptodate(page)) {
5275			num_reads++;
5276			all_uptodate = 0;
5277		}
5278	}
5279
5280	if (all_uptodate) {
5281		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5282		goto unlock_exit;
5283	}
5284
5285	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5286	eb->read_mirror = 0;
5287	atomic_set(&eb->io_pages, num_reads);
5288	for (i = 0; i < num_pages; i++) {
5289		page = eb->pages[i];
5290
5291		if (!PageUptodate(page)) {
5292			if (ret) {
5293				atomic_dec(&eb->io_pages);
5294				unlock_page(page);
5295				continue;
5296			}
5297
5298			ClearPageError(page);
5299			err = __extent_read_full_page(tree, page,
5300						      btree_get_extent, &bio,
5301						      mirror_num, &bio_flags,
5302						      REQ_META);
5303			if (err) {
5304				ret = err;
5305				/*
5306				 * We use &bio in __extent_read_full_page() above,
5307				 * so if it returns an error the current page
5308				 * failed to add itself to the bio and has
5309				 * already been unlocked.
5310				 *
5311				 * We must decrement io_pages ourselves.
5312				 */
5313				atomic_dec(&eb->io_pages);
5314			}
5315		} else {
5316			unlock_page(page);
5317		}
5318	}
5319
5320	if (bio) {
5321		err = submit_one_bio(bio, mirror_num, bio_flags);
5322		if (err)
5323			return err;
5324	}
5325
5326	if (ret || wait != WAIT_COMPLETE)
5327		return ret;
5328
5329	for (i = 0; i < num_pages; i++) {
5330		page = eb->pages[i];
5331		wait_on_page_locked(page);
5332		if (!PageUptodate(page))
5333			ret = -EIO;
5334	}
5335
5336	return ret;
5337
5338unlock_exit:
5339	while (locked_pages > 0) {
5340		locked_pages--;
5341		page = eb->pages[locked_pages];
5342		unlock_page(page);
5343	}
5344	return ret;
5345}
5346
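/*
 * The access helpers below all translate an offset inside the extent buffer
 * into a (page index, offset within page) pair.  For example, with 4K pages
 * and an eb starting at a page boundary, offset 5000 lands in page 1 at
 * byte 904 (5000 - 4096); ebs that are not page aligned add
 * eb->start & (PAGE_SIZE - 1) first.
 */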
5347void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5348			unsigned long start, unsigned long len)
5349{
5350	size_t cur;
5351	size_t offset;
5352	struct page *page;
5353	char *kaddr;
5354	char *dst = (char *)dstv;
5355	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5356	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5357
5358	if (start + len > eb->len) {
5359		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5360		     eb->start, eb->len, start, len);
5361		memset(dst, 0, len);
5362		return;
5363	}
5364
5365	offset = (start_offset + start) & (PAGE_SIZE - 1);
5366
5367	while (len > 0) {
5368		page = eb->pages[i];
5369
5370		cur = min(len, (PAGE_SIZE - offset));
5371		kaddr = page_address(page);
5372		memcpy(dst, kaddr + offset, cur);
5373
5374		dst += cur;
5375		len -= cur;
5376		offset = 0;
5377		i++;
5378	}
5379}
5380
5381int read_extent_buffer_to_user(const struct extent_buffer *eb,
5382			       void __user *dstv,
5383			       unsigned long start, unsigned long len)
5384{
5385	size_t cur;
5386	size_t offset;
5387	struct page *page;
5388	char *kaddr;
5389	char __user *dst = (char __user *)dstv;
5390	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5391	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5392	int ret = 0;
5393
5394	WARN_ON(start > eb->len);
5395	WARN_ON(start + len > eb->start + eb->len);
5396
5397	offset = (start_offset + start) & (PAGE_SIZE - 1);
5398
5399	while (len > 0) {
5400		page = eb->pages[i];
5401
5402		cur = min(len, (PAGE_SIZE - offset));
5403		kaddr = page_address(page);
5404		if (copy_to_user(dst, kaddr + offset, cur)) {
5405			ret = -EFAULT;
5406			break;
5407		}
5408
5409		dst += cur;
5410		len -= cur;
5411		offset = 0;
5412		i++;
5413	}
5414
5415	return ret;
5416}
5417
5418/*
5419 * return 0 if the item is found within a page.
5420 * return 1 if the item spans two pages.
5421 * return -EINVAL otherwise.
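 *
 * For example, a 20 byte item that starts 8 bytes before a page boundary
 * crosses into the next page, so the mapping fails and 1 is returned.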
5422 */
5423int map_private_extent_buffer(const struct extent_buffer *eb,
5424			      unsigned long start, unsigned long min_len,
5425			      char **map, unsigned long *map_start,
5426			      unsigned long *map_len)
5427{
5428	size_t offset = start & (PAGE_SIZE - 1);
5429	char *kaddr;
5430	struct page *p;
5431	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5432	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5433	unsigned long end_i = (start_offset + start + min_len - 1) >>
5434		PAGE_SHIFT;
5435
5436	if (start + min_len > eb->len) {
5437		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5438		       eb->start, eb->len, start, min_len);
5439		return -EINVAL;
5440	}
5441
5442	if (i != end_i)
5443		return 1;
5444
5445	if (i == 0) {
5446		offset = start_offset;
5447		*map_start = 0;
5448	} else {
5449		offset = 0;
5450		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5451	}
5452
5453	p = eb->pages[i];
5454	kaddr = page_address(p);
5455	*map = kaddr + offset;
5456	*map_len = PAGE_SIZE - offset;
5457	return 0;
5458}
5459
5460int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5461			 unsigned long start, unsigned long len)
5462{
5463	size_t cur;
5464	size_t offset;
5465	struct page *page;
5466	char *kaddr;
5467	char *ptr = (char *)ptrv;
5468	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5469	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5470	int ret = 0;
5471
5472	WARN_ON(start > eb->len);
5473	WARN_ON(start + len > eb->start + eb->len);
5474
5475	offset = (start_offset + start) & (PAGE_SIZE - 1);
5476
5477	while (len > 0) {
5478		page = eb->pages[i];
5479
5480		cur = min(len, (PAGE_SIZE - offset));
5481
5482		kaddr = page_address(page);
5483		ret = memcmp(ptr, kaddr + offset, cur);
5484		if (ret)
5485			break;
5486
5487		ptr += cur;
5488		len -= cur;
5489		offset = 0;
5490		i++;
5491	}
5492	return ret;
5493}
5494
5495void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5496		const void *srcv)
5497{
5498	char *kaddr;
5499
5500	WARN_ON(!PageUptodate(eb->pages[0]));
5501	kaddr = page_address(eb->pages[0]);
5502	memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5503			BTRFS_FSID_SIZE);
5504}
5505
5506void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5507{
5508	char *kaddr;
5509
5510	WARN_ON(!PageUptodate(eb->pages[0]));
5511	kaddr = page_address(eb->pages[0]);
5512	memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5513			BTRFS_FSID_SIZE);
5514}
5515
5516void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5517			 unsigned long start, unsigned long len)
5518{
5519	size_t cur;
5520	size_t offset;
5521	struct page *page;
5522	char *kaddr;
5523	char *src = (char *)srcv;
5524	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5525	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5526
5527	WARN_ON(start > eb->len);
5528	WARN_ON(start + len > eb->start + eb->len);
5529
5530	offset = (start_offset + start) & (PAGE_SIZE - 1);
5531
5532	while (len > 0) {
5533		page = eb->pages[i];
5534		WARN_ON(!PageUptodate(page));
5535
5536		cur = min(len, PAGE_SIZE - offset);
5537		kaddr = page_address(page);
5538		memcpy(kaddr + offset, src, cur);
5539
5540		src += cur;
5541		len -= cur;
5542		offset = 0;
5543		i++;
5544	}
5545}
5546
5547void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5548		unsigned long len)
5549{
5550	size_t cur;
5551	size_t offset;
5552	struct page *page;
5553	char *kaddr;
5554	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5555	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5556
5557	WARN_ON(start > eb->len);
5558	WARN_ON(start + len > eb->start + eb->len);
5559
5560	offset = (start_offset + start) & (PAGE_SIZE - 1);
5561
5562	while (len > 0) {
5563		page = eb->pages[i];
5564		WARN_ON(!PageUptodate(page));
5565
5566		cur = min(len, PAGE_SIZE - offset);
5567		kaddr = page_address(page);
5568		memset(kaddr + offset, 0, cur);
5569
5570		len -= cur;
5571		offset = 0;
5572		i++;
5573	}
5574}
5575
5576void copy_extent_buffer_full(struct extent_buffer *dst,
5577			     struct extent_buffer *src)
5578{
5579	int i;
5580	unsigned num_pages;
5581
5582	ASSERT(dst->len == src->len);
5583
5584	num_pages = num_extent_pages(dst->start, dst->len);
5585	for (i = 0; i < num_pages; i++)
5586		copy_page(page_address(dst->pages[i]),
5587				page_address(src->pages[i]));
5588}
5589
5590void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5591			unsigned long dst_offset, unsigned long src_offset,
5592			unsigned long len)
5593{
5594	u64 dst_len = dst->len;
5595	size_t cur;
5596	size_t offset;
5597	struct page *page;
5598	char *kaddr;
5599	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5600	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5601
5602	WARN_ON(src->len != dst_len);
5603
5604	offset = (start_offset + dst_offset) &
5605		(PAGE_SIZE - 1);
5606
5607	while (len > 0) {
5608		page = dst->pages[i];
5609		WARN_ON(!PageUptodate(page));
5610
5611		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5612
5613		kaddr = page_address(page);
5614		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5615
5616		src_offset += cur;
5617		len -= cur;
5618		offset = 0;
5619		i++;
5620	}
5621}
5622
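/*
 * Little endian bitmap helpers.  The partially covered first and last bytes
 * are handled with BITMAP_FIRST/LAST_BYTE_MASK; e.g. setting 12 bits starting
 * at bit 4 ORs byte 0 with 0xf0, byte 1 with 0xff and touches nothing else.
 */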
5623void le_bitmap_set(u8 *map, unsigned int start, int len)
5624{
5625	u8 *p = map + BIT_BYTE(start);
5626	const unsigned int size = start + len;
5627	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
5628	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
5629
5630	while (len - bits_to_set >= 0) {
5631		*p |= mask_to_set;
5632		len -= bits_to_set;
5633		bits_to_set = BITS_PER_BYTE;
5634		mask_to_set = ~0;
5635		p++;
5636	}
5637	if (len) {
5638		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5639		*p |= mask_to_set;
5640	}
5641}
5642
5643void le_bitmap_clear(u8 *map, unsigned int start, int len)
5644{
5645	u8 *p = map + BIT_BYTE(start);
5646	const unsigned int size = start + len;
5647	int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
5648	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);
5649
5650	while (len - bits_to_clear >= 0) {
5651		*p &= ~mask_to_clear;
5652		len -= bits_to_clear;
5653		bits_to_clear = BITS_PER_BYTE;
5654		mask_to_clear = ~0;
5655		p++;
5656	}
5657	if (len) {
5658		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5659		*p &= ~mask_to_clear;
5660	}
5661}
5662
5663/*
5664 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5665 * given bit number
5666 * @eb: the extent buffer
5667 * @start: offset of the bitmap item in the extent buffer
5668 * @nr: bit number
5669 * @page_index: return index of the page in the extent buffer that contains the
5670 * given bit number
5671 * @page_offset: return offset into the page given by page_index
5672 *
5673 * This helper hides the ugliness of finding the byte in an extent buffer which
5674 * contains a given bit.
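 *
 * For example, with 4K pages, a page aligned eb and a bitmap item at
 * @start 100, bit number 40 lives in byte 100 + 40 / 8 = 105 of the first
 * page, so *page_index is 0 and *page_offset is 105.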
5675 */
5676static inline void eb_bitmap_offset(struct extent_buffer *eb,
5677				    unsigned long start, unsigned long nr,
5678				    unsigned long *page_index,
5679				    size_t *page_offset)
5680{
5681	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5682	size_t byte_offset = BIT_BYTE(nr);
5683	size_t offset;
5684
5685	/*
5686	 * The byte we want is the offset of the extent buffer + the offset of
5687	 * the bitmap item in the extent buffer + the offset of the byte in the
5688	 * bitmap item.
5689	 */
5690	offset = start_offset + start + byte_offset;
5691
5692	*page_index = offset >> PAGE_SHIFT;
5693	*page_offset = offset & (PAGE_SIZE - 1);
5694}
5695
5696/**
5697 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5698 * @eb: the extent buffer
5699 * @start: offset of the bitmap item in the extent buffer
5700 * @nr: bit number to test
5701 */
5702int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5703			   unsigned long nr)
5704{
5705	u8 *kaddr;
5706	struct page *page;
5707	unsigned long i;
5708	size_t offset;
5709
5710	eb_bitmap_offset(eb, start, nr, &i, &offset);
5711	page = eb->pages[i];
5712	WARN_ON(!PageUptodate(page));
5713	kaddr = page_address(page);
5714	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5715}
5716
5717/**
5718 * extent_buffer_bitmap_set - set an area of a bitmap
5719 * @eb: the extent buffer
5720 * @start: offset of the bitmap item in the extent buffer
5721 * @pos: bit number of the first bit
5722 * @len: number of bits to set
5723 */
5724void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5725			      unsigned long pos, unsigned long len)
5726{
5727	u8 *kaddr;
5728	struct page *page;
5729	unsigned long i;
5730	size_t offset;
5731	const unsigned int size = pos + len;
5732	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5733	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5734
5735	eb_bitmap_offset(eb, start, pos, &i, &offset);
5736	page = eb->pages[i];
5737	WARN_ON(!PageUptodate(page));
5738	kaddr = page_address(page);
5739
5740	while (len >= bits_to_set) {
5741		kaddr[offset] |= mask_to_set;
5742		len -= bits_to_set;
5743		bits_to_set = BITS_PER_BYTE;
5744		mask_to_set = ~0;
5745		if (++offset >= PAGE_SIZE && len > 0) {
5746			offset = 0;
5747			page = eb->pages[++i];
5748			WARN_ON(!PageUptodate(page));
5749			kaddr = page_address(page);
5750		}
5751	}
5752	if (len) {
5753		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5754		kaddr[offset] |= mask_to_set;
5755	}
5756}
5757
5758
5759/**
5760 * extent_buffer_bitmap_clear - clear an area of a bitmap
5761 * @eb: the extent buffer
5762 * @start: offset of the bitmap item in the extent buffer
5763 * @pos: bit number of the first bit
5764 * @len: number of bits to clear
5765 */
5766void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5767				unsigned long pos, unsigned long len)
5768{
5769	u8 *kaddr;
5770	struct page *page;
5771	unsigned long i;
5772	size_t offset;
5773	const unsigned int size = pos + len;
5774	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5775	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5776
5777	eb_bitmap_offset(eb, start, pos, &i, &offset);
5778	page = eb->pages[i];
5779	WARN_ON(!PageUptodate(page));
5780	kaddr = page_address(page);
5781
5782	while (len >= bits_to_clear) {
5783		kaddr[offset] &= ~mask_to_clear;
5784		len -= bits_to_clear;
5785		bits_to_clear = BITS_PER_BYTE;
5786		mask_to_clear = ~0;
5787		if (++offset >= PAGE_SIZE && len > 0) {
5788			offset = 0;
5789			page = eb->pages[++i];
5790			WARN_ON(!PageUptodate(page));
5791			kaddr = page_address(page);
5792		}
5793	}
5794	if (len) {
5795		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5796		kaddr[offset] &= ~mask_to_clear;
5797	}
5798}
5799
5800static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5801{
5802	unsigned long distance = (src > dst) ? src - dst : dst - src;
5803	return distance < len;
5804}
5805
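/*
 * Copy between (possibly the same) extent buffer pages.  When source and
 * destination are the same page and the ranges overlap, e.g. shifting
 * bytes [0, 100) to [10, 110), memmove() is used instead of memcpy() so
 * source bytes are not overwritten before they have been copied.
 */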
5806static void copy_pages(struct page *dst_page, struct page *src_page,
5807		       unsigned long dst_off, unsigned long src_off,
5808		       unsigned long len)
5809{
5810	char *dst_kaddr = page_address(dst_page);
5811	char *src_kaddr;
5812	int must_memmove = 0;
5813
5814	if (dst_page != src_page) {
5815		src_kaddr = page_address(src_page);
5816	} else {
5817		src_kaddr = dst_kaddr;
5818		if (areas_overlap(src_off, dst_off, len))
5819			must_memmove = 1;
5820	}
5821
5822	if (must_memmove)
5823		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5824	else
5825		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5826}
5827
5828void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5829			   unsigned long src_offset, unsigned long len)
5830{
5831	struct btrfs_fs_info *fs_info = dst->fs_info;
5832	size_t cur;
5833	size_t dst_off_in_page;
5834	size_t src_off_in_page;
5835	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5836	unsigned long dst_i;
5837	unsigned long src_i;
5838
5839	if (src_offset + len > dst->len) {
5840		btrfs_err(fs_info,
5841			"memmove bogus src_offset %lu move len %lu dst len %lu",
5842			 src_offset, len, dst->len);
5843		BUG_ON(1);
5844	}
5845	if (dst_offset + len > dst->len) {
5846		btrfs_err(fs_info,
5847			"memmove bogus dst_offset %lu move len %lu dst len %lu",
5848			 dst_offset, len, dst->len);
5849		BUG_ON(1);
5850	}
5851
5852	while (len > 0) {
5853		dst_off_in_page = (start_offset + dst_offset) &
5854			(PAGE_SIZE - 1);
5855		src_off_in_page = (start_offset + src_offset) &
5856			(PAGE_SIZE - 1);
5857
5858		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5859		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5860
5861		cur = min(len, (unsigned long)(PAGE_SIZE -
5862					       src_off_in_page));
5863		cur = min_t(unsigned long, cur,
5864			(unsigned long)(PAGE_SIZE - dst_off_in_page));
5865
5866		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5867			   dst_off_in_page, src_off_in_page, cur);
5868
5869		src_offset += cur;
5870		dst_offset += cur;
5871		len -= cur;
5872	}
5873}
5874
5875void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5876			   unsigned long src_offset, unsigned long len)
5877{
5878	struct btrfs_fs_info *fs_info = dst->fs_info;
5879	size_t cur;
5880	size_t dst_off_in_page;
5881	size_t src_off_in_page;
5882	unsigned long dst_end = dst_offset + len - 1;
5883	unsigned long src_end = src_offset + len - 1;
5884	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5885	unsigned long dst_i;
5886	unsigned long src_i;
5887
5888	if (src_offset + len > dst->len) {
5889		btrfs_err(fs_info,
5890			  "memmove bogus src_offset %lu move len %lu len %lu",
5891			  src_offset, len, dst->len);
5892		BUG_ON(1);
5893	}
5894	if (dst_offset + len > dst->len) {
5895		btrfs_err(fs_info,
5896			  "memmove bogus dst_offset %lu move len %lu len %lu",
5897			  dst_offset, len, dst->len);
5898		BUG_ON(1);
5899	}
5900	if (dst_offset < src_offset) {
5901		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5902		return;
5903	}
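	/*
	 * dst is ahead of src within the same buffer, so copy backwards from
	 * the end of both ranges; each chunk is read before the move can
	 * overwrite it.
	 */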
5904	while (len > 0) {
5905		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5906		src_i = (start_offset + src_end) >> PAGE_SHIFT;
5907
5908		dst_off_in_page = (start_offset + dst_end) &
5909			(PAGE_SIZE - 1);
5910		src_off_in_page = (start_offset + src_end) &
5911			(PAGE_SIZE - 1);
5912
5913		cur = min_t(unsigned long, len, src_off_in_page + 1);
5914		cur = min(cur, dst_off_in_page + 1);
5915		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5916			   dst_off_in_page - cur + 1,
5917			   src_off_in_page - cur + 1, cur);
5918
5919		dst_end -= cur;
5920		src_end -= cur;
5921		len -= cur;
5922	}
5923}
5924
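/*
 * Called when the VM wants to release @page: drop the tree reference on the
 * extent buffer attached to it, but only if nobody else holds a reference
 * and the buffer is not dirty or under writeback.  Returns 1 if the buffer
 * could be released, 0 otherwise.
 */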
5925int try_release_extent_buffer(struct page *page)
5926{
5927	struct extent_buffer *eb;
5928
5929	/*
5930	 * We need to make sure nobody is attaching this page to an eb right
5931	 * now.
5932	 */
5933	spin_lock(&page->mapping->private_lock);
5934	if (!PagePrivate(page)) {
5935		spin_unlock(&page->mapping->private_lock);
5936		return 1;
5937	}
5938
5939	eb = (struct extent_buffer *)page->private;
5940	BUG_ON(!eb);
5941
5942	/*
5943	 * This is a little awful but should be ok, we need to make sure that
5944	 * the eb doesn't disappear out from under us while we're looking at
5945	 * this page.
5946	 */
5947	spin_lock(&eb->refs_lock);
5948	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5949		spin_unlock(&eb->refs_lock);
5950		spin_unlock(&page->mapping->private_lock);
5951		return 0;
5952	}
5953	spin_unlock(&page->mapping->private_lock);
5954
5955	/*
5956	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5957	 * so just return, this page will likely be freed soon anyway.
5958	 */
5959	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5960		spin_unlock(&eb->refs_lock);
5961		return 0;
5962	}
5963
5964	return release_extent_buffer(eb);
5965}