   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/bitops.h>
   4#include <linux/slab.h>
   5#include <linux/bio.h>
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/page-flags.h>
   9#include <linux/sched/mm.h>
  10#include <linux/spinlock.h>
  11#include <linux/blkdev.h>
  12#include <linux/swap.h>
  13#include <linux/writeback.h>
  14#include <linux/pagevec.h>
  15#include <linux/prefetch.h>
  16#include <linux/fsverity.h>
  17#include "misc.h"
  18#include "extent_io.h"
  19#include "extent-io-tree.h"
  20#include "extent_map.h"
  21#include "ctree.h"
  22#include "btrfs_inode.h"
  23#include "bio.h"
  24#include "check-integrity.h"
  25#include "locking.h"
  26#include "rcu-string.h"
  27#include "backref.h"
  28#include "disk-io.h"
  29#include "subpage.h"
  30#include "zoned.h"
  31#include "block-group.h"
  32#include "compression.h"
  33#include "fs.h"
  34#include "accessors.h"
  35#include "file-item.h"
  36#include "file.h"
  37#include "dev-replace.h"
  38#include "super.h"
  39
  40static struct kmem_cache *extent_buffer_cache;
  41
  42#ifdef CONFIG_BTRFS_DEBUG
  43static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
  44{
  45	struct btrfs_fs_info *fs_info = eb->fs_info;
  46	unsigned long flags;
  47
  48	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  49	list_add(&eb->leak_list, &fs_info->allocated_ebs);
  50	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  51}
  52
  53static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
  54{
  55	struct btrfs_fs_info *fs_info = eb->fs_info;
  56	unsigned long flags;
  57
  58	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  59	list_del(&eb->leak_list);
  60	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  61}
  62
  63void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
  64{
  65	struct extent_buffer *eb;
  66	unsigned long flags;
  67
  68	/*
  69	 * If we didn't get into open_ctree our allocated_ebs will not be
  70	 * initialized, so just skip this.
  71	 */
  72	if (!fs_info->allocated_ebs.next)
  73		return;
  74
  75	WARN_ON(!list_empty(&fs_info->allocated_ebs));
  76	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
  77	while (!list_empty(&fs_info->allocated_ebs)) {
  78		eb = list_first_entry(&fs_info->allocated_ebs,
  79				      struct extent_buffer, leak_list);
  80		pr_err(
  81	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
  82		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
  83		       btrfs_header_owner(eb));
  84		list_del(&eb->leak_list);
  85		kmem_cache_free(extent_buffer_cache, eb);
  86	}
  87	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
  88}
  89#else
  90#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
  91#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
  92#endif
  93
  94/*
  95 * Structure to record info about the bio being assembled, and other info like
   96 * how many bytes are left before the stripe/ordered extent boundary.
  97 */
  98struct btrfs_bio_ctrl {
  99	struct bio *bio;
 100	int mirror_num;
 101	enum btrfs_compression_type compress_type;
 102	u32 len_to_stripe_boundary;
 103	u32 len_to_oe_boundary;
 104	btrfs_bio_end_io_t end_io_func;
 105
 106	/*
  107	 * This is for metadata reads, to provide the extra verification info
  108	 * that is needed.  It has to be provided for submit_one_bio(), as
  109	 * submit_one_bio() can submit a bio when it ends at a stripe boundary.
  110	 * If no such parent_check is provided, the metadata read can trigger a
  111	 * false alert at endio time.
 112	 */
 113	struct btrfs_tree_parent_check *parent_check;
 114
 115	/*
  116	 * Tell writepage not to lock the state bits for this range; it still
 117	 * does the unlocking.
 118	 */
 119	bool extent_locked;
 120
 121	/* Tell the submit_bio code to use REQ_SYNC */
 122	bool sync_io;
 123};
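/*
 * Typical usage (as seen in the helpers below): the caller sets up a
 * bio_ctrl, submit_extent_page() keeps appending page ranges to
 * bio_ctrl->bio via btrfs_bio_add_page(), and submit_one_bio() flushes
 * whatever has been accumulated, e.g. when a stripe/ordered-extent/zone
 * boundary is hit or at the end of the whole operation.
 */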
 124
 125static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 126{
 127	struct bio *bio;
 128	struct bio_vec *bv;
 129	struct btrfs_inode *inode;
 130	int mirror_num;
 131
 132	if (!bio_ctrl->bio)
 133		return;
 134
 135	bio = bio_ctrl->bio;
 136	bv = bio_first_bvec_all(bio);
 137	inode = BTRFS_I(bv->bv_page->mapping->host);
 138	mirror_num = bio_ctrl->mirror_num;
 139
 140	/* Caller should ensure the bio has at least some range added */
 141	ASSERT(bio->bi_iter.bi_size);
 142
 143	btrfs_bio(bio)->file_offset = page_offset(bv->bv_page) + bv->bv_offset;
 144
 145	if (!is_data_inode(&inode->vfs_inode)) {
 146		if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
 147			/*
 148			 * For metadata read, we should have the parent_check,
 149			 * and copy it to bbio for metadata verification.
 150			 */
 151			ASSERT(bio_ctrl->parent_check);
 152			memcpy(&btrfs_bio(bio)->parent_check,
 153			       bio_ctrl->parent_check,
 154			       sizeof(struct btrfs_tree_parent_check));
 155		}
 156		btrfs_submit_metadata_bio(inode, bio, mirror_num);
 157	} else if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
 158		btrfs_submit_data_write_bio(inode, bio, mirror_num);
 159	} else {
 160		btrfs_submit_data_read_bio(inode, bio, mirror_num,
 161					   bio_ctrl->compress_type);
 162	}
 163
 164	/* The bio is owned by the end_io handler now */
 165	bio_ctrl->bio = NULL;
 166}
 167
 168/*
 169 * Submit or fail the current bio in the bio_ctrl structure.
 170 */
 171static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
 172{
 173	struct bio *bio = bio_ctrl->bio;
 174
 175	if (!bio)
 176		return;
 177
 178	if (ret) {
 179		ASSERT(ret < 0);
 180		btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
 181		/* The bio is owned by the end_io handler now */
 182		bio_ctrl->bio = NULL;
 183	} else {
 184		submit_one_bio(bio_ctrl);
 185	}
 186}
 187
 188int __init extent_buffer_init_cachep(void)
 189{
 190	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 191			sizeof(struct extent_buffer), 0,
 192			SLAB_MEM_SPREAD, NULL);
 193	if (!extent_buffer_cache)
 194		return -ENOMEM;
 195
 196	return 0;
 197}
 198
 199void __cold extent_buffer_free_cachep(void)
 200{
 201	/*
 202	 * Make sure all delayed rcu free are flushed before we
 203	 * destroy caches.
 204	 */
 205	rcu_barrier();
 206	kmem_cache_destroy(extent_buffer_cache);
 207}
 208
 209void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 210{
 211	unsigned long index = start >> PAGE_SHIFT;
 212	unsigned long end_index = end >> PAGE_SHIFT;
 213	struct page *page;
 214
 215	while (index <= end_index) {
 216		page = find_get_page(inode->i_mapping, index);
 217		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 218		clear_page_dirty_for_io(page);
 219		put_page(page);
 220		index++;
 221	}
 222}
 223
 224void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 225{
 226	struct address_space *mapping = inode->i_mapping;
 227	unsigned long index = start >> PAGE_SHIFT;
 228	unsigned long end_index = end >> PAGE_SHIFT;
 229	struct folio *folio;
 230
 231	while (index <= end_index) {
 232		folio = filemap_get_folio(mapping, index);
 233		filemap_dirty_folio(mapping, folio);
 234		folio_account_redirty(folio);
 235		index += folio_nr_pages(folio);
 236		folio_put(folio);
 237	}
 238}
 239
 240/*
 241 * Process one page for __process_pages_contig().
 242 *
 243 * Return >0 if we hit @page == @locked_page.
 244 * Return 0 if we updated the page status.
  245 * Return -EAGAIN if we need to try again.
  246 * (For the PAGE_LOCK case, when the page is no longer dirty or not in the mapping)
 247 */
 248static int process_one_page(struct btrfs_fs_info *fs_info,
 249			    struct address_space *mapping,
 250			    struct page *page, struct page *locked_page,
 251			    unsigned long page_ops, u64 start, u64 end)
 252{
 253	u32 len;
 254
 255	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
 256	len = end + 1 - start;
 257
 258	if (page_ops & PAGE_SET_ORDERED)
 259		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
 260	if (page_ops & PAGE_SET_ERROR)
 261		btrfs_page_clamp_set_error(fs_info, page, start, len);
 262	if (page_ops & PAGE_START_WRITEBACK) {
 263		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
 264		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
 265	}
 266	if (page_ops & PAGE_END_WRITEBACK)
 267		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
 268
 269	if (page == locked_page)
 270		return 1;
 271
 272	if (page_ops & PAGE_LOCK) {
 273		int ret;
 274
 275		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
 276		if (ret)
 277			return ret;
 278		if (!PageDirty(page) || page->mapping != mapping) {
 279			btrfs_page_end_writer_lock(fs_info, page, start, len);
 280			return -EAGAIN;
 281		}
 282	}
 283	if (page_ops & PAGE_UNLOCK)
 284		btrfs_page_end_writer_lock(fs_info, page, start, len);
 285	return 0;
 286}
 287
 288static int __process_pages_contig(struct address_space *mapping,
 289				  struct page *locked_page,
 290				  u64 start, u64 end, unsigned long page_ops,
 291				  u64 *processed_end)
 292{
 293	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
 294	pgoff_t start_index = start >> PAGE_SHIFT;
 295	pgoff_t end_index = end >> PAGE_SHIFT;
 296	pgoff_t index = start_index;
 297	unsigned long pages_processed = 0;
 298	struct folio_batch fbatch;
 299	int err = 0;
 300	int i;
 301
 302	if (page_ops & PAGE_LOCK) {
 303		ASSERT(page_ops == PAGE_LOCK);
 304		ASSERT(processed_end && *processed_end == start);
 305	}
 306
 307	if ((page_ops & PAGE_SET_ERROR) && start_index <= end_index)
 308		mapping_set_error(mapping, -EIO);
 309
 310	folio_batch_init(&fbatch);
 311	while (index <= end_index) {
 312		int found_folios;
 313
 314		found_folios = filemap_get_folios_contig(mapping, &index,
 315				end_index, &fbatch);
 316
 317		if (found_folios == 0) {
 318			/*
  319			 * We can only find nothing at @index if we're going to
  320			 * lock these pages.
 321			 */
 322			ASSERT(page_ops & PAGE_LOCK);
 323			err = -EAGAIN;
 324			goto out;
 325		}
 326
 327		for (i = 0; i < found_folios; i++) {
 328			int process_ret;
 329			struct folio *folio = fbatch.folios[i];
 330			process_ret = process_one_page(fs_info, mapping,
 331					&folio->page, locked_page, page_ops,
 332					start, end);
 333			if (process_ret < 0) {
 334				err = -EAGAIN;
 335				folio_batch_release(&fbatch);
 336				goto out;
 337			}
 338			pages_processed += folio_nr_pages(folio);
 339		}
 340		folio_batch_release(&fbatch);
 341		cond_resched();
 342	}
 343out:
 344	if (err && processed_end) {
 345		/*
 346		 * Update @processed_end. I know this is awful since it has
 347		 * two different return value patterns (inclusive vs exclusive).
 348		 *
  349		 * But the exclusive pattern is necessary if @start is 0, otherwise
  350		 * we underflow and the check against processed_end won't work as
  351		 * expected.
 352		 */
 353		if (pages_processed)
 354			*processed_end = min(end,
 355			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
 356		else
 357			*processed_end = start;
 358	}
 359	return err;
 360}
 361
 362static noinline void __unlock_for_delalloc(struct inode *inode,
 363					   struct page *locked_page,
 364					   u64 start, u64 end)
 365{
 366	unsigned long index = start >> PAGE_SHIFT;
 367	unsigned long end_index = end >> PAGE_SHIFT;
 368
 369	ASSERT(locked_page);
 370	if (index == locked_page->index && end_index == index)
 371		return;
 372
 373	__process_pages_contig(inode->i_mapping, locked_page, start, end,
 374			       PAGE_UNLOCK, NULL);
 375}
 376
 377static noinline int lock_delalloc_pages(struct inode *inode,
 378					struct page *locked_page,
 379					u64 delalloc_start,
 380					u64 delalloc_end)
 381{
 382	unsigned long index = delalloc_start >> PAGE_SHIFT;
 383	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
 384	u64 processed_end = delalloc_start;
 385	int ret;
 386
 387	ASSERT(locked_page);
 388	if (index == locked_page->index && index == end_index)
 389		return 0;
 390
 391	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
 392				     delalloc_end, PAGE_LOCK, &processed_end);
 393	if (ret == -EAGAIN && processed_end > delalloc_start)
 394		__unlock_for_delalloc(inode, locked_page, delalloc_start,
 395				      processed_end);
 396	return ret;
 397}
 398
 399/*
 400 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 401 * more than @max_bytes.
 402 *
 403 * @start:	The original start bytenr to search.
 404 *		Will store the extent range start bytenr.
 405 * @end:	The original end bytenr of the search range
 406 *		Will store the extent range end bytenr.
 407 *
 408 * Return true if we find a delalloc range which starts inside the original
 409 * range, and @start/@end will store the delalloc range start/end.
 410 *
 411 * Return false if we can't find any delalloc range which starts inside the
 412 * original range, and @start/@end will be the non-delalloc range start/end.
 413 */
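/*
 * Example with illustrative values: if *start == 0, *end == 1MiB - 1 and only
 * bytes 64K..128K - 1 are marked delalloc, this returns true with
 * *start == 64K and *end == 128K - 1, with both the pages and the extent
 * state bits of that sub-range locked.
 */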
 414EXPORT_FOR_TESTS
 415noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 416				    struct page *locked_page, u64 *start,
 417				    u64 *end)
 418{
 419	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 420	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 421	const u64 orig_start = *start;
 422	const u64 orig_end = *end;
 423	/* The sanity tests may not set a valid fs_info. */
 424	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
 425	u64 delalloc_start;
 426	u64 delalloc_end;
 427	bool found;
 428	struct extent_state *cached_state = NULL;
 429	int ret;
 430	int loops = 0;
 431
 432	/* Caller should pass a valid @end to indicate the search range end */
 433	ASSERT(orig_end > orig_start);
 434
 435	/* The range should at least cover part of the page */
 436	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
 437		 orig_end <= page_offset(locked_page)));
 438again:
 439	/* step one, find a bunch of delalloc bytes starting at start */
 440	delalloc_start = *start;
 441	delalloc_end = 0;
 442	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 443					  max_bytes, &cached_state);
 444	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
 445		*start = delalloc_start;
 446
 447		/* @delalloc_end can be -1, never go beyond @orig_end */
 448		*end = min(delalloc_end, orig_end);
 449		free_extent_state(cached_state);
 450		return false;
 451	}
 452
 453	/*
 454	 * start comes from the offset of locked_page.  We have to lock
 455	 * pages in order, so we can't process delalloc bytes before
 456	 * locked_page
 457	 */
 458	if (delalloc_start < *start)
 459		delalloc_start = *start;
 460
 461	/*
 462	 * make sure to limit the number of pages we try to lock down
 463	 */
 464	if (delalloc_end + 1 - delalloc_start > max_bytes)
 465		delalloc_end = delalloc_start + max_bytes - 1;
 466
 467	/* step two, lock all the pages after the page that has start */
 468	ret = lock_delalloc_pages(inode, locked_page,
 469				  delalloc_start, delalloc_end);
 470	ASSERT(!ret || ret == -EAGAIN);
 471	if (ret == -EAGAIN) {
  472		/* some of the pages are gone, let's avoid looping by
 473		 * shortening the size of the delalloc range we're searching
 474		 */
 475		free_extent_state(cached_state);
 476		cached_state = NULL;
 477		if (!loops) {
 478			max_bytes = PAGE_SIZE;
 479			loops = 1;
 480			goto again;
 481		} else {
 482			found = false;
 483			goto out_failed;
 484		}
 485	}
 486
 487	/* step three, lock the state bits for the whole range */
 488	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 489
 490	/* then test to make sure it is all still delalloc */
 491	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 492			     EXTENT_DELALLOC, 1, cached_state);
 493	if (!ret) {
 494		unlock_extent(tree, delalloc_start, delalloc_end,
 495			      &cached_state);
 496		__unlock_for_delalloc(inode, locked_page,
 497			      delalloc_start, delalloc_end);
 498		cond_resched();
 499		goto again;
 500	}
 501	free_extent_state(cached_state);
 502	*start = delalloc_start;
 503	*end = delalloc_end;
 504out_failed:
 505	return found;
 506}
 507
 508void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 509				  struct page *locked_page,
 510				  u32 clear_bits, unsigned long page_ops)
 511{
 512	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
 513
 514	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
 515			       start, end, page_ops, NULL);
 516}
 517
 518static int insert_failrec(struct btrfs_inode *inode,
 519			  struct io_failure_record *failrec)
 520{
 521	struct rb_node *exist;
 522
 523	spin_lock(&inode->io_failure_lock);
 524	exist = rb_simple_insert(&inode->io_failure_tree, failrec->bytenr,
 525				 &failrec->rb_node);
 526	spin_unlock(&inode->io_failure_lock);
 527
 528	return (exist == NULL) ? 0 : -EEXIST;
 529}
 530
 531static struct io_failure_record *get_failrec(struct btrfs_inode *inode, u64 start)
 532{
 533	struct rb_node *node;
 534	struct io_failure_record *failrec = ERR_PTR(-ENOENT);
 535
 536	spin_lock(&inode->io_failure_lock);
 537	node = rb_simple_search(&inode->io_failure_tree, start);
 538	if (node)
 539		failrec = rb_entry(node, struct io_failure_record, rb_node);
 540	spin_unlock(&inode->io_failure_lock);
 541	return failrec;
 542}
 543
 544static void free_io_failure(struct btrfs_inode *inode,
 545			    struct io_failure_record *rec)
 546{
 547	spin_lock(&inode->io_failure_lock);
 548	rb_erase(&rec->rb_node, &inode->io_failure_tree);
 549	spin_unlock(&inode->io_failure_lock);
 550
 551	kfree(rec);
 552}
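/*
 * The failure records above live in a per-inode rbtree
 * (inode->io_failure_tree) keyed by file offset (failrec->bytenr) and
 * protected by inode->io_failure_lock; the rb_simple_* helpers do the keyed
 * insert/lookup.
 */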
 553
 554static int next_mirror(const struct io_failure_record *failrec, int cur_mirror)
 555{
 556	if (cur_mirror == failrec->num_copies)
 557		return cur_mirror + 1 - failrec->num_copies;
 558	return cur_mirror + 1;
 559}
 560
 561static int prev_mirror(const struct io_failure_record *failrec, int cur_mirror)
 562{
 563	if (cur_mirror == 1)
 564		return failrec->num_copies;
 565	return cur_mirror - 1;
 566}
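/*
 * Mirror numbers are 1-based, so both helpers above wrap around instead of
 * returning 0.  For example, with num_copies == 3, next_mirror() cycles
 * 1 -> 2 -> 3 -> 1 and prev_mirror() cycles 1 -> 3 -> 2 -> 1.
 */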
 567
 568/*
 569 * each time an IO finishes, we do a fast check in the IO failure tree
 570 * to see if we need to process or clean up an io_failure_record
 571 */
 572int btrfs_clean_io_failure(struct btrfs_inode *inode, u64 start,
 573			   struct page *page, unsigned int pg_offset)
 574{
 575	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 576	struct extent_io_tree *io_tree = &inode->io_tree;
 577	u64 ino = btrfs_ino(inode);
 578	u64 locked_start, locked_end;
 579	struct io_failure_record *failrec;
 580	int mirror;
 581	int ret;
 582
 583	failrec = get_failrec(inode, start);
 584	if (IS_ERR(failrec))
 585		return 0;
 586
 587	BUG_ON(!failrec->this_mirror);
 588
 589	if (sb_rdonly(fs_info->sb))
 590		goto out;
 591
 592	ret = find_first_extent_bit(io_tree, failrec->bytenr, &locked_start,
 593				    &locked_end, EXTENT_LOCKED, NULL);
 594	if (ret || locked_start > failrec->bytenr ||
 595	    locked_end < failrec->bytenr + failrec->len - 1)
 596		goto out;
 597
 598	mirror = failrec->this_mirror;
 599	do {
 600		mirror = prev_mirror(failrec, mirror);
 601		btrfs_repair_io_failure(fs_info, ino, start, failrec->len,
 602				  failrec->logical, page, pg_offset, mirror);
 603	} while (mirror != failrec->failed_mirror);
 604
 605out:
 606	free_io_failure(inode, failrec);
 607	return 0;
 608}
 609
 610/*
 611 * Can be called when
 612 * - hold extent lock
 613 * - under ordered extent
 614 * - the inode is freeing
 615 */
 616void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
 617{
 618	struct io_failure_record *failrec;
 619	struct rb_node *node, *next;
 620
 621	if (RB_EMPTY_ROOT(&inode->io_failure_tree))
 622		return;
 623
 624	spin_lock(&inode->io_failure_lock);
 625	node = rb_simple_search_first(&inode->io_failure_tree, start);
 626	while (node) {
 627		failrec = rb_entry(node, struct io_failure_record, rb_node);
 628		if (failrec->bytenr > end)
 629			break;
 630
 631		next = rb_next(node);
 632		rb_erase(&failrec->rb_node, &inode->io_failure_tree);
 633		kfree(failrec);
 634
 635		node = next;
 636	}
 637	spin_unlock(&inode->io_failure_lock);
 638}
 639
 640static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
 641							     struct btrfs_bio *bbio,
 642							     unsigned int bio_offset)
 643{
 644	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 645	u64 start = bbio->file_offset + bio_offset;
 646	struct io_failure_record *failrec;
 647	const u32 sectorsize = fs_info->sectorsize;
 648	int ret;
 649
 650	failrec = get_failrec(BTRFS_I(inode), start);
 651	if (!IS_ERR(failrec)) {
 652		btrfs_debug(fs_info,
 653	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
 654			failrec->logical, failrec->bytenr, failrec->len);
 655		/*
  656		 * When data can be on disk in more than two copies, add to failrec here
 657		 * (e.g. with a list for failed_mirror) to make
 658		 * clean_io_failure() clean all those errors at once.
 659		 */
 660		ASSERT(failrec->this_mirror == bbio->mirror_num);
 661		ASSERT(failrec->len == fs_info->sectorsize);
 662		return failrec;
 663	}
 664
 665	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
 666	if (!failrec)
 667		return ERR_PTR(-ENOMEM);
 668
 669	RB_CLEAR_NODE(&failrec->rb_node);
 670	failrec->bytenr = start;
 671	failrec->len = sectorsize;
 672	failrec->failed_mirror = bbio->mirror_num;
 673	failrec->this_mirror = bbio->mirror_num;
 674	failrec->logical = (bbio->iter.bi_sector << SECTOR_SHIFT) + bio_offset;
 675
 676	btrfs_debug(fs_info,
 677		    "new io failure record logical %llu start %llu",
 678		    failrec->logical, start);
 679
 680	failrec->num_copies = btrfs_num_copies(fs_info, failrec->logical, sectorsize);
 681	if (failrec->num_copies == 1) {
 682		/*
 683		 * We only have a single copy of the data, so don't bother with
 684		 * all the retry and error correction code that follows. No
 685		 * matter what the error is, it is very likely to persist.
 686		 */
 687		btrfs_debug(fs_info,
 688			"cannot repair logical %llu num_copies %d",
 689			failrec->logical, failrec->num_copies);
 690		kfree(failrec);
 691		return ERR_PTR(-EIO);
 692	}
 693
 694	/* Set the bits in the private failure tree */
 695	ret = insert_failrec(BTRFS_I(inode), failrec);
 696	if (ret) {
 697		kfree(failrec);
 698		return ERR_PTR(ret);
 699	}
 700
 701	return failrec;
 702}
 703
 704int btrfs_repair_one_sector(struct btrfs_inode *inode, struct btrfs_bio *failed_bbio,
 705			    u32 bio_offset, struct page *page, unsigned int pgoff,
 706			    bool submit_buffered)
 707{
 708	u64 start = failed_bbio->file_offset + bio_offset;
 709	struct io_failure_record *failrec;
 710	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 711	struct bio *failed_bio = &failed_bbio->bio;
 712	const int icsum = bio_offset >> fs_info->sectorsize_bits;
 713	struct bio *repair_bio;
 714	struct btrfs_bio *repair_bbio;
 715
 716	btrfs_debug(fs_info,
 717		   "repair read error: read error at %llu", start);
 718
 719	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
 720
 721	failrec = btrfs_get_io_failure_record(&inode->vfs_inode, failed_bbio, bio_offset);
 722	if (IS_ERR(failrec))
 723		return PTR_ERR(failrec);
 724
 725	/*
 726	 * There are two premises:
 727	 * a) deliver good data to the caller
 728	 * b) correct the bad sectors on disk
 729	 *
 730	 * Since we're only doing repair for one sector, we only need to get
 731	 * a good copy of the failed sector and if we succeed, we have setup
 732	 * everything for btrfs_repair_io_failure to do the rest for us.
 733	 */
 734	failrec->this_mirror = next_mirror(failrec, failrec->this_mirror);
 735	if (failrec->this_mirror == failrec->failed_mirror) {
 736		btrfs_debug(fs_info,
 737			"failed to repair num_copies %d this_mirror %d failed_mirror %d",
 738			failrec->num_copies, failrec->this_mirror, failrec->failed_mirror);
 739		free_io_failure(inode, failrec);
 740		return -EIO;
 741	}
 742
 743	repair_bio = btrfs_bio_alloc(1, REQ_OP_READ, failed_bbio->end_io,
 744				     failed_bbio->private);
 745	repair_bbio = btrfs_bio(repair_bio);
 746	repair_bbio->file_offset = start;
 747	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
 748
 749	if (failed_bbio->csum) {
 750		const u32 csum_size = fs_info->csum_size;
 751
 752		repair_bbio->csum = repair_bbio->csum_inline;
 753		memcpy(repair_bbio->csum,
 754		       failed_bbio->csum + csum_size * icsum, csum_size);
 755	}
 756
 757	bio_add_page(repair_bio, page, failrec->len, pgoff);
 758	repair_bbio->iter = repair_bio->bi_iter;
 759
 760	btrfs_debug(fs_info,
 761		    "repair read error: submitting new read to mirror %d",
 762		    failrec->this_mirror);
 763
 764	/*
 765	 * At this point we have a bio, so any errors from bio submission will
 766	 * be handled by the endio on the repair_bio, so we can't return an
 767	 * error here.
 768	 */
 769	if (submit_buffered)
 770		btrfs_submit_data_read_bio(inode, repair_bio,
 771					   failrec->this_mirror, 0);
 772	else
 773		btrfs_submit_dio_repair_bio(inode, repair_bio, failrec->this_mirror);
 774
 775	return BLK_STS_OK;
 776}
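/*
 * In short: the repair path re-reads the single failed sector from the next
 * mirror.  If that read succeeds, end_bio_extent_readpage() calls
 * btrfs_clean_io_failure(), which walks the remaining mirrors with
 * btrfs_repair_io_failure() to rewrite the good copy over the bad ones and
 * then drops the failure record.
 */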
 777
 778static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
 779{
 780	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
 781
 782	ASSERT(page_offset(page) <= start &&
 783	       start + len <= page_offset(page) + PAGE_SIZE);
 784
 785	if (uptodate) {
 786		if (fsverity_active(page->mapping->host) &&
 787		    !PageError(page) &&
 788		    !PageUptodate(page) &&
 789		    start < i_size_read(page->mapping->host) &&
 790		    !fsverity_verify_page(page)) {
 791			btrfs_page_set_error(fs_info, page, start, len);
 792		} else {
 793			btrfs_page_set_uptodate(fs_info, page, start, len);
 794		}
 795	} else {
 796		btrfs_page_clear_uptodate(fs_info, page, start, len);
 797		btrfs_page_set_error(fs_info, page, start, len);
 798	}
 799
 800	if (!btrfs_is_subpage(fs_info, page))
 801		unlock_page(page);
 802	else
 803		btrfs_subpage_end_reader(fs_info, page, start, len);
 804}
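/*
 * Note the split above: with a regular sectorsize the whole page is unlocked
 * directly, while in subpage mode btrfs_subpage_end_reader() only drops the
 * reader count for this sector range, and the page is unlocked once the last
 * outstanding reader of the page finishes.
 */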
 805
 806static void end_sector_io(struct page *page, u64 offset, bool uptodate)
 807{
 808	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 809	const u32 sectorsize = inode->root->fs_info->sectorsize;
 810
 811	end_page_read(page, uptodate, offset, sectorsize);
 812	unlock_extent(&inode->io_tree, offset, offset + sectorsize - 1, NULL);
 813}
 814
 815static void submit_data_read_repair(struct inode *inode,
 816				    struct btrfs_bio *failed_bbio,
 817				    u32 bio_offset, const struct bio_vec *bvec,
 818				    unsigned int error_bitmap)
 819{
 820	const unsigned int pgoff = bvec->bv_offset;
 821	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 822	struct page *page = bvec->bv_page;
 823	const u64 start = page_offset(bvec->bv_page) + bvec->bv_offset;
 824	const u64 end = start + bvec->bv_len - 1;
 825	const u32 sectorsize = fs_info->sectorsize;
 826	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
 827	int i;
 828
 829	BUG_ON(bio_op(&failed_bbio->bio) == REQ_OP_WRITE);
 830
 831	/* This repair is only for data */
 832	ASSERT(is_data_inode(inode));
 833
 834	/* We're here because we had some read errors or csum mismatch */
 835	ASSERT(error_bitmap);
 836
 837	/*
 838	 * We only get called on buffered IO, thus page must be mapped and bio
 839	 * must not be cloned.
 840	 */
 841	ASSERT(page->mapping && !bio_flagged(&failed_bbio->bio, BIO_CLONED));
 842
 843	/* Iterate through all the sectors in the range */
 844	for (i = 0; i < nr_bits; i++) {
 845		const unsigned int offset = i * sectorsize;
 846		bool uptodate = false;
 847		int ret;
 848
 849		if (!(error_bitmap & (1U << i))) {
 850			/*
 851			 * This sector has no error, just end the page read
 852			 * and unlock the range.
 853			 */
 854			uptodate = true;
 855			goto next;
 856		}
 857
 858		ret = btrfs_repair_one_sector(BTRFS_I(inode), failed_bbio,
 859				bio_offset + offset, page, pgoff + offset,
 860				true);
 861		if (!ret) {
 862			/*
 863			 * We have submitted the read repair, the page release
 864			 * will be handled by the endio function of the
 865			 * submitted repair bio.
  866			 * Thus we don't need to do anything here.
 867			 */
 868			continue;
 869		}
 870		/*
 871		 * Continue on failed repair, otherwise the remaining sectors
 872		 * will not be properly unlocked.
 873		 */
 874next:
 875		end_sector_io(page, start + offset, uptodate);
 876	}
 877}
 878
 879/* lots and lots of room for performance fixes in the end_bio funcs */
 880
 881void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 882{
 883	struct btrfs_inode *inode;
 884	const bool uptodate = (err == 0);
 885	int ret = 0;
 886
 887	ASSERT(page && page->mapping);
 888	inode = BTRFS_I(page->mapping->host);
 889	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
 890
 891	if (!uptodate) {
 892		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
 893		u32 len;
 894
 895		ASSERT(end + 1 - start <= U32_MAX);
 896		len = end + 1 - start;
 897
 898		btrfs_page_clear_uptodate(fs_info, page, start, len);
 899		btrfs_page_set_error(fs_info, page, start, len);
 900		ret = err < 0 ? err : -EIO;
 901		mapping_set_error(page->mapping, ret);
 902	}
 903}
 904
 905/*
 906 * after a writepage IO is done, we need to:
 907 * clear the uptodate bits on error
 908 * clear the writeback bits in the extent tree for this IO
 909 * end_page_writeback if the page has no more pending IO
 910 *
 911 * Scheduling is not allowed, so the extent state tree is expected
 912 * to have one and only one object corresponding to this IO.
 913 */
 914static void end_bio_extent_writepage(struct btrfs_bio *bbio)
 915{
 916	struct bio *bio = &bbio->bio;
 917	int error = blk_status_to_errno(bio->bi_status);
 918	struct bio_vec *bvec;
 919	u64 start;
 920	u64 end;
 921	struct bvec_iter_all iter_all;
 922	bool first_bvec = true;
 923
 924	ASSERT(!bio_flagged(bio, BIO_CLONED));
 925	bio_for_each_segment_all(bvec, bio, iter_all) {
 926		struct page *page = bvec->bv_page;
 927		struct inode *inode = page->mapping->host;
 928		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 929		const u32 sectorsize = fs_info->sectorsize;
 930
 931		/* Our read/write should always be sector aligned. */
 932		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
 933			btrfs_err(fs_info,
 934		"partial page write in btrfs with offset %u and length %u",
 935				  bvec->bv_offset, bvec->bv_len);
 936		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
 937			btrfs_info(fs_info,
 938		"incomplete page write with offset %u and length %u",
 939				   bvec->bv_offset, bvec->bv_len);
 940
 941		start = page_offset(page) + bvec->bv_offset;
 942		end = start + bvec->bv_len - 1;
 943
 944		if (first_bvec) {
 945			btrfs_record_physical_zoned(inode, start, bio);
 946			first_bvec = false;
 947		}
 948
 949		end_extent_writepage(page, error, start, end);
 950
 951		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
 952	}
 953
 954	bio_put(bio);
 955}
 956
 957/*
 958 * Record previously processed extent range
 959 *
 960 * For endio_readpage_release_extent() to handle a full extent range, reducing
 961 * the extent io operations.
 962 */
 963struct processed_extent {
 964	struct btrfs_inode *inode;
 965	/* Start of the range in @inode */
 966	u64 start;
 967	/* End of the range in @inode */
 968	u64 end;
 969	bool uptodate;
 970};
 971
 972/*
 973 * Try to release processed extent range
 974 *
 975 * May not release the extent range right now if the current range is
 976 * contiguous to processed extent.
 977 *
  978 * Will release the processed extent when @inode or @uptodate changes, or when
  979 * the range is no longer contiguous to the processed range.
 980 *
 981 * Passing @inode == NULL will force processed extent to be released.
 982 */
 983static void endio_readpage_release_extent(struct processed_extent *processed,
 984			      struct btrfs_inode *inode, u64 start, u64 end,
 985			      bool uptodate)
 986{
 987	struct extent_state *cached = NULL;
 988	struct extent_io_tree *tree;
 989
 990	/* The first extent, initialize @processed */
 991	if (!processed->inode)
 992		goto update;
 993
 994	/*
  995	 * Contiguous to the processed extent, just update the end.
 996	 *
 997	 * Several things to notice:
 998	 *
  999	 * - bio can be merged as long as the on-disk bytenr is contiguous
 1000	 *   This means we can have pages belonging to other inodes, thus we need
 1001	 *   to check if the inode still matches.
 1002	 * - a bvec can contain a range beyond the current page for multi-page bvecs
 1003	 *   Thus we need to do the processed->end + 1 >= start check
1004	 */
1005	if (processed->inode == inode && processed->uptodate == uptodate &&
1006	    processed->end + 1 >= start && end >= processed->end) {
1007		processed->end = end;
1008		return;
1009	}
1010
1011	tree = &processed->inode->io_tree;
1012	/*
1013	 * Now we don't have range contiguous to the processed range, release
1014	 * the processed range now.
1015	 */
1016	unlock_extent(tree, processed->start, processed->end, &cached);
1017
1018update:
1019	/* Update processed to current range */
1020	processed->inode = inode;
1021	processed->start = start;
1022	processed->end = end;
1023	processed->uptodate = uptodate;
1024}
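/*
 * Example: if a bio covers file ranges [0, 16K) and [16K, 32K) of the same
 * inode and both complete with the same uptodate status, the first call only
 * records [0, 16K) and the second just extends processed->end to 32K - 1; the
 * single unlock_extent() happens later, either when a non-contiguous range
 * shows up or via the final call with inode == NULL.
 */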
1025
1026static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
1027{
1028	ASSERT(PageLocked(page));
1029	if (!btrfs_is_subpage(fs_info, page))
1030		return;
1031
1032	ASSERT(PagePrivate(page));
1033	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
1034}
1035
1036/*
 1037 * Find extent buffer for a given bytenr.
1038 *
1039 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
1040 * in endio context.
1041 */
1042static struct extent_buffer *find_extent_buffer_readpage(
1043		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
1044{
1045	struct extent_buffer *eb;
1046
1047	/*
1048	 * For regular sectorsize, we can use page->private to grab extent
1049	 * buffer
1050	 */
1051	if (fs_info->nodesize >= PAGE_SIZE) {
1052		ASSERT(PagePrivate(page) && page->private);
1053		return (struct extent_buffer *)page->private;
1054	}
1055
1056	/* For subpage case, we need to lookup buffer radix tree */
1057	rcu_read_lock();
1058	eb = radix_tree_lookup(&fs_info->buffer_radix,
1059			       bytenr >> fs_info->sectorsize_bits);
1060	rcu_read_unlock();
1061	ASSERT(eb);
1062	return eb;
1063}
1064
1065/*
1066 * after a readpage IO is done, we need to:
1067 * clear the uptodate bits on error
1068 * set the uptodate bits if things worked
1069 * set the page up to date if all extents in the tree are uptodate
1070 * clear the lock bit in the extent tree
1071 * unlock the page if there are no other extents locked for it
1072 *
1073 * Scheduling is not allowed, so the extent state tree is expected
1074 * to have one and only one object corresponding to this IO.
1075 */
1076static void end_bio_extent_readpage(struct btrfs_bio *bbio)
1077{
1078	struct bio *bio = &bbio->bio;
1079	struct bio_vec *bvec;
1080	struct processed_extent processed = { 0 };
1081	/*
 1082	 * The offset from the beginning of the bio; since one bio can never be
 1083	 * larger than UINT_MAX, u32 here is enough.
1084	 */
1085	u32 bio_offset = 0;
1086	int mirror;
1087	struct bvec_iter_all iter_all;
1088
1089	ASSERT(!bio_flagged(bio, BIO_CLONED));
1090	bio_for_each_segment_all(bvec, bio, iter_all) {
1091		bool uptodate = !bio->bi_status;
1092		struct page *page = bvec->bv_page;
1093		struct inode *inode = page->mapping->host;
1094		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1095		const u32 sectorsize = fs_info->sectorsize;
1096		unsigned int error_bitmap = (unsigned int)-1;
1097		bool repair = false;
1098		u64 start;
1099		u64 end;
1100		u32 len;
1101
1102		btrfs_debug(fs_info,
1103			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
1104			bio->bi_iter.bi_sector, bio->bi_status,
1105			bbio->mirror_num);
1106
1107		/*
1108		 * We always issue full-sector reads, but if some block in a
1109		 * page fails to read, blk_update_request() will advance
1110		 * bv_offset and adjust bv_len to compensate.  Print a warning
1111		 * for unaligned offsets, and an error if they don't add up to
1112		 * a full sector.
1113		 */
1114		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
1115			btrfs_err(fs_info,
1116		"partial page read in btrfs with offset %u and length %u",
1117				  bvec->bv_offset, bvec->bv_len);
1118		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
1119				     sectorsize))
1120			btrfs_info(fs_info,
1121		"incomplete page read with offset %u and length %u",
1122				   bvec->bv_offset, bvec->bv_len);
1123
1124		start = page_offset(page) + bvec->bv_offset;
1125		end = start + bvec->bv_len - 1;
1126		len = bvec->bv_len;
1127
1128		mirror = bbio->mirror_num;
1129		if (likely(uptodate)) {
1130			if (is_data_inode(inode)) {
1131				error_bitmap = btrfs_verify_data_csum(bbio,
1132						bio_offset, page, start, end);
1133				if (error_bitmap)
1134					uptodate = false;
1135			} else {
1136				if (btrfs_validate_metadata_buffer(bbio,
1137						page, start, end, mirror))
1138					uptodate = false;
1139			}
1140		}
1141
1142		if (likely(uptodate)) {
1143			loff_t i_size = i_size_read(inode);
1144			pgoff_t end_index = i_size >> PAGE_SHIFT;
1145
1146			btrfs_clean_io_failure(BTRFS_I(inode), start, page, 0);
1147
1148			/*
1149			 * Zero out the remaining part if this range straddles
1150			 * i_size.
1151			 *
1152			 * Here we should only zero the range inside the bvec,
1153			 * not touch anything else.
1154			 *
1155			 * NOTE: i_size is exclusive while end is inclusive.
1156			 */
1157			if (page->index == end_index && i_size <= end) {
1158				u32 zero_start = max(offset_in_page(i_size),
1159						     offset_in_page(start));
1160
1161				zero_user_segment(page, zero_start,
1162						  offset_in_page(end) + 1);
1163			}
1164		} else if (is_data_inode(inode)) {
1165			/*
1166			 * Only try to repair bios that actually made it to a
1167			 * device.  If the bio failed to be submitted mirror
1168			 * is 0 and we need to fail it without retrying.
1169			 *
1170			 * This also includes the high level bios for compressed
1171			 * extents - these never make it to a device and repair
1172			 * is already handled on the lower compressed bio.
1173			 */
1174			if (mirror > 0)
1175				repair = true;
1176		} else {
1177			struct extent_buffer *eb;
1178
1179			eb = find_extent_buffer_readpage(fs_info, page, start);
1180			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
1181			eb->read_mirror = mirror;
1182			atomic_dec(&eb->io_pages);
1183		}
1184
1185		if (repair) {
1186			/*
1187			 * submit_data_read_repair() will handle all the good
1188			 * and bad sectors, we just continue to the next bvec.
1189			 */
1190			submit_data_read_repair(inode, bbio, bio_offset, bvec,
1191						error_bitmap);
1192		} else {
1193			/* Update page status and unlock */
1194			end_page_read(page, uptodate, start, len);
1195			endio_readpage_release_extent(&processed, BTRFS_I(inode),
1196					start, end, PageUptodate(page));
1197		}
1198
1199		ASSERT(bio_offset + len > bio_offset);
1200		bio_offset += len;
1201
1202	}
1203	/* Release the last extent */
1204	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
1205	btrfs_bio_free_csum(bbio);
1206	bio_put(bio);
1207}
1208
1209/*
1210 * Populate every free slot in a provided array with pages.
1211 *
1212 * @nr_pages:   number of pages to allocate
1213 * @page_array: the array to fill with pages; any existing non-null entries in
1214 * 		the array will be skipped
1215 *
1216 * Return: 0        if all pages were able to be allocated;
1217 *         -ENOMEM  otherwise, and the caller is responsible for freeing all
1218 *                  non-null page pointers in the array.
1219 */
1220int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
1221{
1222	unsigned int allocated;
1223
1224	for (allocated = 0; allocated < nr_pages;) {
1225		unsigned int last = allocated;
1226
1227		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
1228
1229		if (allocated == nr_pages)
1230			return 0;
1231
1232		/*
1233		 * During this iteration, no page could be allocated, even
1234		 * though alloc_pages_bulk_array() falls back to alloc_page()
 1235		 * if it could not bulk-allocate. So we must be out of memory.
1236		 */
1237		if (allocated == last)
1238			return -ENOMEM;
1239
1240		memalloc_retry_wait(GFP_NOFS);
1241	}
1242	return 0;
1243}
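/*
 * Caller-side sketch (illustrative only, not a real caller): on failure every
 * non-NULL entry must still be freed by the caller, e.g.
 *
 *	struct page *pages[16] = { NULL };
 *	int i;
 *
 *	if (btrfs_alloc_page_array(16, pages)) {
 *		for (i = 0; i < 16; i++)
 *			if (pages[i])
 *				__free_page(pages[i]);
 *	}
 */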
1244
1245/*
1246 * Attempt to add a page to bio.
1247 *
1248 * @bio_ctrl:       record both the bio, and its bio_flags
1249 * @page:	    page to add to the bio
1250 * @disk_bytenr:    offset of the new bio or to check whether we are adding
1251 *                  a contiguous page to the previous one
1252 * @size:	    portion of page that we want to write
1253 * @pg_offset:	    starting offset in the page
1254 * @compress_type:  compression type of the current bio to see if we can merge them
1255 *
1256 * Attempt to add a page to bio considering stripe alignment etc.
1257 *
1258 * Return >= 0 for the number of bytes added to the bio.
1259 * Can return 0 if the current bio is already at stripe/zone boundary.
1260 * Return <0 for error.
1261 */
1262static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
1263			      struct page *page,
1264			      u64 disk_bytenr, unsigned int size,
1265			      unsigned int pg_offset,
1266			      enum btrfs_compression_type compress_type)
1267{
1268	struct bio *bio = bio_ctrl->bio;
1269	u32 bio_size = bio->bi_iter.bi_size;
1270	u32 real_size;
1271	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
1272	bool contig = false;
1273	int ret;
1274
1275	ASSERT(bio);
1276	/* The limit should be calculated when bio_ctrl->bio is allocated */
1277	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
1278	if (bio_ctrl->compress_type != compress_type)
1279		return 0;
1280
1281
1282	if (bio->bi_iter.bi_size == 0) {
1283		/* We can always add a page into an empty bio. */
1284		contig = true;
1285	} else if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE) {
1286		struct bio_vec *bvec = bio_last_bvec_all(bio);
1287
1288		/*
1289		 * The contig check requires the following conditions to be met:
 1290		 * 1) The pages belong to the same inode
1291		 *    This is implied by the call chain.
1292		 *
1293		 * 2) The range has adjacent logical bytenr
1294		 *
1295		 * 3) The range has adjacent file offset
1296		 *    This is required for the usage of btrfs_bio->file_offset.
1297		 */
1298		if (bio_end_sector(bio) == sector &&
1299		    page_offset(bvec->bv_page) + bvec->bv_offset +
1300		    bvec->bv_len == page_offset(page) + pg_offset)
1301			contig = true;
1302	} else {
1303		/*
1304		 * For compression, all IO should have its logical bytenr
1305		 * set to the starting bytenr of the compressed extent.
1306		 */
1307		contig = bio->bi_iter.bi_sector == sector;
1308	}
1309
1310	if (!contig)
1311		return 0;
1312
1313	real_size = min(bio_ctrl->len_to_oe_boundary,
1314			bio_ctrl->len_to_stripe_boundary) - bio_size;
1315	real_size = min(real_size, size);
1316
1317	/*
 1318	 * If real_size is 0, never call bio_add_*_page(), as even when the size
 1319	 * is 0, the bio will still execute its endio function on the page!
1320	 */
1321	if (real_size == 0)
1322		return 0;
1323
1324	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1325		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
1326	else
1327		ret = bio_add_page(bio, page, real_size, pg_offset);
1328
1329	return ret;
1330}
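/*
 * Example of the clamping above: if only 4K are left before the ordered
 * extent boundary (len_to_oe_boundary - bio_size == 4K) and the caller asks
 * for 16K, at most 4K is added here; submit_extent_page() then sees
 * added < size, submits the current bio and retries the remaining bytes with
 * a fresh bio.
 */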
1331
1332static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
1333			       struct btrfs_inode *inode, u64 file_offset)
1334{
1335	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1336	struct btrfs_io_geometry geom;
1337	struct btrfs_ordered_extent *ordered;
1338	struct extent_map *em;
1339	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
1340	int ret;
1341
1342	/*
 1343	 * Pages for a compressed extent are never submitted to disk directly,
 1344	 * thus they have no real boundary, just set both limits to U32_MAX.
 1345	 *
 1346	 * The split happens for the real compressed bio, which is done in
 1347	 * btrfs_submit_compressed_read/write().
1348	 */
1349	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
1350		bio_ctrl->len_to_oe_boundary = U32_MAX;
1351		bio_ctrl->len_to_stripe_boundary = U32_MAX;
1352		return 0;
1353	}
1354	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
1355	if (IS_ERR(em))
1356		return PTR_ERR(em);
1357	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
1358				    logical, &geom);
1359	free_extent_map(em);
1360	if (ret < 0) {
1361		return ret;
1362	}
1363	if (geom.len > U32_MAX)
1364		bio_ctrl->len_to_stripe_boundary = U32_MAX;
1365	else
1366		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
1367
1368	if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
1369		bio_ctrl->len_to_oe_boundary = U32_MAX;
1370		return 0;
1371	}
1372
1373	/* Ordered extent not yet created, so we're good */
1374	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
1375	if (!ordered) {
1376		bio_ctrl->len_to_oe_boundary = U32_MAX;
1377		return 0;
1378	}
1379
1380	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
1381		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
1382	btrfs_put_ordered_extent(ordered);
1383	return 0;
1384}
1385
1386static int alloc_new_bio(struct btrfs_inode *inode,
1387			 struct btrfs_bio_ctrl *bio_ctrl,
1388			 struct writeback_control *wbc,
1389			 blk_opf_t opf,
1390			 u64 disk_bytenr, u32 offset, u64 file_offset,
1391			 enum btrfs_compression_type compress_type)
1392{
1393	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1394	struct bio *bio;
1395	int ret;
1396
1397	ASSERT(bio_ctrl->end_io_func);
1398
1399	bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, bio_ctrl->end_io_func, NULL);
1400	/*
1401	 * For compressed page range, its disk_bytenr is always @disk_bytenr
1402	 * passed in, no matter if we have added any range into previous bio.
1403	 */
1404	if (compress_type != BTRFS_COMPRESS_NONE)
1405		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
1406	else
1407		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
1408	bio_ctrl->bio = bio;
1409	bio_ctrl->compress_type = compress_type;
1410	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
1411	if (ret < 0)
1412		goto error;
1413
1414	if (wbc) {
1415		/*
1416		 * For Zone append we need the correct block_device that we are
1417		 * going to write to set in the bio to be able to respect the
1418		 * hardware limitation.  Look it up here:
1419		 */
1420		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1421			struct btrfs_device *dev;
1422
1423			dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
1424						     fs_info->sectorsize);
1425			if (IS_ERR(dev)) {
1426				ret = PTR_ERR(dev);
1427				goto error;
1428			}
1429
1430			bio_set_dev(bio, dev->bdev);
1431		} else {
1432			/*
1433			 * Otherwise pick the last added device to support
1434			 * cgroup writeback.  For multi-device file systems this
1435			 * means blk-cgroup policies have to always be set on the
1436			 * last added/replaced device.  This is a bit odd but has
1437			 * been like that for a long time.
1438			 */
1439			bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
1440		}
1441		wbc_init_bio(wbc, bio);
1442	} else {
1443		ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
1444	}
1445	return 0;
1446error:
1447	bio_ctrl->bio = NULL;
1448	btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
1449	return ret;
1450}
1451
1452/*
1453 * @opf:	bio REQ_OP_* and REQ_* flags as one value
1454 * @wbc:	optional writeback control for io accounting
1455 * @disk_bytenr: logical bytenr where the write will be
1456 * @page:	page to add to the bio
1457 * @size:	portion of page that we want to write to
 1458 * @pg_offset:	starting offset in the page; also used to check whether we
 1459 *              are adding a contiguous page to the previous one
1460 * @compress_type:   compress type for current bio
1461 *
 1462 * This will either add the page into the existing @bio_ctrl->bio, or allocate a
 1463 * new one in @bio_ctrl->bio.
 1464 * The mirror number for this IO should already be initialized in
1465 * @bio_ctrl->mirror_num.
1466 */
1467static int submit_extent_page(blk_opf_t opf,
1468			      struct writeback_control *wbc,
1469			      struct btrfs_bio_ctrl *bio_ctrl,
1470			      u64 disk_bytenr, struct page *page,
1471			      size_t size, unsigned long pg_offset,
1472			      enum btrfs_compression_type compress_type,
1473			      bool force_bio_submit)
1474{
1475	int ret = 0;
1476	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1477	unsigned int cur = pg_offset;
1478
1479	ASSERT(bio_ctrl);
1480
1481	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
1482	       pg_offset + size <= PAGE_SIZE);
1483
1484	ASSERT(bio_ctrl->end_io_func);
1485
1486	if (force_bio_submit)
1487		submit_one_bio(bio_ctrl);
1488
1489	while (cur < pg_offset + size) {
1490		u32 offset = cur - pg_offset;
1491		int added;
1492
1493		/* Allocate new bio if needed */
1494		if (!bio_ctrl->bio) {
1495			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
1496					    disk_bytenr, offset,
1497					    page_offset(page) + cur,
1498					    compress_type);
1499			if (ret < 0)
1500				return ret;
1501		}
1502		/*
1503		 * We must go through btrfs_bio_add_page() to ensure each
1504		 * page range won't cross various boundaries.
1505		 */
1506		if (compress_type != BTRFS_COMPRESS_NONE)
1507			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
1508					size - offset, pg_offset + offset,
1509					compress_type);
1510		else
1511			added = btrfs_bio_add_page(bio_ctrl, page,
1512					disk_bytenr + offset, size - offset,
1513					pg_offset + offset, compress_type);
1514
1515		/* Metadata page range should never be split */
1516		if (!is_data_inode(&inode->vfs_inode))
1517			ASSERT(added == 0 || added == size - offset);
1518
1519		/* At least we added some page, update the account */
1520		if (wbc && added)
1521			wbc_account_cgroup_owner(wbc, page, added);
1522
1523		/* We have reached boundary, submit right now */
1524		if (added < size - offset) {
1525			/* The bio should contain some page(s) */
1526			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
1527			submit_one_bio(bio_ctrl);
1528		}
1529		cur += added;
1530	}
1531	return 0;
1532}
1533
1534static int attach_extent_buffer_page(struct extent_buffer *eb,
1535				     struct page *page,
1536				     struct btrfs_subpage *prealloc)
1537{
1538	struct btrfs_fs_info *fs_info = eb->fs_info;
1539	int ret = 0;
1540
1541	/*
1542	 * If the page is mapped to btree inode, we should hold the private
1543	 * lock to prevent race.
1544	 * For cloned or dummy extent buffers, their pages are not mapped and
1545	 * will not race with any other ebs.
1546	 */
1547	if (page->mapping)
1548		lockdep_assert_held(&page->mapping->private_lock);
1549
1550	if (fs_info->nodesize >= PAGE_SIZE) {
1551		if (!PagePrivate(page))
1552			attach_page_private(page, eb);
1553		else
1554			WARN_ON(page->private != (unsigned long)eb);
1555		return 0;
1556	}
1557
1558	/* Already mapped, just free prealloc */
1559	if (PagePrivate(page)) {
1560		btrfs_free_subpage(prealloc);
1561		return 0;
1562	}
1563
1564	if (prealloc)
1565		/* Has preallocated memory for subpage */
1566		attach_page_private(page, prealloc);
1567	else
1568		/* Do new allocation to attach subpage */
1569		ret = btrfs_attach_subpage(fs_info, page,
1570					   BTRFS_SUBPAGE_METADATA);
1571	return ret;
1572}
1573
1574int set_page_extent_mapped(struct page *page)
1575{
1576	struct btrfs_fs_info *fs_info;
1577
1578	ASSERT(page->mapping);
1579
1580	if (PagePrivate(page))
1581		return 0;
1582
1583	fs_info = btrfs_sb(page->mapping->host->i_sb);
1584
1585	if (btrfs_is_subpage(fs_info, page))
1586		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
1587
1588	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
1589	return 0;
1590}
1591
1592void clear_page_extent_mapped(struct page *page)
1593{
1594	struct btrfs_fs_info *fs_info;
1595
1596	ASSERT(page->mapping);
1597
1598	if (!PagePrivate(page))
1599		return;
1600
1601	fs_info = btrfs_sb(page->mapping->host->i_sb);
1602	if (btrfs_is_subpage(fs_info, page))
1603		return btrfs_detach_subpage(fs_info, page);
1604
1605	detach_page_private(page);
1606}
1607
1608static struct extent_map *
1609__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
1610		 u64 start, u64 len, struct extent_map **em_cached)
1611{
1612	struct extent_map *em;
1613
1614	if (em_cached && *em_cached) {
1615		em = *em_cached;
1616		if (extent_map_in_tree(em) && start >= em->start &&
1617		    start < extent_map_end(em)) {
1618			refcount_inc(&em->refs);
1619			return em;
1620		}
1621
1622		free_extent_map(em);
1623		*em_cached = NULL;
1624	}
1625
1626	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
1627	if (em_cached && !IS_ERR(em)) {
1628		BUG_ON(*em_cached);
1629		refcount_inc(&em->refs);
1630		*em_cached = em;
1631	}
1632	return em;
1633}
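/*
 * The *em_cached extent map lets consecutive calls (e.g. from readahead of
 * adjacent pages) reuse the same extent map reference instead of doing a new
 * btrfs_get_extent() lookup for every page, as long as the cached map still
 * covers the requested offset.
 */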
1634/*
 1635 * Basic readpage implementation.  Locked extent state structs are inserted
 1636 * into the tree and are removed when the IO is done (by the end_io
 1637 * handlers).
1638 * XXX JDM: This needs looking at to ensure proper page locking
1639 * return 0 on success, otherwise return error
1640 */
1641static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1642		      struct btrfs_bio_ctrl *bio_ctrl,
1643		      blk_opf_t read_flags, u64 *prev_em_start)
1644{
1645	struct inode *inode = page->mapping->host;
1646	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1647	u64 start = page_offset(page);
1648	const u64 end = start + PAGE_SIZE - 1;
1649	u64 cur = start;
1650	u64 extent_offset;
1651	u64 last_byte = i_size_read(inode);
1652	u64 block_start;
1653	struct extent_map *em;
1654	int ret = 0;
1655	size_t pg_offset = 0;
1656	size_t iosize;
1657	size_t blocksize = inode->i_sb->s_blocksize;
1658	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1659
1660	ret = set_page_extent_mapped(page);
1661	if (ret < 0) {
1662		unlock_extent(tree, start, end, NULL);
1663		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
1664		unlock_page(page);
1665		goto out;
1666	}
1667
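	/*
	 * If this is the last page of the file, zero the part of the page
	 * past i_size so stale data beyond EOF is never exposed.
	 */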
1668	if (page->index == last_byte >> PAGE_SHIFT) {
1669		size_t zero_offset = offset_in_page(last_byte);
1670
1671		if (zero_offset) {
1672			iosize = PAGE_SIZE - zero_offset;
1673			memzero_page(page, zero_offset, iosize);
1674		}
1675	}
1676	bio_ctrl->end_io_func = end_bio_extent_readpage;
1677	begin_page_read(fs_info, page);
1678	while (cur <= end) {
1679		unsigned long this_bio_flag = 0;
1680		bool force_bio_submit = false;
1681		u64 disk_bytenr;
1682
1683		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1684		if (cur >= last_byte) {
1685			iosize = PAGE_SIZE - pg_offset;
1686			memzero_page(page, pg_offset, iosize);
1687			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1688			end_page_read(page, true, cur, iosize);
1689			break;
1690		}
1691		em = __get_extent_map(inode, page, pg_offset, cur,
1692				      end - cur + 1, em_cached);
1693		if (IS_ERR(em)) {
1694			unlock_extent(tree, cur, end, NULL);
1695			end_page_read(page, false, cur, end + 1 - cur);
1696			ret = PTR_ERR(em);
1697			break;
1698		}
1699		extent_offset = cur - em->start;
1700		BUG_ON(extent_map_end(em) <= cur);
1701		BUG_ON(end < cur);
1702
1703		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1704			this_bio_flag = em->compress_type;
1705
1706		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1707		iosize = ALIGN(iosize, blocksize);
1708		if (this_bio_flag != BTRFS_COMPRESS_NONE)
1709			disk_bytenr = em->block_start;
1710		else
1711			disk_bytenr = em->block_start + extent_offset;
1712		block_start = em->block_start;
1713		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1714			block_start = EXTENT_MAP_HOLE;
1715
1716		/*
1717		 * If we have a file range that points to a compressed extent
1718		 * and it's followed by a consecutive file range that points
1719		 * to the same compressed extent (possibly with a different
1720		 * offset and/or length, so it either points to the whole extent
1721		 * or only part of it), we must make sure we do not submit a
1722		 * single bio to populate the pages for the 2 ranges because
1723		 * this makes the compressed extent read zero out the pages
1724		 * belonging to the 2nd range. Imagine the following scenario:
1725		 *
1726		 *  File layout
1727		 *  [0 - 8K]                     [8K - 24K]
1728		 *    |                               |
1729		 *    |                               |
1730		 * points to extent X,         points to extent X,
1731		 * offset 4K, length of 8K     offset 0, length 16K
1732		 *
1733		 * [extent X, compressed length = 4K uncompressed length = 16K]
1734		 *
1735		 * If the bio to read the compressed extent covers both ranges,
1736		 * it will decompress extent X into the pages belonging to the
1737		 * first range and then it will stop, zeroing out the remaining
1738		 * pages that belong to the other range that points to extent X.
1739		 * So here we make sure we submit 2 bios, one for the first
1740		 * range and another one for the second range. Both will target
1741		 * the same physical extent from disk, but we can't currently
1742		 * make the compressed bio endio callback populate the pages
1743		 * for both ranges because each compressed bio is tightly
1744		 * coupled with a single extent map, and each range can have
1745		 * an extent map with a different offset value relative to the
1746		 * uncompressed data of our extent and different lengths. This
1747		 * is a corner case so we prioritize correctness over
1748		 * non-optimal behavior (submitting 2 bios for the same extent).
1749		 */
1750		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
1751		    prev_em_start && *prev_em_start != (u64)-1 &&
1752		    *prev_em_start != em->start)
1753			force_bio_submit = true;
1754
1755		if (prev_em_start)
1756			*prev_em_start = em->start;
1757
1758		free_extent_map(em);
1759		em = NULL;
1760
1761		/* we've found a hole, just zero and go on */
1762		if (block_start == EXTENT_MAP_HOLE) {
1763			memzero_page(page, pg_offset, iosize);
1764
1765			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1766			end_page_read(page, true, cur, iosize);
1767			cur = cur + iosize;
1768			pg_offset += iosize;
1769			continue;
1770		}
1771		/* The get_extent function has already copied the data into the page */
1772		if (block_start == EXTENT_MAP_INLINE) {
1773			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1774			end_page_read(page, true, cur, iosize);
1775			cur = cur + iosize;
1776			pg_offset += iosize;
1777			continue;
1778		}
1779
1780		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
1781					 bio_ctrl, disk_bytenr, page, iosize,
1782					 pg_offset, this_bio_flag,
1783					 force_bio_submit);
1784		if (ret) {
1785			/*
1786			 * We have to unlock the remaining range, or the page
1787			 * will never be unlocked.
1788			 */
1789			unlock_extent(tree, cur, end, NULL);
1790			end_page_read(page, false, cur, end + 1 - cur);
1791			goto out;
1792		}
1793		cur = cur + iosize;
1794		pg_offset += iosize;
1795	}
1796out:
1797	return ret;
1798}
1799
1800int btrfs_read_folio(struct file *file, struct folio *folio)
1801{
1802	struct page *page = &folio->page;
1803	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1804	u64 start = page_offset(page);
1805	u64 end = start + PAGE_SIZE - 1;
1806	struct btrfs_bio_ctrl bio_ctrl = { 0 };
1807	int ret;
1808
1809	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1810
1811	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
1812	/*
1813	 * If btrfs_do_readpage() failed we will want to submit the assembled
1814	 * bio to do the cleanup.
1815	 */
1816	submit_one_bio(&bio_ctrl);
1817	return ret;
1818}
1819
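/*
 * Readahead helper: read a batch of pages covering one contiguous file range.
 *
 * Any ordered extents in [start, end] are waited out and the range is locked
 * first, then each page is read through btrfs_do_readpage(), sharing
 * @em_cached, @bio_ctrl and @prev_em_start across the whole batch.  The page
 * references taken by the caller are dropped here.
 */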
1820static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1821					u64 start, u64 end,
1822					struct extent_map **em_cached,
1823					struct btrfs_bio_ctrl *bio_ctrl,
1824					u64 *prev_em_start)
1825{
1826	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1827	int index;
1828
1829	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1830
1831	for (index = 0; index < nr_pages; index++) {
1832		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1833				  REQ_RAHEAD, prev_em_start);
1834		put_page(pages[index]);
1835	}
1836}
1837
1838/*
1839 * helper for __extent_writepage, doing all of the delayed allocation setup.
1840 *
1841 * This returns 1 if the btrfs_run_delalloc_range() function did all the work required
1842 * to write the page (copy into inline extent).  In this case the IO has
1843 * been started and the page is already unlocked.
1844 *
1845 * This returns 0 if all went well (page still locked)
1846 * This returns < 0 if there were errors (page still locked)
1847 */
1848static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1849		struct page *page, struct writeback_control *wbc)
1850{
1851	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
1852	u64 delalloc_start = page_offset(page);
1853	u64 delalloc_to_write = 0;
1854	/* How many pages are started by btrfs_run_delalloc_range() */
1855	unsigned long nr_written = 0;
1856	int ret;
1857	int page_started = 0;
1858
1859	while (delalloc_start < page_end) {
1860		u64 delalloc_end = page_end;
1861		bool found;
1862
1863		found = find_lock_delalloc_range(&inode->vfs_inode, page,
1864					       &delalloc_start,
1865					       &delalloc_end);
1866		if (!found) {
1867			delalloc_start = delalloc_end + 1;
1868			continue;
1869		}
1870		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1871				delalloc_end, &page_started, &nr_written, wbc);
1872		if (ret) {
1873			btrfs_page_set_error(inode->root->fs_info, page,
1874					     page_offset(page), PAGE_SIZE);
1875			return ret;
1876		}
1877		/*
1878		 * delalloc_end is already one less than the total length, so
1879		 * we don't subtract one from PAGE_SIZE
1880		 */
1881		delalloc_to_write += (delalloc_end - delalloc_start +
1882				      PAGE_SIZE) >> PAGE_SHIFT;
1883		delalloc_start = delalloc_end + 1;
1884	}
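	/*
	 * We may have started writeback for more delalloc pages than wbc
	 * asked for.  Adjust nr_to_write to the delalloc page count, capped
	 * at 8192 pages unless the whole range is below twice that, in which
	 * case cover it entirely.
	 */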
1885	if (wbc->nr_to_write < delalloc_to_write) {
1886		int thresh = 8192;
1887
1888		if (delalloc_to_write < thresh * 2)
1889			thresh = delalloc_to_write;
1890		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1891					 thresh);
1892	}
1893
1894	/* Did btrfs_run_delalloc_range() already unlock and start the IO? */
1895	if (page_started) {
1896		/*
1897		 * We've unlocked the page, so we can't update the mapping's
1898		 * writeback index, just update nr_to_write.
1899		 */
1900		wbc->nr_to_write -= nr_written;
1901		return 1;
1902	}
1903
1904	return 0;
1905}
1906
1907/*
1908 * Find the first byte we need to write.
1909 *
1910 * For subpage, one page can contain several sectors, and
1911 * __extent_writepage_io() will just grab all extent maps in the page
1912 * range and try to submit all non-inline/non-compressed extents.
1913 *
1914 * This is a big problem for subpage; we shouldn't re-submit already written
1915 * data at all.
1916 * This function will look up the subpage dirty bit to find which range we
1917 * really need to submit.
1918 *
1919 * Return the next dirty range in [@start, @end).
1920 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1921 */
1922static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1923				 struct page *page, u64 *start, u64 *end)
1924{
1925	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1926	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1927	u64 orig_start = *start;
1928	/* Declare as unsigned long so we can use bitmap ops */
1929	unsigned long flags;
1930	int range_start_bit;
1931	int range_end_bit;
1932
1933	/*
1934	 * For regular sector size == page size case, since one page only
1935 * contains one sector, we return the whole page range directly.
1936	 */
1937	if (!btrfs_is_subpage(fs_info, page)) {
1938		*start = page_offset(page);
1939		*end = page_offset(page) + PAGE_SIZE;
1940		return;
1941	}
1942
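	/*
	 * Convert the page offset of @orig_start into a bit index in the
	 * subpage dirty bitmap, then look for the next set region.  E.g. with
	 * a hypothetical 4K sectorsize and 64K page size, a page covers 16
	 * sectors; if only the sector at page offset 8K is dirty, the range
	 * [page_offset(page) + 8K, page_offset(page) + 12K) is returned.
	 */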
1943	range_start_bit = spi->dirty_offset +
1944			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1945
1946	/* We should have the page locked, but just in case */
1947	spin_lock_irqsave(&subpage->lock, flags);
1948	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1949			       spi->dirty_offset + spi->bitmap_nr_bits);
1950	spin_unlock_irqrestore(&subpage->lock, flags);
1951
1952	range_start_bit -= spi->dirty_offset;
1953	range_end_bit -= spi->dirty_offset;
1954
1955	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1956	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1957}
1958
1959/*
1960 * helper for __extent_writepage.  This calls the writepage start hooks,
1961 * and does the loop to map the page into extents and bios.
1962 *
1963 * We return 1 if the IO is started and the page is unlocked,
1964 * 0 if all went well (page still locked)
1965 * < 0 if there were errors (page still locked)
1966 */
1967static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1968				 struct page *page,
1969				 struct writeback_control *wbc,
1970				 struct btrfs_bio_ctrl *bio_ctrl,
1971				 loff_t i_size,
1972				 int *nr_ret)
1973{
1974	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1975	u64 cur = page_offset(page);
1976	u64 end = cur + PAGE_SIZE - 1;
1977	u64 extent_offset;
1978	u64 block_start;
1979	struct extent_map *em;
1980	int saved_ret = 0;
1981	int ret = 0;
1982	int nr = 0;
1983	enum req_op op = REQ_OP_WRITE;
1984	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1985	bool has_error = false;
1986	bool compressed;
1987
1988	ret = btrfs_writepage_cow_fixup(page);
1989	if (ret) {
1990		/* Fixup worker will requeue */
1991		redirty_page_for_writepage(wbc, page);
1992		unlock_page(page);
1993		return 1;
1994	}
1995
1996	/*
1997	 * we don't want to touch the inode after unlocking the page,
1998	 * so we update the mapping writeback index now
1999	 */
2000	wbc->nr_to_write--;
2001
2002	bio_ctrl->end_io_func = end_bio_extent_writepage;
2003	while (cur <= end) {
2004		u64 disk_bytenr;
2005		u64 em_end;
2006		u64 dirty_range_start = cur;
2007		u64 dirty_range_end;
2008		u32 iosize;
2009
2010		if (cur >= i_size) {
2011			btrfs_writepage_endio_finish_ordered(inode, page, cur,
2012							     end, true);
2013			/*
2014			 * This range is beyond i_size, thus we don't need to
2015			 * bother writing back.
2016			 * But we still need to clear the dirty subpage bit, or
2017			 * the next time the page gets dirtied, we will try to
2018			 * write back the sectors whose subpage dirty bits are
2019			 * still set, causing writeback without an ordered extent.
2020			 */
2021			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
2022			break;
2023		}
2024
2025		find_next_dirty_byte(fs_info, page, &dirty_range_start,
2026				     &dirty_range_end);
2027		if (cur < dirty_range_start) {
2028			cur = dirty_range_start;
2029			continue;
2030		}
2031
2032		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
2033		if (IS_ERR(em)) {
2034			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
2035			ret = PTR_ERR_OR_ZERO(em);
2036			has_error = true;
2037			if (!saved_ret)
2038				saved_ret = ret;
2039			break;
2040		}
2041
2042		extent_offset = cur - em->start;
2043		em_end = extent_map_end(em);
2044		ASSERT(cur <= em_end);
2045		ASSERT(cur < end);
2046		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
2047		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
2048		block_start = em->block_start;
2049		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2050		disk_bytenr = em->block_start + extent_offset;
2051
2052		/*
2053		 * Note that em_end from extent_map_end() and dirty_range_end from
2054		 * find_next_dirty_byte() are both exclusive
2055		 */
2056		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
2057
2058		if (btrfs_use_zone_append(inode, em->block_start))
2059			op = REQ_OP_ZONE_APPEND;
2060
2061		free_extent_map(em);
2062		em = NULL;
2063
2064		/*
2065		 * compressed and inline extents are written through other
2066		 * paths in the FS
2067		 */
2068		if (compressed || block_start == EXTENT_MAP_HOLE ||
2069		    block_start == EXTENT_MAP_INLINE) {
2070			if (compressed)
2071				nr++;
2072			else
2073				btrfs_writepage_endio_finish_ordered(inode,
2074						page, cur, cur + iosize - 1, true);
2075			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
2076			cur += iosize;
2077			continue;
2078		}
2079
2080		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
2081		if (!PageWriteback(page)) {
2082			btrfs_err(inode->root->fs_info,
2083				   "page %lu not writeback, cur %llu end %llu",
2084			       page->index, cur, end);
2085		}
2086
2087		/*
2088		 * Although the PageDirty bit is cleared before entering this
2089		 * function, the subpage dirty bit is not cleared.
2090		 * So clear the subpage dirty bit here so that next time we won't
2091		 * submit the page for a range already written to disk.
2092		 */
2093		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
2094
2095		ret = submit_extent_page(op | write_flags, wbc,
2096					 bio_ctrl, disk_bytenr,
2097					 page, iosize,
2098					 cur - page_offset(page),
2099					 0, false);
2100		if (ret) {
2101			has_error = true;
2102			if (!saved_ret)
2103				saved_ret = ret;
2104
2105			btrfs_page_set_error(fs_info, page, cur, iosize);
2106			if (PageWriteback(page))
2107				btrfs_page_clear_writeback(fs_info, page, cur,
2108							   iosize);
2109		}
2110
2111		cur += iosize;
2112		nr++;
2113	}
2114	/*
2115	 * If we finish without problem, we should not only clear the page
2116	 * dirty bit, but also empty the subpage dirty bits.
2117	 */
2118	if (!has_error)
2119		btrfs_page_assert_not_dirty(fs_info, page);
2120	else
2121		ret = saved_ret;
2122	*nr_ret = nr;
2123	return ret;
2124}
2125
2126/*
2127 * The writepage semantics are similar to regular writepage.  Extent
2128 * records are inserted to lock ranges in the tree, and as dirty areas
2129 * are found, they are marked for writeback.  Then the lock bits are
2130 * removed and the end_io handler clears the writeback ranges.
2131 *
2132 * Return 0 if everything goes well.
2133 * Return <0 for error.
2134 */
2135static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2136			      struct btrfs_bio_ctrl *bio_ctrl)
2137{
2138	struct folio *folio = page_folio(page);
2139	struct inode *inode = page->mapping->host;
2140	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2141	const u64 page_start = page_offset(page);
2142	const u64 page_end = page_start + PAGE_SIZE - 1;
2143	int ret;
2144	int nr = 0;
2145	size_t pg_offset;
2146	loff_t i_size = i_size_read(inode);
2147	unsigned long end_index = i_size >> PAGE_SHIFT;
2148
2149	trace___extent_writepage(page, inode, wbc);
2150
2151	WARN_ON(!PageLocked(page));
2152
2153	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
2154			       page_offset(page), PAGE_SIZE);
2155
2156	pg_offset = offset_in_page(i_size);
2157	if (page->index > end_index ||
2158	   (page->index == end_index && !pg_offset)) {
2159		folio_invalidate(folio, 0, folio_size(folio));
2160		folio_unlock(folio);
2161		return 0;
2162	}
2163
2164	if (page->index == end_index)
2165		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
2166
2167	ret = set_page_extent_mapped(page);
2168	if (ret < 0) {
2169		SetPageError(page);
2170		goto done;
2171	}
2172
2173	if (!bio_ctrl->extent_locked) {
2174		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
2175		if (ret == 1)
2176			return 0;
2177		if (ret)
2178			goto done;
2179	}
2180
2181	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, bio_ctrl, i_size,
2182				    &nr);
2183	if (ret == 1)
2184		return 0;
2185
2186done:
2187	if (nr == 0) {
2188		/* make sure the mapping tag for page dirty gets cleared */
2189		set_page_writeback(page);
2190		end_page_writeback(page);
2191	}
2192	/*
2193	 * Here we used to have a check for PageError() and then set @ret and
2194	 * call end_extent_writepage().
2195	 *
2196	 * But in fact setting @ret here will cause different error paths
2197	 * between subpage and regular sectorsize.
2198	 *
2199	 * For regular page size, we never submit current page, but only add
2200	 * current page to current bio.
2201	 * The bio submission can only happen in next page.
2202	 * Thus if we hit the PageError() branch, @ret is already set to
2203	 * non-zero value and will not get updated for regular sectorsize.
2204	 *
2205	 * But for subpage case, it's possible we submit part of current page,
2206	 * thus can get PageError() set by submitted bio of the same page,
2207	 * while our @ret is still 0.
2208	 *
2209	 * So here we unify the behavior and don't set @ret.
2210	 * Errors can still be properly passed to a higher layer, as the page
2211	 * will have its error bit set; here we just don't handle the IO failure.
2212	 *
2213	 * NOTE: This is just a hotfix for subpage.
2214	 * The root fix will be properly ending ordered extent when we hit
2215	 * an error during writeback.
2216	 *
2217	 * But that needs a bigger refactoring, as we not only need to grab the
2218	 * submitted OE, but also need to know exactly at which bytenr we hit
2219	 * the error.
2220	 * Currently the full page based __extent_writepage_io() is not
2221	 * capable of that.
2222	 */
2223	if (PageError(page))
2224		end_extent_writepage(page, ret, page_start, page_end);
2225	if (bio_ctrl->extent_locked) {
2226		/*
2227		 * If bio_ctrl->extent_locked, it's from extent_write_locked_range(),
2228		 * the page can either be locked by lock_page() or
2229		 * process_one_page().
2230		 * Let btrfs_page_unlock_writer() handle both cases.
2231		 */
2232		ASSERT(wbc);
2233		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
2234					 wbc->range_end + 1 - wbc->range_start);
2235	} else {
2236		unlock_page(page);
2237	}
2238	ASSERT(ret <= 0);
2239	return ret;
2240}
2241
2242void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
2243{
2244	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
2245		       TASK_UNINTERRUPTIBLE);
2246}
2247
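/*
 * Clear the writeback bit and wake anyone sleeping in
 * wait_on_extent_buffer_writeback().  The barrier after clear_bit() is
 * required because clear_bit() does not imply a memory barrier, and the bit
 * clear must be visible before the waiters are woken.
 */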
2248static void end_extent_buffer_writeback(struct extent_buffer *eb)
2249{
2250	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
2251	smp_mb__after_atomic();
2252	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
2253}
2254
2255/*
2256 * Lock extent buffer status and pages for writeback.
2257 *
2258 * May try to flush write bio if we can't get the lock.
2259 *
2260 * Return  0 if the extent buffer doesn't need to be submitted.
2261 *           (E.g. the extent buffer is not dirty)
2262 * Return >0 if the extent buffer is submitted to bio.
2263 * Return <0 if something went wrong, no page is locked.
2264 */
2265static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
2266			  struct btrfs_bio_ctrl *bio_ctrl)
2267{
2268	struct btrfs_fs_info *fs_info = eb->fs_info;
2269	int i, num_pages;
2270	int flush = 0;
2271	int ret = 0;
2272
2273	if (!btrfs_try_tree_write_lock(eb)) {
2274		submit_write_bio(bio_ctrl, 0);
2275		flush = 1;
2276		btrfs_tree_lock(eb);
2277	}
2278
2279	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
2280		btrfs_tree_unlock(eb);
2281		if (!bio_ctrl->sync_io)
2282			return 0;
2283		if (!flush) {
2284			submit_write_bio(bio_ctrl, 0);
2285			flush = 1;
2286		}
2287		while (1) {
2288			wait_on_extent_buffer_writeback(eb);
2289			btrfs_tree_lock(eb);
2290			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
2291				break;
2292			btrfs_tree_unlock(eb);
2293		}
2294	}
2295
2296	/*
2297	 * We need to do this to prevent races with anyone who checks if the eb is
2298	 * under IO since we can end up having no IO bits set for a short period
2299	 * of time.
2300	 */
2301	spin_lock(&eb->refs_lock);
2302	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2303		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
2304		spin_unlock(&eb->refs_lock);
2305		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2306		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
2307					 -eb->len,
2308					 fs_info->dirty_metadata_batch);
2309		ret = 1;
2310	} else {
2311		spin_unlock(&eb->refs_lock);
2312	}
2313
2314	btrfs_tree_unlock(eb);
2315
2316	/*
2317	 * Either we don't need to submit any tree block, or we're submitting
2318	 * subpage eb.
2319	 * Subpage metadata doesn't use page locking at all, so we can skip
2320	 * the page locking.
2321	 */
2322	if (!ret || fs_info->nodesize < PAGE_SIZE)
2323		return ret;
2324
2325	num_pages = num_extent_pages(eb);
2326	for (i = 0; i < num_pages; i++) {
2327		struct page *p = eb->pages[i];
2328
2329		if (!trylock_page(p)) {
2330			if (!flush) {
2331				submit_write_bio(bio_ctrl, 0);
2332				flush = 1;
2333			}
2334			lock_page(p);
2335		}
2336	}
2337
2338	return ret;
2339}
2340
2341static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
2342{
2343	struct btrfs_fs_info *fs_info = eb->fs_info;
2344
2345	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
2346	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
2347		return;
2348
2349	/*
2350	 * A read may stumble upon this buffer later, make sure that it gets an
2351	 * error and knows there was an error.
2352	 */
2353	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
2354
2355	/*
2356	 * We need to set the mapping with the IO error as well, because a write
2357	 * error will flip the file system read-only; syncfs() would then
2358	 * return 0 (because we are read-only) if we didn't modify the err seq
2359	 * for the superblock.
2360	 */
2361	mapping_set_error(page->mapping, -EIO);
2362
2363	/*
2364	 * If we error out, we should add back the dirty_metadata_bytes
2365	 * to make it consistent.
2366	 */
2367	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
2368				 eb->len, fs_info->dirty_metadata_batch);
2369
2370	/*
2371	 * If writeback for a btree extent that doesn't belong to a log tree
2372	 * failed, increment the counter transaction->eb_write_errors.
2373	 * We do this because while the transaction is running and before it's
2374	 * committing (when we call filemap_fdata[write|wait]_range against
2375	 * the btree inode), we might have
2376	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
2377	 * returns an error or an error happens during writeback, when we're
2378	 * committing the transaction we wouldn't know about it, since the pages
2379	 * may no longer be dirty nor marked for writeback (if a
2380	 * subsequent modification to the extent buffer didn't happen before the
2381	 * transaction commit), which makes filemap_fdata[write|wait]_range not
2382	 * able to find the pages tagged with SetPageError at transaction
2383	 * commit time. So if this happens we must abort the transaction,
2384	 * otherwise we commit a super block with btree roots that point to
2385	 * btree nodes/leafs whose content on disk is invalid - either garbage
2386	 * or the content of some node/leaf from a past generation that got
2387	 * cowed or deleted and is no longer valid.
2388	 *
2389	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
2390	 * not be enough - we need to distinguish between log tree extents vs
2391	 * non-log tree extents, and the next filemap_fdatawait_range() call
2392	 * will catch and clear such errors in the mapping - and that call might
2393	 * be from a log sync and not from a transaction commit. Also, checking
2394	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
2395	 * not done and would not be reliable - the eb might have been released
2396	 * from memory and reading it back again means that flag would not be
2397	 * set (since it's a runtime flag, not persisted on disk).
2398	 *
2399	 * Using the flags below in the btree inode also makes us achieve the
2400	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
2401	 * writeback for all dirty pages and before filemap_fdatawait_range()
2402	 * is called, the writeback for all dirty pages had already finished
2403	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
2404	 * filemap_fdatawait_range() would return success, as it could not know
2405	 * that writeback errors happened (the pages were no longer tagged for
2406	 * writeback).
2407	 */
2408	switch (eb->log_index) {
2409	case -1:
2410		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
2411		break;
2412	case 0:
2413		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2414		break;
2415	case 1:
2416		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2417		break;
2418	default:
2419		BUG(); /* unexpected, logic error */
2420	}
2421}
2422
2423/*
2424 * The endio specific version which won't touch any unsafe spinlock in endio
2425 * context.
2426 */
2427static struct extent_buffer *find_extent_buffer_nolock(
2428		struct btrfs_fs_info *fs_info, u64 start)
2429{
2430	struct extent_buffer *eb;
2431
2432	rcu_read_lock();
2433	eb = radix_tree_lookup(&fs_info->buffer_radix,
2434			       start >> fs_info->sectorsize_bits);
2435	if (eb && atomic_inc_not_zero(&eb->refs)) {
2436		rcu_read_unlock();
2437		return eb;
2438	}
2439	rcu_read_unlock();
2440	return NULL;
2441}
2442
2443/*
2444 * The endio function for subpage extent buffer write.
2445 *
2446 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
2447 * after all extent buffers in the page have finished their writeback.
2448 */
2449static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
2450{
2451	struct bio *bio = &bbio->bio;
2452	struct btrfs_fs_info *fs_info;
2453	struct bio_vec *bvec;
2454	struct bvec_iter_all iter_all;
2455
2456	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
2457	ASSERT(fs_info->nodesize < PAGE_SIZE);
2458
2459	ASSERT(!bio_flagged(bio, BIO_CLONED));
2460	bio_for_each_segment_all(bvec, bio, iter_all) {
2461		struct page *page = bvec->bv_page;
2462		u64 bvec_start = page_offset(page) + bvec->bv_offset;
2463		u64 bvec_end = bvec_start + bvec->bv_len - 1;
2464		u64 cur_bytenr = bvec_start;
2465
2466		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
2467
2468		/* Iterate through all extent buffers in the range */
2469		while (cur_bytenr <= bvec_end) {
2470			struct extent_buffer *eb;
2471			int done;
2472
2473			/*
2474			 * Here we can't use find_extent_buffer(), as it may
2475			 * try to lock eb->refs_lock, which is not safe in endio
2476			 * context.
2477			 */
2478			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
2479			ASSERT(eb);
2480
2481			cur_bytenr = eb->start + eb->len;
2482
2483			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
2484			done = atomic_dec_and_test(&eb->io_pages);
2485			ASSERT(done);
2486
2487			if (bio->bi_status ||
2488			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
2489				ClearPageUptodate(page);
2490				set_btree_ioerr(page, eb);
2491			}
2492
2493			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
2494						      eb->len);
2495			end_extent_buffer_writeback(eb);
2496			/*
2497			 * free_extent_buffer() will grab spinlock which is not
2498			 * safe in endio context. Thus here we manually dec
2499			 * the ref.
2500			 */
2501			atomic_dec(&eb->refs);
2502		}
2503	}
2504	bio_put(bio);
2505}
2506
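/*
 * Endio for regular (nodesize >= PAGE_SIZE) extent buffer writes: flag the
 * eb and its pages on error, end page writeback, and once the last page of
 * the eb completes (io_pages reaches zero) wake up the writeback waiters.
 */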
2507static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio)
2508{
2509	struct bio *bio = &bbio->bio;
2510	struct bio_vec *bvec;
2511	struct extent_buffer *eb;
2512	int done;
2513	struct bvec_iter_all iter_all;
2514
2515	ASSERT(!bio_flagged(bio, BIO_CLONED));
2516	bio_for_each_segment_all(bvec, bio, iter_all) {
2517		struct page *page = bvec->bv_page;
2518
2519		eb = (struct extent_buffer *)page->private;
2520		BUG_ON(!eb);
2521		done = atomic_dec_and_test(&eb->io_pages);
2522
2523		if (bio->bi_status ||
2524		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
2525			ClearPageUptodate(page);
2526			set_btree_ioerr(page, eb);
2527		}
2528
2529		end_page_writeback(page);
2530
2531		if (!done)
2532			continue;
2533
2534		end_extent_buffer_writeback(eb);
2535	}
2536
2537	bio_put(bio);
2538}
2539
2540static void prepare_eb_write(struct extent_buffer *eb)
2541{
2542	u32 nritems;
2543	unsigned long start;
2544	unsigned long end;
2545
2546	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
2547	atomic_set(&eb->io_pages, num_extent_pages(eb));
2548
2549	/* Set btree blocks beyond nritems with 0 to avoid stale content */
2550	nritems = btrfs_header_nritems(eb);
2551	if (btrfs_header_level(eb) > 0) {
2552		end = btrfs_node_key_ptr_offset(eb, nritems);
2553		memzero_extent_buffer(eb, end, eb->len - end);
2554	} else {
2555		/*
2556		 * Leaf:
2557		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
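		 * The region zeroed below is the unused gap between the end
		 * of the item headers and the start of the lowest item data,
		 * so stale kernel memory never makes it to disk.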
2558		 */
2559		start = btrfs_item_nr_offset(eb, nritems);
2560		end = btrfs_item_nr_offset(eb, 0);
2561		if (nritems == 0)
2562			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
2563		else
2564			end += btrfs_item_offset(eb, nritems - 1);
2565		memzero_extent_buffer(eb, start, end - start);
2566	}
2567}
2568
2569/*
2570 * Unlike the work in write_one_eb(), we rely completely on extent locking.
2571 * Page locking is only utilized at minimum to keep the VMM code happy.
2572 */
2573static int write_one_subpage_eb(struct extent_buffer *eb,
2574				struct writeback_control *wbc,
2575				struct btrfs_bio_ctrl *bio_ctrl)
2576{
2577	struct btrfs_fs_info *fs_info = eb->fs_info;
2578	struct page *page = eb->pages[0];
2579	blk_opf_t write_flags = wbc_to_write_flags(wbc);
2580	bool no_dirty_ebs = false;
2581	int ret;
2582
2583	prepare_eb_write(eb);
2584
2585	/* clear_page_dirty_for_io() in subpage helper needs page locked */
2586	lock_page(page);
2587	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
2588
2589	/* Check if this is the last dirty bit to update nr_written */
2590	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
2591							  eb->start, eb->len);
2592	if (no_dirty_ebs)
2593		clear_page_dirty_for_io(page);
2594
2595	bio_ctrl->end_io_func = end_bio_subpage_eb_writepage;
2596
2597	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
2598			bio_ctrl, eb->start, page, eb->len,
2599			eb->start - page_offset(page), 0, false);
2600	if (ret) {
2601		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
2602		set_btree_ioerr(page, eb);
2603		unlock_page(page);
2604
2605		if (atomic_dec_and_test(&eb->io_pages))
2606			end_extent_buffer_writeback(eb);
2607		return -EIO;
2608	}
2609	unlock_page(page);
2610	/*
2611	 * Submission finished without problem, if no range of the page is
2612	 * dirty anymore, we have submitted a page.  Update nr_written in wbc.
2613	 */
2614	if (no_dirty_ebs)
2615		wbc->nr_to_write--;
2616	return ret;
2617}
2618
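/*
 * Write back one regular (nodesize >= PAGE_SIZE) extent buffer: zero its
 * unused areas, clear dirty and set writeback on each page, and add the
 * pages to the write bio one by one.  On submission failure the remaining
 * pages are cleaned up and unlocked and -EIO is returned.
 */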
2619static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
2620			struct writeback_control *wbc,
2621			struct btrfs_bio_ctrl *bio_ctrl)
2622{
2623	u64 disk_bytenr = eb->start;
2624	int i, num_pages;
2625	blk_opf_t write_flags = wbc_to_write_flags(wbc);
2626	int ret = 0;
2627
2628	prepare_eb_write(eb);
2629
2630	bio_ctrl->end_io_func = end_bio_extent_buffer_writepage;
2631
2632	num_pages = num_extent_pages(eb);
2633	for (i = 0; i < num_pages; i++) {
2634		struct page *p = eb->pages[i];
2635
2636		clear_page_dirty_for_io(p);
2637		set_page_writeback(p);
2638		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
2639					 bio_ctrl, disk_bytenr, p,
2640					 PAGE_SIZE, 0, 0, false);
2641		if (ret) {
2642			set_btree_ioerr(p, eb);
2643			if (PageWriteback(p))
2644				end_page_writeback(p);
2645			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
2646				end_extent_buffer_writeback(eb);
2647			ret = -EIO;
2648			break;
2649		}
2650		disk_bytenr += PAGE_SIZE;
2651		wbc->nr_to_write--;
2652		unlock_page(p);
2653	}
2654
2655	if (unlikely(ret)) {
2656		for (; i < num_pages; i++) {
2657			struct page *p = eb->pages[i];
2658			clear_page_dirty_for_io(p);
2659			unlock_page(p);
2660		}
2661	}
2662
2663	return ret;
2664}
2665
2666/*
2667 * Submit one subpage btree page.
2668 *
2669 * The main differences to submit_eb_page() are:
2670 * - Page locking
2671 *   For subpage, we don't rely on page locking at all.
2672 *
2673 * - Flush write bio
2674 *   We only flush the bio if we may be unable to fit the current extent
2675 *   buffer into the current bio.
2676 *
2677 * Return >=0 for the number of submitted extent buffers.
2678 * Return <0 for fatal error.
2679 */
2680static int submit_eb_subpage(struct page *page,
2681			     struct writeback_control *wbc,
2682			     struct btrfs_bio_ctrl *bio_ctrl)
2683{
2684	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2685	int submitted = 0;
2686	u64 page_start = page_offset(page);
2687	int bit_start = 0;
2688	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
2689	int ret;
2690
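	/*
	 * Each extent buffer occupies sectors_per_node consecutive bits in
	 * the subpage dirty bitmap, e.g. 4 bits for a hypothetical 16K
	 * nodesize on a 4K sectorsize filesystem.
	 */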
2691	/* Lock and write each dirty extent buffer in the range */
2692	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
2693		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
2694		struct extent_buffer *eb;
2695		unsigned long flags;
2696		u64 start;
2697
2698		/*
2699		 * Take private lock to ensure the subpage won't be detached
2700		 * in the meantime.
2701		 */
2702		spin_lock(&page->mapping->private_lock);
2703		if (!PagePrivate(page)) {
2704			spin_unlock(&page->mapping->private_lock);
2705			break;
2706		}
2707		spin_lock_irqsave(&subpage->lock, flags);
2708		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
2709			      subpage->bitmaps)) {
2710			spin_unlock_irqrestore(&subpage->lock, flags);
2711			spin_unlock(&page->mapping->private_lock);
2712			bit_start++;
2713			continue;
2714		}
2715
2716		start = page_start + bit_start * fs_info->sectorsize;
2717		bit_start += sectors_per_node;
2718
2719		/*
2720		 * Here we just want to grab the eb without touching extra
2721		 * spin locks, so call find_extent_buffer_nolock().
2722		 */
2723		eb = find_extent_buffer_nolock(fs_info, start);
2724		spin_unlock_irqrestore(&subpage->lock, flags);
2725		spin_unlock(&page->mapping->private_lock);
2726
2727		/*
2728		 * The eb has already reached 0 refs thus find_extent_buffer()
2729		 * doesn't return it. We don't need to write back such eb
2730		 * anyway.
2731		 */
2732		if (!eb)
2733			continue;
2734
2735		ret = lock_extent_buffer_for_io(eb, bio_ctrl);
2736		if (ret == 0) {
2737			free_extent_buffer(eb);
2738			continue;
2739		}
2740		if (ret < 0) {
2741			free_extent_buffer(eb);
2742			goto cleanup;
2743		}
2744		ret = write_one_subpage_eb(eb, wbc, bio_ctrl);
2745		free_extent_buffer(eb);
2746		if (ret < 0)
2747			goto cleanup;
2748		submitted++;
2749	}
2750	return submitted;
2751
2752cleanup:
2753	/* We hit error, end bio for the submitted extent buffers */
2754	submit_write_bio(bio_ctrl, ret);
2755	return ret;
2756}
2757
2758/*
2759 * Submit all page(s) of one extent buffer.
2760 *
2761 * @page:	the page of one extent buffer
2762 * @eb_context:	to determine if we need to submit this page; if the current
2763 *		page belongs to this eb, we don't need to submit it
2764 *
2765 * The caller should pass each page in bytenr order, and here we use
2766 * @eb_context to determine if we have submitted pages of one extent buffer.
2767 *
2768 * If we have, we just skip until we hit a new page that doesn't belong to
2769 * current @eb_context.
2770 *
2771 * If not, we submit all the page(s) of the extent buffer.
2772 *
2773 * Return >0 if we have submitted the extent buffer successfully.
2774 * Return 0 if we don't need to submit the page, as it's already submitted by
2775 * previous call.
2776 * Return <0 for fatal error.
2777 */
2778static int submit_eb_page(struct page *page, struct writeback_control *wbc,
2779			  struct btrfs_bio_ctrl *bio_ctrl,
2780			  struct extent_buffer **eb_context)
2781{
2782	struct address_space *mapping = page->mapping;
2783	struct btrfs_block_group *cache = NULL;
2784	struct extent_buffer *eb;
2785	int ret;
2786
2787	if (!PagePrivate(page))
2788		return 0;
2789
2790	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
2791		return submit_eb_subpage(page, wbc, bio_ctrl);
2792
2793	spin_lock(&mapping->private_lock);
2794	if (!PagePrivate(page)) {
2795		spin_unlock(&mapping->private_lock);
2796		return 0;
2797	}
2798
2799	eb = (struct extent_buffer *)page->private;
2800
2801	/*
2802	 * Shouldn't happen and normally this would be a BUG_ON but no point
2803	 * crashing the machine for something we can survive anyway.
2804	 */
2805	if (WARN_ON(!eb)) {
2806		spin_unlock(&mapping->private_lock);
2807		return 0;
2808	}
2809
2810	if (eb == *eb_context) {
2811		spin_unlock(&mapping->private_lock);
2812		return 0;
2813	}
2814	ret = atomic_inc_not_zero(&eb->refs);
2815	spin_unlock(&mapping->private_lock);
2816	if (!ret)
2817		return 0;
2818
2819	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
2820		/*
2821		 * If for_sync, this hole will be filled by a
2822		 * transaction commit.
2823		 */
2824		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
2825			ret = -EAGAIN;
2826		else
2827			ret = 0;
2828		free_extent_buffer(eb);
2829		return ret;
2830	}
2831
2832	*eb_context = eb;
2833
2834	ret = lock_extent_buffer_for_io(eb, bio_ctrl);
2835	if (ret <= 0) {
2836		btrfs_revert_meta_write_pointer(cache, eb);
2837		if (cache)
2838			btrfs_put_block_group(cache);
2839		free_extent_buffer(eb);
2840		return ret;
2841	}
2842	if (cache) {
2843		/*
2844		 * Implies write in zoned mode. Mark the last eb in a block group.
2845		 */
2846		btrfs_schedule_zone_finish_bg(cache, eb);
2847		btrfs_put_block_group(cache);
2848	}
2849	ret = write_one_eb(eb, wbc, bio_ctrl);
2850	free_extent_buffer(eb);
2851	if (ret < 0)
2852		return ret;
2853	return 1;
2854}
2855
2856int btree_write_cache_pages(struct address_space *mapping,
2857				   struct writeback_control *wbc)
2858{
2859	struct extent_buffer *eb_context = NULL;
2860	struct btrfs_bio_ctrl bio_ctrl = {
2861		.extent_locked = 0,
2862		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
2863	};
2864	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
2865	int ret = 0;
2866	int done = 0;
2867	int nr_to_write_done = 0;
2868	struct pagevec pvec;
2869	int nr_pages;
2870	pgoff_t index;
2871	pgoff_t end;		/* Inclusive */
2872	int scanned = 0;
2873	xa_mark_t tag;
2874
2875	pagevec_init(&pvec);
2876	if (wbc->range_cyclic) {
2877		index = mapping->writeback_index; /* Start from prev offset */
2878		end = -1;
2879		/*
2880		 * Starting from the beginning does not need to cycle over the
2881		 * whole range, so mark it as scanned.
2882		 */
2883		scanned = (index == 0);
2884	} else {
2885		index = wbc->range_start >> PAGE_SHIFT;
2886		end = wbc->range_end >> PAGE_SHIFT;
2887		scanned = 1;
2888	}
2889	if (wbc->sync_mode == WB_SYNC_ALL)
2890		tag = PAGECACHE_TAG_TOWRITE;
2891	else
2892		tag = PAGECACHE_TAG_DIRTY;
2893	btrfs_zoned_meta_io_lock(fs_info);
2894retry:
2895	if (wbc->sync_mode == WB_SYNC_ALL)
2896		tag_pages_for_writeback(mapping, index, end);
2897	while (!done && !nr_to_write_done && (index <= end) &&
2898	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2899			tag))) {
2900		unsigned i;
2901
2902		for (i = 0; i < nr_pages; i++) {
2903			struct page *page = pvec.pages[i];
2904
2905			ret = submit_eb_page(page, wbc, &bio_ctrl, &eb_context);
2906			if (ret == 0)
2907				continue;
2908			if (ret < 0) {
2909				done = 1;
2910				break;
2911			}
2912
2913			/*
2914			 * the filesystem may choose to bump up nr_to_write.
2915			 * We have to make sure to honor the new nr_to_write
2916			 * at any time
2917			 */
2918			nr_to_write_done = wbc->nr_to_write <= 0;
2919		}
2920		pagevec_release(&pvec);
2921		cond_resched();
2922	}
2923	if (!scanned && !done) {
2924		/*
2925		 * We hit the last page and there is more work to be done: wrap
2926		 * back to the start of the file
2927		 */
2928		scanned = 1;
2929		index = 0;
2930		goto retry;
2931	}
2932	/*
2933	 * If something went wrong, don't allow any metadata write bio to be
2934	 * submitted.
2935	 *
2936	 * This would prevent use-after-free if we had dirty pages not
2937	 * cleaned up, which can still happen by fuzzed images.
2938	 * cleaned up, which can still happen with fuzzed images.
2939	 * - Bad extent tree
2940	 *   Allowing existing tree block to be allocated for other trees.
2941	 *
2942	 * - Log tree operations
2943	 *   Existing tree blocks get allocated to the log tree, which bumps
2944	 *   their generation, then they get cleaned in tree re-balance.
2945	 *   Such tree block will not be written back, since it's clean,
2946	 *   thus no WRITTEN flag set.
2947	 *   And after log writes back, this tree block is not traced by
2948	 *   any dirty extent_io_tree.
2949	 *
2950	 * - Offending tree block gets re-dirtied from its original owner
2951	 *   Since it has bumped generation, no WRITTEN flag, it can be
2952	 *   reused without COWing. This tree block will not be traced
2953	 *   by btrfs_transaction::dirty_pages.
2954	 *
2955	 *   Now such dirty tree block will not be cleaned by any dirty
2956	 *   extent io tree. Thus we don't want to submit such wild eb
2957	 *   if the fs already has error.
2958	 *
2959	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2960	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2961	 */
2962	if (ret > 0)
2963		ret = 0;
2964	if (!ret && BTRFS_FS_ERROR(fs_info))
2965		ret = -EROFS;
2966	submit_write_bio(&bio_ctrl, ret);
2967
2968	btrfs_zoned_meta_io_unlock(fs_info);
2969	return ret;
2970}
2971
2972/*
2973 * Walk the list of dirty pages of the given address space and write all of them.
2974 *
2975 * @mapping:   address space structure to write
2976 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2977 * @bio_ctrl:  holds context for the write, namely the bio
2978 *
2979 * If a page is already under I/O, write_cache_pages() skips it, even
2980 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2981 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2982 * and msync() need to guarantee that all the data which was dirty at the time
2983 * the call was made get new I/O started against them.  If wbc->sync_mode is
2984 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2985 * existing IO to complete.
2986 */
2987static int extent_write_cache_pages(struct address_space *mapping,
2988			     struct writeback_control *wbc,
2989			     struct btrfs_bio_ctrl *bio_ctrl)
2990{
2991	struct inode *inode = mapping->host;
2992	int ret = 0;
2993	int done = 0;
2994	int nr_to_write_done = 0;
2995	struct pagevec pvec;
2996	int nr_pages;
2997	pgoff_t index;
2998	pgoff_t end;		/* Inclusive */
2999	pgoff_t done_index;
3000	int range_whole = 0;
3001	int scanned = 0;
3002	xa_mark_t tag;
3003
3004	/*
3005	 * We have to hold onto the inode so that ordered extents can do their
3006	 * work when the IO finishes.  The alternative to this is failing to add
3007	 * an ordered extent if the igrab() fails there and that is a huge pain
3008	 * to deal with, so instead just hold onto the inode throughout the
3009	 * writepages operation.  If it fails here we are freeing up the inode
3010	 * anyway and we'd rather not waste our time writing out stuff that is
3011	 * going to be truncated anyway.
3012	 */
3013	if (!igrab(inode))
3014		return 0;
3015
3016	pagevec_init(&pvec);
3017	if (wbc->range_cyclic) {
3018		index = mapping->writeback_index; /* Start from prev offset */
3019		end = -1;
3020		/*
3021		 * Starting from the beginning does not need to cycle over the
3022		 * whole range, so mark it as scanned.
3023		 */
3024		scanned = (index == 0);
3025	} else {
3026		index = wbc->range_start >> PAGE_SHIFT;
3027		end = wbc->range_end >> PAGE_SHIFT;
3028		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3029			range_whole = 1;
3030		scanned = 1;
3031	}
3032
3033	/*
3034	 * We do the tagged writepage as long as the snapshot flush bit is set
3035	 * and we are the first one to do the filemap_flush() on this inode.
3036	 *
3037	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
3038	 * not race in and drop the bit.
3039	 */
3040	if (range_whole && wbc->nr_to_write == LONG_MAX &&
3041	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
3042			       &BTRFS_I(inode)->runtime_flags))
3043		wbc->tagged_writepages = 1;
3044
3045	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3046		tag = PAGECACHE_TAG_TOWRITE;
3047	else
3048		tag = PAGECACHE_TAG_DIRTY;
3049retry:
3050	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3051		tag_pages_for_writeback(mapping, index, end);
3052	done_index = index;
3053	while (!done && !nr_to_write_done && (index <= end) &&
3054			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
3055						&index, end, tag))) {
3056		unsigned i;
3057
3058		for (i = 0; i < nr_pages; i++) {
3059			struct page *page = pvec.pages[i];
3060
3061			done_index = page->index + 1;
3062			/*
3063			 * At this point we hold neither the i_pages lock nor
3064			 * the page lock: the page may be truncated or
3065			 * invalidated (changing page->mapping to NULL),
3066			 * or even swizzled back from swapper_space to
3067			 * tmpfs file mapping
3068			 */
3069			if (!trylock_page(page)) {
3070				submit_write_bio(bio_ctrl, 0);
3071				lock_page(page);
3072			}
3073
3074			if (unlikely(page->mapping != mapping)) {
3075				unlock_page(page);
3076				continue;
3077			}
3078
3079			if (wbc->sync_mode != WB_SYNC_NONE) {
3080				if (PageWriteback(page))
3081					submit_write_bio(bio_ctrl, 0);
3082				wait_on_page_writeback(page);
3083			}
3084
3085			if (PageWriteback(page) ||
3086			    !clear_page_dirty_for_io(page)) {
3087				unlock_page(page);
3088				continue;
3089			}
3090
3091			ret = __extent_writepage(page, wbc, bio_ctrl);
3092			if (ret < 0) {
3093				done = 1;
3094				break;
3095			}
3096
3097			/*
3098			 * the filesystem may choose to bump up nr_to_write.
3099			 * We have to make sure to honor the new nr_to_write
3100			 * at any time
3101			 */
3102			nr_to_write_done = wbc->nr_to_write <= 0;
3103		}
3104		pagevec_release(&pvec);
3105		cond_resched();
3106	}
3107	if (!scanned && !done) {
3108		/*
3109		 * We hit the last page and there is more work to be done: wrap
3110		 * back to the start of the file
3111		 */
3112		scanned = 1;
3113		index = 0;
3114
3115		/*
3116		 * If we're looping we could run into a page that is locked by a
3117		 * writer and that writer could be waiting on writeback for a
3118		 * page in our current bio, and thus deadlock, so flush the
3119		 * write bio here.
3120		 */
3121		submit_write_bio(bio_ctrl, 0);
3122		goto retry;
3123	}
3124
3125	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
3126		mapping->writeback_index = done_index;
3127
3128	btrfs_add_delayed_iput(BTRFS_I(inode));
3129	return ret;
3130}
3131
3132/*
3133 * Submit the pages in the range to the bio for call sites whose delalloc range
3134 * has already been run (i.e., the ordered extent has been inserted) and all
3135 * pages are still locked.
3136 */
3137int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
3138{
3139	bool found_error = false;
3140	int first_error = 0;
3141	int ret = 0;
3142	struct address_space *mapping = inode->i_mapping;
3143	struct page *page;
3144	u64 cur = start;
3145	unsigned long nr_pages;
3146	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
3147	struct btrfs_bio_ctrl bio_ctrl = {
3148		.extent_locked = 1,
3149		.sync_io = 1,
3150	};
3151	struct writeback_control wbc_writepages = {
3152		.sync_mode	= WB_SYNC_ALL,
3153		.range_start	= start,
3154		.range_end	= end + 1,
3155		/* We're called from an async helper function */
3156		.punt_to_cgroup	= 1,
3157		.no_cgroup_owner = 1,
3158	};
3159
3160	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
3161	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
3162		   PAGE_SHIFT;
3163	wbc_writepages.nr_to_write = nr_pages * 2;
3164
3165	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
3166	while (cur <= end) {
3167		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
3168
3169		page = find_get_page(mapping, cur >> PAGE_SHIFT);
3170		/*
3171		 * All pages in the range are locked since
3172		 * btrfs_run_delalloc_range(), thus there is no way to clear
3173		 * the page dirty flag.
3174		 */
3175		ASSERT(PageLocked(page));
3176		ASSERT(PageDirty(page));
3177		clear_page_dirty_for_io(page);
3178		ret = __extent_writepage(page, &wbc_writepages, &bio_ctrl);
3179		ASSERT(ret <= 0);
3180		if (ret < 0) {
3181			found_error = true;
3182			first_error = ret;
3183		}
3184		put_page(page);
3185		cur = cur_end + 1;
3186	}
3187
3188	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
3189
3190	wbc_detach_inode(&wbc_writepages);
3191	if (found_error)
3192		return first_error;
3193	return ret;
3194}
3195
3196int extent_writepages(struct address_space *mapping,
3197		      struct writeback_control *wbc)
3198{
3199	struct inode *inode = mapping->host;
3200	int ret = 0;
3201	struct btrfs_bio_ctrl bio_ctrl = {
3202		.extent_locked = 0,
3203		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
3204	};
3205
3206	/*
3207	 * Allow only a single thread to do the reloc work in zoned mode to
3208	 * protect the write pointer updates.
3209	 */
3210	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
3211	ret = extent_write_cache_pages(mapping, wbc, &bio_ctrl);
3212	submit_write_bio(&bio_ctrl, ret);
3213	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
3214	return ret;
3215}
3216
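/*
 * Readahead entry point: pull pages from the readahead control in batches of
 * up to 16, hand each batch (one contiguous file range) to
 * contiguous_readpages() while reusing a single cached extent map, and
 * finally submit whatever bio is still being assembled.
 */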
3217void extent_readahead(struct readahead_control *rac)
3218{
3219	struct btrfs_bio_ctrl bio_ctrl = { 0 };
3220	struct page *pagepool[16];
3221	struct extent_map *em_cached = NULL;
3222	u64 prev_em_start = (u64)-1;
3223	int nr;
3224
3225	while ((nr = readahead_page_batch(rac, pagepool))) {
3226		u64 contig_start = readahead_pos(rac);
3227		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
3228
3229		contiguous_readpages(pagepool, nr, contig_start, contig_end,
3230				&em_cached, &bio_ctrl, &prev_em_start);
3231	}
3232
3233	if (em_cached)
3234		free_extent_map(em_cached);
3235	submit_one_bio(&bio_ctrl);
3236}
3237
3238/*
3239 * Basic invalidate_folio code: this waits on any locked or writeback
3240 * ranges corresponding to the folio, and then deletes any extent state
3241 * records from the tree
3242 */
3243int extent_invalidate_folio(struct extent_io_tree *tree,
3244			  struct folio *folio, size_t offset)
3245{
3246	struct extent_state *cached_state = NULL;
3247	u64 start = folio_pos(folio);
3248	u64 end = start + folio_size(folio) - 1;
3249	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
3250
3251	/* This function is only called for the btree inode */
3252	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
3253
3254	start += ALIGN(offset, blocksize);
3255	if (start > end)
3256		return 0;
3257
3258	lock_extent(tree, start, end, &cached_state);
3259	folio_wait_writeback(folio);
3260
3261	/*
3262	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
3263	 * so here we only need to unlock the extent range to free any
3264	 * existing extent state.
3265	 */
3266	unlock_extent(tree, start, end, &cached_state);
3267	return 0;
3268}
3269
3270/*
3271 * A helper for release_folio: this tests for areas of the page that
3272 * are locked or under IO and drops the related state bits if it is safe
3273 * to drop the page.
3274 */
3275static int try_release_extent_state(struct extent_io_tree *tree,
3276				    struct page *page, gfp_t mask)
3277{
3278	u64 start = page_offset(page);
3279	u64 end = start + PAGE_SIZE - 1;
3280	int ret = 1;
3281
3282	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
3283		ret = 0;
3284	} else {
3285		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
3286				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
3287
3288		/*
3289		 * At this point we can safely clear everything except the
3290		 * locked bit, the nodatasum bit and the delalloc new bit.
3291		 * The delalloc new bit will be cleared by ordered extent
3292		 * completion.
3293		 */
3294		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL,
3295					 mask, NULL);
3296
3297		/* If __clear_extent_bit() failed for ENOMEM reasons,
3298		 * we can't allow the release to continue.
3299		 */
3300		if (ret < 0)
3301			ret = 0;
3302		else
3303			ret = 1;
3304	}
3305	return ret;
3306}
3307
3308/*
3309 * A helper for release_folio.  As long as there are no locked extents
3310 * in the range corresponding to the page, both state records and extent
3311 * map records are removed
3312 */
3313int try_release_extent_mapping(struct page *page, gfp_t mask)
3314{
3315	struct extent_map *em;
3316	u64 start = page_offset(page);
3317	u64 end = start + PAGE_SIZE - 1;
3318	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
3319	struct extent_io_tree *tree = &btrfs_inode->io_tree;
3320	struct extent_map_tree *map = &btrfs_inode->extent_tree;
3321
3322	if (gfpflags_allow_blocking(mask) &&
3323	    page->mapping->host->i_size > SZ_16M) {
3324		u64 len;
3325		while (start <= end) {
3326			struct btrfs_fs_info *fs_info;
3327			u64 cur_gen;
3328
3329			len = end - start + 1;
3330			write_lock(&map->lock);
3331			em = lookup_extent_mapping(map, start, len);
3332			if (!em) {
3333				write_unlock(&map->lock);
3334				break;
3335			}
3336			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3337			    em->start != start) {
3338				write_unlock(&map->lock);
3339				free_extent_map(em);
3340				break;
3341			}
3342			if (test_range_bit(tree, em->start,
3343					   extent_map_end(em) - 1,
3344					   EXTENT_LOCKED, 0, NULL))
3345				goto next;
3346			/*
3347			 * If it's not in the list of modified extents, used
3348			 * by a fast fsync, we can remove it. If it's being
3349			 * logged we can safely remove it since fsync took an
3350			 * extra reference on the em.
3351			 */
3352			if (list_empty(&em->list) ||
3353			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
3354				goto remove_em;
3355			/*
3356			 * If it's in the list of modified extents, remove it
3357			 * only if its generation is older than the current one,
3358			 * in which case we don't need it for a fast fsync.
3359			 * Otherwise don't remove it, we could be racing with an
3360			 * ongoing fast fsync that could miss the new extent.
3361			 */
3362			fs_info = btrfs_inode->root->fs_info;
3363			spin_lock(&fs_info->trans_lock);
3364			cur_gen = fs_info->generation;
3365			spin_unlock(&fs_info->trans_lock);
3366			if (em->generation >= cur_gen)
3367				goto next;
3368remove_em:
3369			/*
3370			 * We only remove extent maps that are not in the list of
3371			 * modified extents or that are in the list but with a
3372			 * generation lower than the current generation, so there
3373			 * is no need to set the full fsync flag on the inode (it
3374			 * hurts the fsync performance for workloads with a data
3375			 * size that exceeds or is close to the system's memory).
3376			 */
3377			remove_extent_mapping(map, em);
3378			/* once for the rb tree */
3379			free_extent_map(em);
3380next:
3381			start = extent_map_end(em);
3382			write_unlock(&map->lock);
3383
3384			/* once for us */
3385			free_extent_map(em);
3386
3387			cond_resched(); /* Allow large-extent preemption. */
3388		}
3389	}
3390	return try_release_extent_state(tree, page, mask);
3391}
3392
3393/*
3394 * To cache the previous fiemap extent
3395 *
3396 * Will be used for merging fiemap extents
3397 */
3398struct fiemap_cache {
3399	u64 offset;
3400	u64 phys;
3401	u64 len;
3402	u32 flags;
3403	bool cached;
3404};
3405
3406/*
3407 * Helper to submit fiemap extent.
3408 *
3409 * Will try to merge the current fiemap extent specified by @offset, @phys,
3410 * @len and @flags with the cached one.
3411 * Only when we fail to merge will the cached one be submitted as a
3412 * fiemap extent.
3413 *
3414 * Return value is the same as fiemap_fill_next_extent().
3415 */
3416static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
3417				struct fiemap_cache *cache,
3418				u64 offset, u64 phys, u64 len, u32 flags)
3419{
3420	int ret = 0;
3421
3422	/* Set at the end of extent_fiemap(). */
3423	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
3424
3425	if (!cache->cached)
3426		goto assign;
3427
3428	/*
3429	 * Sanity check, extent_fiemap() should have ensured that new
3430	 * fiemap extent won't overlap with cached one.
3431	 * Not recoverable.
3432	 *
3433	 * NOTE: Physical address can overlap, due to compression
3434	 */
3435	if (cache->offset + cache->len > offset) {
3436		WARN_ON(1);
3437		return -EINVAL;
3438	}
3439
3440	/*
3441	 * Only merge fiemap extents if
3442	 * 1) Their logical addresses are contiguous
3443	 *
3444	 * 2) Their physical addresses are contiguous
3445	 *    So truly compressed (physical size smaller than logical size)
3446	 *    extents won't get merged with each other
3447	 *
3448	 * 3) They share the same flags
3449	 */
3450	if (cache->offset + cache->len  == offset &&
3451	    cache->phys + cache->len == phys  &&
3452	    cache->flags == flags) {
3453		cache->len += len;
3454		return 0;
3455	}
3456
3457	/* Not mergeable, need to submit cached one */
3458	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
3459				      cache->len, cache->flags);
3460	cache->cached = false;
3461	if (ret)
3462		return ret;
3463assign:
3464	cache->cached = true;
3465	cache->offset = offset;
3466	cache->phys = phys;
3467	cache->len = len;
3468	cache->flags = flags;
3469
3470	return 0;
3471}
3472
3473/*
3474 * Emit last fiemap cache
3475 *
3476 * The last fiemap cache may still be cached in the following case:
3477 * 0		      4k		    8k
3478 * |<- Fiemap range ->|
3479 * |<------------  First extent ----------->|
3480 *
3481 * In this case, the first extent range will be cached but not emitted.
3482 * So we must emit it before ending extent_fiemap().
3483 */
3484static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
3485				  struct fiemap_cache *cache)
3486{
3487	int ret;
3488
3489	if (!cache->cached)
3490		return 0;
3491
3492	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
3493				      cache->len, cache->flags);
3494	cache->cached = false;
3495	if (ret > 0)
3496		ret = 0;
3497	return ret;
3498}
3499
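/*
 * Move the path to the next file extent item of the inode, switching to the
 * next leaf if needed. When a new leaf is reached, it is replaced with a
 * private clone (see the comment at fiemap_search_slot() for why).
 *
 * Returns 0 on success, 1 if there are no more file extent items for this
 * inode, and < 0 on error.
 */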
3500static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
3501{
3502	struct extent_buffer *clone;
3503	struct btrfs_key key;
3504	int slot;
3505	int ret;
3506
3507	path->slots[0]++;
3508	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
3509		return 0;
 
3510
3511	ret = btrfs_next_leaf(inode->root, path);
3512	if (ret != 0)
3513		return ret;
3514
3515	/*
3516	 * Don't bother with cloning if there are no more file extent items for
3517	 * our inode.
3518	 */
3519	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3520	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
3521		return 1;
3522
3523	/* See the comment at fiemap_search_slot() about why we clone. */
3524	clone = btrfs_clone_extent_buffer(path->nodes[0]);
3525	if (!clone)
3526		return -ENOMEM;
3527
3528	slot = path->slots[0];
3529	btrfs_release_path(path);
3530	path->nodes[0] = clone;
3531	path->slots[0] = slot;
3532
3533	return 0;
3534}
3535
3536/*
3537 * Search for the first file extent item that starts at a given file offset or
3538 * the one that starts immediately before that offset.
3539 * Returns: 0 on success, < 0 on error, 1 if not found.
3540 */
3541static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
3542			      u64 file_offset)
3543{
3544	const u64 ino = btrfs_ino(inode);
3545	struct btrfs_root *root = inode->root;
3546	struct extent_buffer *clone;
3547	struct btrfs_key key;
3548	int slot;
3549	int ret;
3550
3551	key.objectid = ino;
3552	key.type = BTRFS_EXTENT_DATA_KEY;
3553	key.offset = file_offset;
3554
3555	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3556	if (ret < 0)
3557		return ret;
3558
3559	if (ret > 0 && path->slots[0] > 0) {
3560		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3561		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3562			path->slots[0]--;
3563	}
3564
3565	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3566		ret = btrfs_next_leaf(root, path);
3567		if (ret != 0)
3568			return ret;
3569
3570		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3571		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3572			return 1;
3573	}
3574
3575	/*
3576	 * We clone the leaf and use it during fiemap. This is because while
3577	 * using the leaf we do expensive things like checking if an extent is
3578	 * shared, which can take a long time. In order to prevent blocking
3579	 * other tasks for too long, we use a clone of the leaf. We have locked
3580	 * the file range in the inode's io tree, so we know none of our file
3581	 * extent items can change. This way we avoid blocking other tasks that
3582	 * want to insert items for other inodes in the same leaf or b+tree
3583	 * rebalance operations (triggered for example when someone is trying
3584	 * to push items into this leaf when trying to insert an item in a
3585	 * neighbour leaf).
3586	 * We also need the private clone because holding a read lock on an
3587	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
3588	 * when we call fiemap_fill_next_extent(), because that may cause a page
3589	 * fault when filling the user space buffer with fiemap data.
3590	 */
3591	clone = btrfs_clone_extent_buffer(path->nodes[0]);
3592	if (!clone)
3593		return -ENOMEM;
3594
3595	slot = path->slots[0];
3596	btrfs_release_path(path);
3597	path->nodes[0] = clone;
3598	path->slots[0] = slot;
3599
3600	return 0;
3601}
3602
3603/*
3604 * Process a range which is a hole or a prealloc extent in the inode's subvolume
3605 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
3606 * extent. The end offset (@end) is inclusive.
3607 */
3608static int fiemap_process_hole(struct btrfs_inode *inode,
3609			       struct fiemap_extent_info *fieinfo,
3610			       struct fiemap_cache *cache,
3611			       struct extent_state **delalloc_cached_state,
3612			       struct btrfs_backref_share_check_ctx *backref_ctx,
3613			       u64 disk_bytenr, u64 extent_offset,
3614			       u64 extent_gen,
3615			       u64 start, u64 end)
3616{
3617	const u64 i_size = i_size_read(&inode->vfs_inode);
3618	u64 cur_offset = start;
3619	u64 last_delalloc_end = 0;
3620	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
3621	bool checked_extent_shared = false;
3622	int ret;
3623
3624	/*
3625	 * There can be no delalloc past i_size, so don't waste time looking for
3626	 * it beyond i_size.
3627	 */
3628	while (cur_offset < end && cur_offset < i_size) {
3629		u64 delalloc_start;
3630		u64 delalloc_end;
3631		u64 prealloc_start;
3632		u64 prealloc_len = 0;
3633		bool delalloc;
3634
3635		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
3636							delalloc_cached_state,
3637							&delalloc_start,
3638							&delalloc_end);
3639		if (!delalloc)
3640			break;
3641
3642		/*
3643		 * If this is a prealloc extent we have to report every section
3644		 * of it that has no delalloc.
3645		 */
3646		if (disk_bytenr != 0) {
3647			if (last_delalloc_end == 0) {
3648				prealloc_start = start;
3649				prealloc_len = delalloc_start - start;
3650			} else {
3651				prealloc_start = last_delalloc_end + 1;
3652				prealloc_len = delalloc_start - prealloc_start;
3653			}
3654		}
3655
3656		if (prealloc_len > 0) {
3657			if (!checked_extent_shared && fieinfo->fi_extents_max) {
3658				ret = btrfs_is_data_extent_shared(inode,
3659								  disk_bytenr,
3660								  extent_gen,
3661								  backref_ctx);
3662				if (ret < 0)
3663					return ret;
3664				else if (ret > 0)
3665					prealloc_flags |= FIEMAP_EXTENT_SHARED;
3666
3667				checked_extent_shared = true;
3668			}
3669			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
3670						 disk_bytenr + extent_offset,
3671						 prealloc_len, prealloc_flags);
3672			if (ret)
3673				return ret;
3674			extent_offset += prealloc_len;
3675		}
3676
3677		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
3678					 delalloc_end + 1 - delalloc_start,
3679					 FIEMAP_EXTENT_DELALLOC |
3680					 FIEMAP_EXTENT_UNKNOWN);
3681		if (ret)
3682			return ret;
3683
3684		last_delalloc_end = delalloc_end;
3685		cur_offset = delalloc_end + 1;
3686		extent_offset += cur_offset - delalloc_start;
3687		cond_resched();
3688	}
3689
3690	/*
3691	 * Either we found no delalloc for the whole prealloc extent or we have
3692	 * a prealloc extent that spans i_size or starts at or after i_size.
3693	 */
3694	if (disk_bytenr != 0 && last_delalloc_end < end) {
3695		u64 prealloc_start;
3696		u64 prealloc_len;
3697
3698		if (last_delalloc_end == 0) {
3699			prealloc_start = start;
3700			prealloc_len = end + 1 - start;
3701		} else {
3702			prealloc_start = last_delalloc_end + 1;
3703			prealloc_len = end + 1 - prealloc_start;
3704		}
3705
3706		if (!checked_extent_shared && fieinfo->fi_extents_max) {
3707			ret = btrfs_is_data_extent_shared(inode,
3708							  disk_bytenr,
3709							  extent_gen,
3710							  backref_ctx);
3711			if (ret < 0)
3712				return ret;
3713			else if (ret > 0)
3714				prealloc_flags |= FIEMAP_EXTENT_SHARED;
3715		}
3716		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
3717					 disk_bytenr + extent_offset,
3718					 prealloc_len, prealloc_flags);
3719		if (ret)
3720			return ret;
3721	}
3722
3723	return 0;
3724}
3725
3726static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
3727					  struct btrfs_path *path,
3728					  u64 *last_extent_end_ret)
3729{
3730	const u64 ino = btrfs_ino(inode);
3731	struct btrfs_root *root = inode->root;
3732	struct extent_buffer *leaf;
3733	struct btrfs_file_extent_item *ei;
3734	struct btrfs_key key;
3735	u64 disk_bytenr;
3736	int ret;
3737
3738	/*
3739	 * Lookup the last file extent. We're not using i_size here because
3740	 * there might be preallocation past i_size.
3741	 */
3742	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
3743	/* There can't be a file extent item at offset (u64)-1 */
3744	ASSERT(ret != 0);
3745	if (ret < 0)
3746		return ret;
3747
3748	/*
3749	 * For a non-existing key, btrfs_search_slot() always leaves us at a
3750	 * slot > 0, except if the btree is empty, which is impossible because
3751	 * at least it has the inode item for this inode and all the items for
3752	 * the root inode 256.
3753	 */
3754	ASSERT(path->slots[0] > 0);
3755	path->slots[0]--;
3756	leaf = path->nodes[0];
3757	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3758	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
3759		/* No file extent items in the subvolume tree. */
3760		*last_extent_end_ret = 0;
3761		return 0;
3762	}
3763
3764	/*
3765	 * For an inline extent, the disk_bytenr is where inline data starts at,
3766	 * so first check if we have an inline extent item before checking if we
3767	 * have an implicit hole (disk_bytenr == 0).
3768	 */
3769	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
3770	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
3771		*last_extent_end_ret = btrfs_file_extent_end(path);
3772		return 0;
3773	}
3774
3775	/*
3776	 * Find the last file extent item that is not a hole (when NO_HOLES is
3777	 * not enabled). This should take at most 2 iterations in the worst
3778	 * case: we have one hole file extent item at slot 0 of a leaf and
3779	 * another hole file extent item as the last item in the previous leaf.
3780	 * This is because we merge file extent items that represent holes.
3781	 */
3782	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3783	while (disk_bytenr == 0) {
3784		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
3785		if (ret < 0) {
3786			return ret;
3787		} else if (ret > 0) {
3788			/* No file extent items that are not holes. */
3789			*last_extent_end_ret = 0;
3790			return 0;
3791		}
3792		leaf = path->nodes[0];
3793		ei = btrfs_item_ptr(leaf, path->slots[0],
3794				    struct btrfs_file_extent_item);
3795		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3796	}
3797
3798	*last_extent_end_ret = btrfs_file_extent_end(path);
3799	return 0;
3800}
3801
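/*
 * Implement fiemap for a btrfs inode.
 *
 * The file range is locked in the inode's io tree, then the file extent items
 * are walked using cloned leaves. Regular and inline extents are emitted
 * directly, while holes and prealloc extents go through fiemap_process_hole()
 * so that delalloc ranges are reported too. Adjacent entries are merged via
 * the fiemap_cache, and the last cached entry is flushed by
 * emit_last_fiemap_cache() before returning.
 */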
3802int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3803		  u64 start, u64 len)
3804{
3805	const u64 ino = btrfs_ino(inode);
3806	struct extent_state *cached_state = NULL;
3807	struct extent_state *delalloc_cached_state = NULL;
3808	struct btrfs_path *path;
3809	struct fiemap_cache cache = { 0 };
3810	struct btrfs_backref_share_check_ctx *backref_ctx;
3811	u64 last_extent_end;
3812	u64 prev_extent_end;
3813	u64 lockstart;
3814	u64 lockend;
3815	bool stopped = false;
3816	int ret;
3817
3818	backref_ctx = btrfs_alloc_backref_share_check_ctx();
3819	path = btrfs_alloc_path();
3820	if (!backref_ctx || !path) {
3821		ret = -ENOMEM;
3822		goto out;
3823	}
3824
3825	lockstart = round_down(start, inode->root->fs_info->sectorsize);
3826	lockend = round_up(start + len, inode->root->fs_info->sectorsize);
3827	prev_extent_end = lockstart;
3828
3829	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3830	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3831
3832	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3833	if (ret < 0)
3834		goto out_unlock;
3835	btrfs_release_path(path);
3836
3837	path->reada = READA_FORWARD;
3838	ret = fiemap_search_slot(inode, path, lockstart);
3839	if (ret < 0) {
3840		goto out_unlock;
3841	} else if (ret > 0) {
3842		/*
3843		 * No file extent item found, but we may have delalloc between
3844		 * the current offset and i_size. So check for that.
3845		 */
3846		ret = 0;
3847		goto check_eof_delalloc;
3848	}
3849
3850	while (prev_extent_end < lockend) {
3851		struct extent_buffer *leaf = path->nodes[0];
3852		struct btrfs_file_extent_item *ei;
3853		struct btrfs_key key;
3854		u64 extent_end;
3855		u64 extent_len;
3856		u64 extent_offset = 0;
3857		u64 extent_gen;
3858		u64 disk_bytenr = 0;
3859		u64 flags = 0;
3860		int extent_type;
3861		u8 compression;
3862
3863		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3864		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3865			break;
3866
3867		extent_end = btrfs_file_extent_end(path);
3868
3869		/*
3870		 * The first iteration can leave us at an extent item that ends
3871		 * before our range's start. Move to the next item.
3872		 */
3873		if (extent_end <= lockstart)
3874			goto next_item;
3875
3876		backref_ctx->curr_leaf_bytenr = leaf->start;
3877
3878		/* We have an implicit hole (NO_HOLES feature enabled). */
3879		if (prev_extent_end < key.offset) {
3880			const u64 range_end = min(key.offset, lockend) - 1;
3881
3882			ret = fiemap_process_hole(inode, fieinfo, &cache,
3883						  &delalloc_cached_state,
3884						  backref_ctx, 0, 0, 0,
3885						  prev_extent_end, range_end);
3886			if (ret < 0) {
3887				goto out_unlock;
3888			} else if (ret > 0) {
3889				/* fiemap_fill_next_extent() told us to stop. */
3890				stopped = true;
3891				break;
3892			}
3893
3894			/* We've reached the end of the fiemap range, stop. */
3895			if (key.offset >= lockend) {
3896				stopped = true;
3897				break;
3898			}
3899		}
3900
3901		extent_len = extent_end - key.offset;
3902		ei = btrfs_item_ptr(leaf, path->slots[0],
3903				    struct btrfs_file_extent_item);
3904		compression = btrfs_file_extent_compression(leaf, ei);
3905		extent_type = btrfs_file_extent_type(leaf, ei);
3906		extent_gen = btrfs_file_extent_generation(leaf, ei);
3907
3908		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3909			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3910			if (compression == BTRFS_COMPRESS_NONE)
3911				extent_offset = btrfs_file_extent_offset(leaf, ei);
3912		}
3913
3914		if (compression != BTRFS_COMPRESS_NONE)
3915			flags |= FIEMAP_EXTENT_ENCODED;
3916
3917		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3918			flags |= FIEMAP_EXTENT_DATA_INLINE;
3919			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3920			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3921						 extent_len, flags);
3922		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3923			ret = fiemap_process_hole(inode, fieinfo, &cache,
3924						  &delalloc_cached_state,
3925						  backref_ctx,
3926						  disk_bytenr, extent_offset,
3927						  extent_gen, key.offset,
3928						  extent_end - 1);
3929		} else if (disk_bytenr == 0) {
3930			/* We have an explicit hole. */
3931			ret = fiemap_process_hole(inode, fieinfo, &cache,
3932						  &delalloc_cached_state,
3933						  backref_ctx, 0, 0, 0,
3934						  key.offset, extent_end - 1);
3935		} else {
3936			/* We have a regular extent. */
3937			if (fieinfo->fi_extents_max) {
3938				ret = btrfs_is_data_extent_shared(inode,
3939								  disk_bytenr,
3940								  extent_gen,
3941								  backref_ctx);
3942				if (ret < 0)
3943					goto out_unlock;
3944				else if (ret > 0)
3945					flags |= FIEMAP_EXTENT_SHARED;
3946			}
3947
3948			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3949						 disk_bytenr + extent_offset,
3950						 extent_len, flags);
3951		}
3952
3953		if (ret < 0) {
3954			goto out_unlock;
3955		} else if (ret > 0) {
3956			/* fiemap_fill_next_extent() told us to stop. */
3957			stopped = true;
3958			break;
3959		}
3960
3961		prev_extent_end = extent_end;
3962next_item:
3963		if (fatal_signal_pending(current)) {
3964			ret = -EINTR;
3965			goto out_unlock;
 
3966		}
3967
3968		ret = fiemap_next_leaf_item(inode, path);
3969		if (ret < 0) {
3970			goto out_unlock;
3971		} else if (ret > 0) {
3972			/* No more file extent items for this inode. */
3973			break;
3974		}
3975		cond_resched();
3976	}
3977
3978check_eof_delalloc:
3979	/*
3980	 * Release (and free) the path before emitting any final entries to
3981	 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3982	 * once we find no more file extent items exist, we may have a
3983	 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3984	 * faults when copying data to the user space buffer.
3985	 */
3986	btrfs_free_path(path);
3987	path = NULL;
3988
3989	if (!stopped && prev_extent_end < lockend) {
3990		ret = fiemap_process_hole(inode, fieinfo, &cache,
3991					  &delalloc_cached_state, backref_ctx,
3992					  0, 0, 0, prev_extent_end, lockend - 1);
3993		if (ret < 0)
3994			goto out_unlock;
3995		prev_extent_end = lockend;
3996	}
3997
3998	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3999		const u64 i_size = i_size_read(&inode->vfs_inode);
4000
4001		if (prev_extent_end < i_size) {
4002			u64 delalloc_start;
4003			u64 delalloc_end;
4004			bool delalloc;
4005
4006			delalloc = btrfs_find_delalloc_in_range(inode,
4007								prev_extent_end,
4008								i_size - 1,
4009								&delalloc_cached_state,
4010								&delalloc_start,
4011								&delalloc_end);
4012			if (!delalloc)
4013				cache.flags |= FIEMAP_EXTENT_LAST;
4014		} else {
4015			cache.flags |= FIEMAP_EXTENT_LAST;
4016		}
4017	}
4018
4019	ret = emit_last_fiemap_cache(fieinfo, &cache);
4020
4021out_unlock:
4022	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
4023	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
4024out:
4025	free_extent_state(delalloc_cached_state);
4026	btrfs_free_backref_share_ctx(backref_ctx);
4027	btrfs_free_path(path);
4028	return ret;
4029}
4030
4031static void __free_extent_buffer(struct extent_buffer *eb)
4032{
 
4033	kmem_cache_free(extent_buffer_cache, eb);
4034}
4035
4036int extent_buffer_under_io(const struct extent_buffer *eb)
4037{
4038	return (atomic_read(&eb->io_pages) ||
4039		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4040		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4041}
4042
4043static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
4044{
4045	struct btrfs_subpage *subpage;
4046
4047	lockdep_assert_held(&page->mapping->private_lock);
4048
4049	if (PagePrivate(page)) {
4050		subpage = (struct btrfs_subpage *)page->private;
4051		if (atomic_read(&subpage->eb_refs))
4052			return true;
4053		/*
4054		 * Even if there are no eb refs here, we may still have an
4055		 * end_page_read() call relying on page::private.
4056		 */
4057		if (atomic_read(&subpage->readers))
4058			return true;
4059	}
4060	return false;
4061}
4062
4063static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
4064{
4065	struct btrfs_fs_info *fs_info = eb->fs_info;
4066	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
4067
4068	/*
4069	 * For mapped eb, we're going to change the page private, which should
4070	 * be done under the private_lock.
4071	 */
4072	if (mapped)
4073		spin_lock(&page->mapping->private_lock);
4074
4075	if (!PagePrivate(page)) {
4076		if (mapped)
4077			spin_unlock(&page->mapping->private_lock);
4078		return;
4079	}
4080
4081	if (fs_info->nodesize >= PAGE_SIZE) {
4082		/*
4083		 * We do this since we'll remove the pages after we've
4084		 * removed the eb from the radix tree, so we could race
4085		 * and have this page now attached to the new eb.  So
4086		 * only clear page_private if it's still connected to
4087		 * this eb.
4088		 */
4089		if (PagePrivate(page) &&
4090		    page->private == (unsigned long)eb) {
4091			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4092			BUG_ON(PageDirty(page));
4093			BUG_ON(PageWriteback(page));
4094			/*
4095			 * We need to make sure we haven't been attached
4096			 * to a new eb.
4097			 */
4098			detach_page_private(page);
4099		}
4100		if (mapped)
4101			spin_unlock(&page->mapping->private_lock);
4102		return;
4103	}
4104
4105	/*
4106	 * For subpage, we can have dummy eb with page private.  In this case,
4107	 * we can directly detach the private as such page is only attached to
4108	 * one dummy eb, no sharing.
4109	 */
4110	if (!mapped) {
4111		btrfs_detach_subpage(fs_info, page);
4112		return;
4113	}
4114
4115	btrfs_page_dec_eb_refs(fs_info, page);
4116
4117	/*
4118	 * We can only detach the page private if there are no other ebs in the
4119	 * page range and no unfinished IO.
4120	 */
4121	if (!page_range_has_eb(fs_info, page))
4122		btrfs_detach_subpage(fs_info, page);
4123
4124	spin_unlock(&page->mapping->private_lock);
4125}
4126
4127/* Release all pages attached to the extent buffer */
4128static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4129{
4130	int i;
4131	int num_pages;
4132
4133	ASSERT(!extent_buffer_under_io(eb));
4134
4135	num_pages = num_extent_pages(eb);
4136	for (i = 0; i < num_pages; i++) {
4137		struct page *page = eb->pages[i];
4138
4139		if (!page)
4140			continue;
4141
4142		detach_extent_buffer_page(eb, page);
4143
4144		/* One for when we allocated the page */
4145		put_page(page);
4146	}
4147}
4148
4149/*
4150 * Helper for releasing the extent buffer.
4151 */
4152static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4153{
4154	btrfs_release_extent_buffer_pages(eb);
4155	btrfs_leak_debug_del_eb(eb);
4156	__free_extent_buffer(eb);
4157}
4158
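/*
 * Allocate and initialize a bare extent buffer for the given range. The pages
 * array is left empty, the caller is responsible for allocating and attaching
 * the pages (see alloc_extent_buffer() and btrfs_clone_extent_buffer()).
 */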
4159static struct extent_buffer *
4160__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4161		      unsigned long len)
4162{
4163	struct extent_buffer *eb = NULL;
4164
4165	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4166	eb->start = start;
4167	eb->len = len;
4168	eb->fs_info = fs_info;
4169	init_rwsem(&eb->lock);
4170
4171	btrfs_leak_debug_add_eb(eb);
4172	INIT_LIST_HEAD(&eb->release_list);
4173
4174	spin_lock_init(&eb->refs_lock);
4175	atomic_set(&eb->refs, 1);
4176	atomic_set(&eb->io_pages, 0);
4177
4178	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
4179
4180	return eb;
4181}
4182
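/*
 * Create a private, UNMAPPED copy of @src backed by newly allocated pages and
 * marked uptodate. This allows e.g. fiemap to work on a leaf's contents
 * without keeping the original extent buffer locked.
 */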
4183struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
4184{
4185	int i;
 
4186	struct extent_buffer *new;
4187	int num_pages = num_extent_pages(src);
4188	int ret;
4189
4190	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4191	if (new == NULL)
4192		return NULL;
4193
4194	/*
4195	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
4196	 * btrfs_release_extent_buffer() has different behavior for
4197	 * UNMAPPED subpage extent buffers.
4198	 */
4199	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
4200
4201	ret = btrfs_alloc_page_array(num_pages, new->pages);
4202	if (ret) {
4203		btrfs_release_extent_buffer(new);
4204		return NULL;
4205	}
4206
4207	for (i = 0; i < num_pages; i++) {
4208		int ret;
4209		struct page *p = new->pages[i];
4210
4211		ret = attach_extent_buffer_page(new, p, NULL);
4212		if (ret < 0) {
4213			btrfs_release_extent_buffer(new);
4214			return NULL;
4215		}
 
4216		WARN_ON(PageDirty(p));
4217		copy_page(page_address(p), page_address(src->pages[i]));
 
4218	}
4219	set_extent_buffer_uptodate(new);
4220
4221	return new;
4222}
4223
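/*
 * Allocate an UNMAPPED (dummy) extent buffer whose pages are not part of the
 * btree inode's page cache and which is not inserted into the buffer radix
 * tree.
 */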
4224struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4225						  u64 start, unsigned long len)
4226{
4227	struct extent_buffer *eb;
4228	int num_pages;
4229	int i;
4230	int ret;
 
4231
4232	eb = __alloc_extent_buffer(fs_info, start, len);
4233	if (!eb)
4234		return NULL;
4235
4236	num_pages = num_extent_pages(eb);
4237	ret = btrfs_alloc_page_array(num_pages, eb->pages);
4238	if (ret)
4239		goto err;
4240
4241	for (i = 0; i < num_pages; i++) {
4242		struct page *p = eb->pages[i];
4243
4244		ret = attach_extent_buffer_page(eb, p, NULL);
4245		if (ret < 0)
4246			goto err;
4247	}
4248
4249	set_extent_buffer_uptodate(eb);
4250	btrfs_set_header_nritems(eb, 0);
4251	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4252
4253	return eb;
4254err:
4255	for (i = 0; i < num_pages; i++) {
4256		if (eb->pages[i]) {
4257			detach_extent_buffer_page(eb, eb->pages[i]);
4258			__free_page(eb->pages[i]);
4259		}
4260	}
4261	__free_extent_buffer(eb);
4262	return NULL;
4263}
4264
4265struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4266						u64 start)
4267{
4268	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
4269}
4270
4271static void check_buffer_tree_ref(struct extent_buffer *eb)
4272{
4273	int refs;
4274	/*
4275	 * The TREE_REF bit is first set when the extent_buffer is added
4276	 * to the radix tree. It is also reset, if unset, when a new reference
4277	 * is created by find_extent_buffer.
4278	 *
4279	 * It is only cleared in two cases: freeing the last non-tree
4280	 * reference to the extent_buffer when its STALE bit is set or
4281	 * calling release_folio when the tree reference is the only reference.
4282	 *
4283	 * In both cases, care is taken to ensure that the extent_buffer's
4284	 * pages are not under io. However, release_folio can be concurrently
4285	 * called with creating new references, which is prone to race
4286	 * conditions between the calls to check_buffer_tree_ref in those
4287	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
4288	 *
4289	 * The actual lifetime of the extent_buffer in the radix tree is
4290	 * adequately protected by the refcount, but the TREE_REF bit and
4291	 * its corresponding reference are not. To protect against this
4292	 * class of races, we call check_buffer_tree_ref from the codepaths
4293	 * which trigger io after they set eb->io_pages. Note that once io is
4294	 * initiated, TREE_REF can no longer be cleared, so that is the
4295	 * moment at which any such race is best fixed.
4296	 */
4297	refs = atomic_read(&eb->refs);
4298	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4299		return;
4300
4301	spin_lock(&eb->refs_lock);
4302	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4303		atomic_inc(&eb->refs);
4304	spin_unlock(&eb->refs_lock);
4305}
4306
4307static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4308		struct page *accessed)
4309{
4310	int num_pages, i;
4311
4312	check_buffer_tree_ref(eb);
4313
4314	num_pages = num_extent_pages(eb);
4315	for (i = 0; i < num_pages; i++) {
4316		struct page *p = eb->pages[i];
4317
4318		if (p != accessed)
4319			mark_page_accessed(p);
4320	}
4321}
4322
4323struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4324					 u64 start)
4325{
4326	struct extent_buffer *eb;
4327
4328	eb = find_extent_buffer_nolock(fs_info, start);
4329	if (!eb)
4330		return NULL;
4331	/*
4332	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
4333	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
4334	 * another task running free_extent_buffer() might have seen that flag
4335	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
4336	 * writeback flags not set) and it's still in the tree (flag
4337	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
4338	 * decrementing the extent buffer's reference count twice.  So here we
4339	 * could race and increment the eb's reference count, clear its stale
4340	 * flag, mark it as dirty and drop our reference before the other task
4341	 * finishes executing free_extent_buffer, which would later result in
4342	 * an attempt to free an extent buffer that is dirty.
4343	 */
4344	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4345		spin_lock(&eb->refs_lock);
4346		spin_unlock(&eb->refs_lock);
4347	}
4348	mark_extent_buffer_accessed(eb, NULL);
4349	return eb;
 
4350}
4351
4352#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4353struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4354					u64 start)
4355{
4356	struct extent_buffer *eb, *exists = NULL;
4357	int ret;
4358
4359	eb = find_extent_buffer(fs_info, start);
4360	if (eb)
4361		return eb;
4362	eb = alloc_dummy_extent_buffer(fs_info, start);
4363	if (!eb)
4364		return ERR_PTR(-ENOMEM);
4365	eb->fs_info = fs_info;
4366again:
4367	ret = radix_tree_preload(GFP_NOFS);
4368	if (ret) {
4369		exists = ERR_PTR(ret);
4370		goto free_eb;
4371	}
4372	spin_lock(&fs_info->buffer_lock);
4373	ret = radix_tree_insert(&fs_info->buffer_radix,
4374				start >> fs_info->sectorsize_bits, eb);
4375	spin_unlock(&fs_info->buffer_lock);
4376	radix_tree_preload_end();
4377	if (ret == -EEXIST) {
4378		exists = find_extent_buffer(fs_info, start);
4379		if (exists)
4380			goto free_eb;
4381		else
4382			goto again;
4383	}
4384	check_buffer_tree_ref(eb);
4385	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4386
4387	return eb;
4388free_eb:
4389	btrfs_release_extent_buffer(eb);
4390	return exists;
4391}
4392#endif
4393
4394static struct extent_buffer *grab_extent_buffer(
4395		struct btrfs_fs_info *fs_info, struct page *page)
4396{
4397	struct extent_buffer *exists;
4398
4399	/*
4400	 * For subpage case, we completely rely on radix tree to ensure we
4401	 * don't try to insert two ebs for the same bytenr.  So here we always
4402	 * return NULL and just continue.
4403	 */
4404	if (fs_info->nodesize < PAGE_SIZE)
4405		return NULL;
4406
4407	/* Page not yet attached to an extent buffer */
4408	if (!PagePrivate(page))
4409		return NULL;
4410
4411	/*
4412	 * We could have already allocated an eb for this page and attached one
4413	 * so let's see if we can get a ref on the existing eb, and if we can we
4414	 * know it's good and we can just return that one, else we know we can
4415	 * just overwrite page->private.
4416	 */
4417	exists = (struct extent_buffer *)page->private;
4418	if (atomic_inc_not_zero(&exists->refs))
4419		return exists;
4420
4421	WARN_ON(PageDirty(page));
4422	detach_page_private(page);
4423	return NULL;
4424}
4425
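/*
 * Validate the alignment of a tree block start: it must be sectorsize aligned,
 * must not cross a page boundary for subpage (nodesize < PAGE_SIZE) support,
 * and must be page aligned when nodesize >= PAGE_SIZE.
 */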
4426static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
4427{
4428	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
4429		btrfs_err(fs_info, "bad tree block start %llu", start);
4430		return -EINVAL;
4431	}
4432
4433	if (fs_info->nodesize < PAGE_SIZE &&
4434	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
4435		btrfs_err(fs_info,
4436		"tree block crosses page boundary, start %llu nodesize %u",
4437			  start, fs_info->nodesize);
4438		return -EINVAL;
4439	}
4440	if (fs_info->nodesize >= PAGE_SIZE &&
4441	    !PAGE_ALIGNED(start)) {
4442		btrfs_err(fs_info,
4443		"tree block is not page aligned, start %llu nodesize %u",
4444			  start, fs_info->nodesize);
4445		return -EINVAL;
4446	}
4447	return 0;
4448}
4449
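/*
 * Allocate the extent buffer for the tree block at @start, or return the
 * existing one if it is already in the buffer radix tree.
 *
 * The pages come from the btree inode's page cache, the new eb is inserted
 * into the radix tree and an extra reference is taken for the tree. If
 * another task races us and inserts an eb for the same bytenr first, that
 * buffer is returned instead. Returns an ERR_PTR() on failure.
 */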
4450struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4451					  u64 start, u64 owner_root, int level)
4452{
4453	unsigned long len = fs_info->nodesize;
4454	int num_pages;
4455	int i;
4456	unsigned long index = start >> PAGE_SHIFT;
4457	struct extent_buffer *eb;
4458	struct extent_buffer *exists = NULL;
4459	struct page *p;
4460	struct address_space *mapping = fs_info->btree_inode->i_mapping;
4461	u64 lockdep_owner = owner_root;
4462	int uptodate = 1;
4463	int ret;
4464
4465	if (check_eb_alignment(fs_info, start))
4466		return ERR_PTR(-EINVAL);
4467
4468#if BITS_PER_LONG == 32
4469	if (start >= MAX_LFS_FILESIZE) {
4470		btrfs_err_rl(fs_info,
4471		"extent buffer %llu is beyond 32bit page cache limit", start);
4472		btrfs_err_32bit_limit(fs_info);
4473		return ERR_PTR(-EOVERFLOW);
4474	}
4475	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
4476		btrfs_warn_32bit_limit(fs_info);
4477#endif
4478
4479	eb = find_extent_buffer(fs_info, start);
4480	if (eb)
4481		return eb;
4482
4483	eb = __alloc_extent_buffer(fs_info, start, len);
4484	if (!eb)
4485		return ERR_PTR(-ENOMEM);
4486
4487	/*
4488	 * The reloc trees are just snapshots, so we need them to appear to be
4489	 * just like any other fs tree WRT lockdep.
4490	 */
4491	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
4492		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
4493
4494	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
4495
4496	num_pages = num_extent_pages(eb);
4497	for (i = 0; i < num_pages; i++, index++) {
4498		struct btrfs_subpage *prealloc = NULL;
4499
4500		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4501		if (!p) {
4502			exists = ERR_PTR(-ENOMEM);
4503			goto free_eb;
4504		}
4505
4506		/*
4507		 * Preallocate page->private for the subpage case, so that we won't
4508		 * allocate memory with private_lock held.  The memory will be
4509		 * freed by attach_extent_buffer_page() or freed manually if
4510		 * we exit earlier.
4511		 *
4512		 * Although we have ensured one subpage eb can only have one
4513		 * page, that may change in the future for 16K page size
4514		 * support, so we still preallocate the memory in the loop.
4515		 */
4516		if (fs_info->nodesize < PAGE_SIZE) {
4517			prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
4518			if (IS_ERR(prealloc)) {
4519				ret = PTR_ERR(prealloc);
4520				unlock_page(p);
4521				put_page(p);
4522				exists = ERR_PTR(ret);
4523				goto free_eb;
4524			}
4525		}
4526
4527		spin_lock(&mapping->private_lock);
4528		exists = grab_extent_buffer(fs_info, p);
4529		if (exists) {
4530			spin_unlock(&mapping->private_lock);
4531			unlock_page(p);
 
4532			put_page(p);
4533			mark_extent_buffer_accessed(exists, p);
4534			btrfs_free_subpage(prealloc);
4535			goto free_eb;
4536		}
4537		/* Should not fail, as we have preallocated the memory */
4538		ret = attach_extent_buffer_page(eb, p, prealloc);
4539		ASSERT(!ret);
4540		/*
4541		 * Inform that we have an extra eb under allocation, so that
4542		 * detach_extent_buffer_page() won't release the page private
4543		 * when the eb hasn't yet been inserted into the radix tree.
4544		 *
4545		 * The ref will be decreased when the eb releases the page, in
4546		 * detach_extent_buffer_page().
4547		 * Thus it needs no special handling in the error path.
4548		 */
4549		btrfs_page_inc_eb_refs(fs_info, p);
4550		spin_unlock(&mapping->private_lock);
4551
4552		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
4553		eb->pages[i] = p;
4554		if (!PageUptodate(p))
4555			uptodate = 0;
4556
4557		/*
4558		 * We can't unlock the pages just yet since the extent buffer
4559		 * hasn't been properly inserted in the radix tree yet. That
4560		 * opens a race with btree_release_folio, which can free a page
4561		 * while we are still filling in all the pages for the buffer,
4562		 * and we could crash.
4563		 */
4564	}
4565	if (uptodate)
4566		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4567again:
4568	ret = radix_tree_preload(GFP_NOFS);
4569	if (ret) {
4570		exists = ERR_PTR(ret);
4571		goto free_eb;
4572	}
4573
4574	spin_lock(&fs_info->buffer_lock);
4575	ret = radix_tree_insert(&fs_info->buffer_radix,
4576				start >> fs_info->sectorsize_bits, eb);
4577	spin_unlock(&fs_info->buffer_lock);
4578	radix_tree_preload_end();
4579	if (ret == -EEXIST) {
4580		exists = find_extent_buffer(fs_info, start);
4581		if (exists)
4582			goto free_eb;
4583		else
4584			goto again;
4585	}
4586	/* add one reference for the tree */
4587	check_buffer_tree_ref(eb);
4588	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4589
4590	/*
4591	 * Now it's safe to unlock the pages because any calls to
4592	 * btree_release_folio will correctly detect that a page belongs to a
4593	 * live buffer and won't free them prematurely.
4594	 */
4595	for (i = 0; i < num_pages; i++)
4596		unlock_page(eb->pages[i]);
4597	return eb;
4598
4599free_eb:
4600	WARN_ON(!atomic_dec_and_test(&eb->refs));
4601	for (i = 0; i < num_pages; i++) {
4602		if (eb->pages[i])
4603			unlock_page(eb->pages[i]);
4604	}
4605
4606	btrfs_release_extent_buffer(eb);
4607	return exists;
4608}
4609
4610static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4611{
4612	struct extent_buffer *eb =
4613			container_of(head, struct extent_buffer, rcu_head);
4614
4615	__free_extent_buffer(eb);
4616}
4617
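/*
 * Drop one reference with eb->refs_lock held (the lock is released here).
 * If it was the last reference, remove the buffer from the radix tree if it
 * was added there, release its pages and free the structure via RCU so that
 * lockless radix tree lookups stay safe. Returns 1 if the buffer was freed,
 * 0 otherwise.
 */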
4618static int release_extent_buffer(struct extent_buffer *eb)
4619	__releases(&eb->refs_lock)
4620{
4621	lockdep_assert_held(&eb->refs_lock);
4622
4623	WARN_ON(atomic_read(&eb->refs) == 0);
4624	if (atomic_dec_and_test(&eb->refs)) {
4625		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4626			struct btrfs_fs_info *fs_info = eb->fs_info;
4627
4628			spin_unlock(&eb->refs_lock);
4629
4630			spin_lock(&fs_info->buffer_lock);
4631			radix_tree_delete(&fs_info->buffer_radix,
4632					  eb->start >> fs_info->sectorsize_bits);
4633			spin_unlock(&fs_info->buffer_lock);
4634		} else {
4635			spin_unlock(&eb->refs_lock);
4636		}
4637
4638		btrfs_leak_debug_del_eb(eb);
4639		/* Should be safe to release our pages at this point */
4640		btrfs_release_extent_buffer_pages(eb);
4641#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4642		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
4643			__free_extent_buffer(eb);
4644			return 1;
4645		}
4646#endif
4647		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4648		return 1;
4649	}
4650	spin_unlock(&eb->refs_lock);
4651
4652	return 0;
4653}
4654
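/*
 * Drop a reference on an extent buffer. Uses a lockless decrement while the
 * reference count is clearly above the point where the buffer could go away,
 * otherwise takes refs_lock and also drops the tree reference for stale
 * buffers that are not under IO.
 */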
4655void free_extent_buffer(struct extent_buffer *eb)
4656{
4657	int refs;
 
4658	if (!eb)
4659		return;
4660
4661	refs = atomic_read(&eb->refs);
4662	while (1) {
4663		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
4664		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
4665			refs == 1))
4666			break;
4667		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
 
4668			return;
4669	}
4670
4671	spin_lock(&eb->refs_lock);
4672	if (atomic_read(&eb->refs) == 2 &&
4673	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4674	    !extent_buffer_under_io(eb) &&
4675	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4676		atomic_dec(&eb->refs);
4677
4678	/*
4679	 * I know this is terrible, but it's temporary until we stop tracking
4680	 * the uptodate bits and such for the extent buffers.
4681	 */
4682	release_extent_buffer(eb);
4683}
4684
4685void free_extent_buffer_stale(struct extent_buffer *eb)
4686{
4687	if (!eb)
4688		return;
4689
4690	spin_lock(&eb->refs_lock);
4691	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4692
4693	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4694	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4695		atomic_dec(&eb->refs);
4696	release_extent_buffer(eb);
4697}
4698
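/*
 * Clear the dirty bit of a locked btree page and, if the page is no longer
 * dirty, also clear the PAGECACHE_TAG_DIRTY tag in the mapping so writeback
 * won't pick it up again.
 */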
4699static void btree_clear_page_dirty(struct page *page)
4700{
4701	ASSERT(PageDirty(page));
4702	ASSERT(PageLocked(page));
4703	clear_page_dirty_for_io(page);
4704	xa_lock_irq(&page->mapping->i_pages);
4705	if (!PageDirty(page))
4706		__xa_clear_mark(&page->mapping->i_pages,
4707				page_index(page), PAGECACHE_TAG_DIRTY);
4708	xa_unlock_irq(&page->mapping->i_pages);
4709}
4710
4711static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4712{
4713	struct btrfs_fs_info *fs_info = eb->fs_info;
4714	struct page *page = eb->pages[0];
4715	bool last;
4716
4717	/* btree_clear_page_dirty() needs page locked */
4718	lock_page(page);
4719	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
4720						  eb->len);
4721	if (last)
4722		btree_clear_page_dirty(page);
4723	unlock_page(page);
4724	WARN_ON(atomic_read(&eb->refs) == 0);
4725}
4726
4727void clear_extent_buffer_dirty(const struct extent_buffer *eb)
4728{
4729	int i;
4730	int num_pages;
4731	struct page *page;
4732
4733	if (eb->fs_info->nodesize < PAGE_SIZE)
4734		return clear_subpage_extent_buffer_dirty(eb);
4735
4736	num_pages = num_extent_pages(eb);
4737
4738	for (i = 0; i < num_pages; i++) {
4739		page = eb->pages[i];
4740		if (!PageDirty(page))
4741			continue;
 
4742		lock_page(page);
4743		btree_clear_page_dirty(page);
4744		ClearPageError(page);
4745		unlock_page(page);
4746	}
4747	WARN_ON(atomic_read(&eb->refs) == 0);
4748}
4749
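/*
 * Mark the extent buffer (all its pages, or its subpage range) dirty and
 * return whether it was already dirty before this call.
 */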
4750bool set_extent_buffer_dirty(struct extent_buffer *eb)
4751{
4752	int i;
4753	int num_pages;
4754	bool was_dirty;
4755
4756	check_buffer_tree_ref(eb);
4757
4758	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4759
4760	num_pages = num_extent_pages(eb);
4761	WARN_ON(atomic_read(&eb->refs) == 0);
4762	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4763
4764	if (!was_dirty) {
4765		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4766
4767		/*
4768		 * For subpage case, we can have other extent buffers in the
4769		 * same page, and in clear_subpage_extent_buffer_dirty() we
4770		 * have to clear page dirty without subpage lock held.
4771		 * This can cause race where our page gets dirty cleared after
4772		 * we just set it.
4773		 *
4774		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4775		 * its page for other reasons, we can use page lock to prevent
4776		 * the above race.
4777		 */
4778		if (subpage)
4779			lock_page(eb->pages[0]);
4780		for (i = 0; i < num_pages; i++)
4781			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
4782					     eb->start, eb->len);
4783		if (subpage)
4784			unlock_page(eb->pages[0]);
4785	}
4786#ifdef CONFIG_BTRFS_DEBUG
4787	for (i = 0; i < num_pages; i++)
4788		ASSERT(PageDirty(eb->pages[i]));
4789#endif
4790
4791	return was_dirty;
4792}
4793
4794void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4795{
4796	struct btrfs_fs_info *fs_info = eb->fs_info;
4797	struct page *page;
4798	int num_pages;
4799	int i;
4800
4801	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4802	num_pages = num_extent_pages(eb);
4803	for (i = 0; i < num_pages; i++) {
4804		page = eb->pages[i];
4805		if (!page)
4806			continue;
4807
4808		/*
4809		 * This is special handling for metadata subpage, as regular
4810		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4811		 */
4812		if (fs_info->nodesize >= PAGE_SIZE)
4813			ClearPageUptodate(page);
4814		else
4815			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
4816						     eb->len);
4817	}
4818}
4819
4820void set_extent_buffer_uptodate(struct extent_buffer *eb)
4821{
4822	struct btrfs_fs_info *fs_info = eb->fs_info;
4823	struct page *page;
4824	int num_pages;
4825	int i;
4826
4827	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4828	num_pages = num_extent_pages(eb);
4829	for (i = 0; i < num_pages; i++) {
4830		page = eb->pages[i];
4831
4832		/*
4833		 * This is special handling for metadata subpage, as regular
4834		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4835		 */
4836		if (fs_info->nodesize >= PAGE_SIZE)
4837			SetPageUptodate(page);
4838		else
4839			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
4840						   eb->len);
4841	}
4842}
4843
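/*
 * Read a subpage (nodesize < PAGE_SIZE) extent buffer. The eb's range in the
 * btree inode's io tree is used for synchronization instead of the page lock,
 * as multiple extent buffers can share one page.
 */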
4844static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
4845				      int mirror_num,
4846				      struct btrfs_tree_parent_check *check)
4847{
4848	struct btrfs_fs_info *fs_info = eb->fs_info;
4849	struct extent_io_tree *io_tree;
4850	struct page *page = eb->pages[0];
4851	struct extent_state *cached_state = NULL;
4852	struct btrfs_bio_ctrl bio_ctrl = {
4853		.mirror_num = mirror_num,
4854		.parent_check = check,
4855	};
4856	int ret = 0;
4857
4858	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
4859	ASSERT(PagePrivate(page));
4860	ASSERT(check);
4861	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
4862
4863	if (wait == WAIT_NONE) {
4864		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1,
4865				     &cached_state))
4866			return -EAGAIN;
4867	} else {
4868		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1,
4869				  &cached_state);
4870		if (ret < 0)
4871			return ret;
4872	}
4873
4874	ret = 0;
4875	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
4876	    PageUptodate(page) ||
4877	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
4878		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4879		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
4880			      &cached_state);
4881		return ret;
4882	}
4883
4884	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4885	eb->read_mirror = 0;
4886	atomic_set(&eb->io_pages, 1);
4887	check_buffer_tree_ref(eb);
4888	bio_ctrl.end_io_func = end_bio_extent_readpage;
4889
4890	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
4891
4892	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
4893	ret = submit_extent_page(REQ_OP_READ, NULL, &bio_ctrl,
4894				 eb->start, page, eb->len,
4895				 eb->start - page_offset(page), 0, true);
4896	if (ret) {
4897		/*
4898		 * In the endio function, if we hit something wrong we will
4899		 * increase the io_pages, so here we need to decrease it for
4900		 * error path.
4901		 */
4902		atomic_dec(&eb->io_pages);
4903	}
4904	submit_one_bio(&bio_ctrl);
4905	if (ret || wait != WAIT_COMPLETE) {
4906		free_extent_state(cached_state);
4907		return ret;
4908	}
4909
4910	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1,
4911			EXTENT_LOCKED, &cached_state);
4912	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4913		ret = -EIO;
4914	return ret;
4915}
4916
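/*
 * Read the content of an extent buffer from disk.
 *
 * With WAIT_NONE only the read bios are submitted (used by readahead), with
 * WAIT_COMPLETE we also wait for the read to finish and return -EIO if the
 * buffer did not become uptodate.
 */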
4917int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4918			     struct btrfs_tree_parent_check *check)
4919{
4920	int i;
 
4921	struct page *page;
4922	int err;
4923	int ret = 0;
4924	int locked_pages = 0;
4925	int all_uptodate = 1;
4926	int num_pages;
4927	unsigned long num_reads = 0;
4928	struct btrfs_bio_ctrl bio_ctrl = {
4929		.mirror_num = mirror_num,
4930		.parent_check = check,
4931	};
4932
4933	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4934		return 0;
4935
4936	/*
4937	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4938	 * operation, which could potentially still be in flight.  In this case
4939	 * we simply want to return an error.
4940	 */
4941	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4942		return -EIO;
4943
4944	if (eb->fs_info->nodesize < PAGE_SIZE)
4945		return read_extent_buffer_subpage(eb, wait, mirror_num, check);
4946
4947	num_pages = num_extent_pages(eb);
4948	for (i = 0; i < num_pages; i++) {
4949		page = eb->pages[i];
4950		if (wait == WAIT_NONE) {
4951			/*
4952			 * WAIT_NONE is only utilized by readahead. If we can't
4953			 * acquire the lock atomically it means either the eb
4954			 * is being read out or under modification.
4955			 * Either way the eb will be or has been cached, so
4956			 * readahead can exit safely.
4957			 */
4958			if (!trylock_page(page))
4959				goto unlock_exit;
4960		} else {
4961			lock_page(page);
4962		}
4963		locked_pages++;
4964	}
4965	/*
4966	 * We need to first lock all the pages to make sure that
4967	 * the uptodate bit of our pages won't be affected by
4968	 * clear_extent_buffer_uptodate().
4969	 */
4970	for (i = 0; i < num_pages; i++) {
4971		page = eb->pages[i];
4972		if (!PageUptodate(page)) {
4973			num_reads++;
4974			all_uptodate = 0;
4975		}
4976	}
4977
4978	if (all_uptodate) {
4979		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
4980		goto unlock_exit;
4981	}
4982
4983	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4984	eb->read_mirror = 0;
4985	atomic_set(&eb->io_pages, num_reads);
4986	/*
4987	 * It is possible for release_folio to clear the TREE_REF bit before we
4988	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
4989	 */
4990	check_buffer_tree_ref(eb);
4991	bio_ctrl.end_io_func = end_bio_extent_readpage;
4992	for (i = 0; i < num_pages; i++) {
4993		page = eb->pages[i];
4994
4995		if (!PageUptodate(page)) {
4996			if (ret) {
4997				atomic_dec(&eb->io_pages);
4998				unlock_page(page);
4999				continue;
5000			}
5001
5002			ClearPageError(page);
5003			err = submit_extent_page(REQ_OP_READ, NULL,
5004					 &bio_ctrl, page_offset(page), page,
5005					 PAGE_SIZE, 0, 0, false);
5006			if (err) {
5007				/*
5008				 * We failed to submit the bio so it's the
5009				 * caller's responsibility to perform cleanup
5010				 * i.e unlock page/set error bit.
5011				 * i.e. unlock the page / set the error bit.
5012				ret = err;
5013				SetPageError(page);
5014				unlock_page(page);
5015				atomic_dec(&eb->io_pages);
5016			}
5017		} else {
5018			unlock_page(page);
5019		}
5020	}
5021
5022	submit_one_bio(&bio_ctrl);
5023
5024	if (ret || wait != WAIT_COMPLETE)
5025		return ret;
5026
5027	for (i = 0; i < num_pages; i++) {
5028		page = eb->pages[i];
5029		wait_on_page_locked(page);
5030		if (!PageUptodate(page))
5031			ret = -EIO;
5032	}
5033
5034	return ret;
5035
5036unlock_exit:
 
5037	while (locked_pages > 0) {
5038		locked_pages--;
5039		page = eb->pages[locked_pages];
5040		unlock_page(page);
 
5041	}
5042	return ret;
5043}
5044
5045static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
5046			    unsigned long len)
5047{
5048	btrfs_warn(eb->fs_info,
5049		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
5050		eb->start, eb->len, start, len);
5051	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
5052
5053	return true;
5054}
5055
5056/*
5057 * Check if the [start, start + len) range is valid before reading/writing
5058 * the eb.
5059 * NOTE: @start and @len are offset inside the eb, not logical address.
5060 *
5061 * Caller should not touch the dst/src memory if this function returns error.
5062 */
5063static inline int check_eb_range(const struct extent_buffer *eb,
5064				 unsigned long start, unsigned long len)
5065{
5066	unsigned long offset;
5067
5068	/* start, start + len should not go beyond eb->len nor overflow */
5069	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
5070		return report_eb_range(eb, start, len);
5071
5072	return false;
5073}
5074
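/*
 * Copy @len bytes starting at offset @start inside the extent buffer into the
 * memory pointed to by @dstv, crossing page boundaries as needed.
 */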
5075void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5076			unsigned long start, unsigned long len)
5077{
5078	size_t cur;
5079	size_t offset;
5080	struct page *page;
5081	char *kaddr;
5082	char *dst = (char *)dstv;
5083	unsigned long i = get_eb_page_index(start);
 
5084
5085	if (check_eb_range(eb, start, len))
5086		return;
5087
5088	offset = get_eb_offset_in_page(eb, start);
5089
5090	while (len > 0) {
5091		page = eb->pages[i];
5092
5093		cur = min(len, (PAGE_SIZE - offset));
5094		kaddr = page_address(page);
5095		memcpy(dst, kaddr + offset, cur);
5096
5097		dst += cur;
5098		len -= cur;
5099		offset = 0;
5100		i++;
5101	}
5102}
5103
5104int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
5105				       void __user *dstv,
5106				       unsigned long start, unsigned long len)
5107{
5108	size_t cur;
5109	size_t offset;
5110	struct page *page;
5111	char *kaddr;
5112	char __user *dst = (char __user *)dstv;
5113	unsigned long i = get_eb_page_index(start);
 
5114	int ret = 0;
5115
5116	WARN_ON(start > eb->len);
5117	WARN_ON(start + len > eb->start + eb->len);
5118
5119	offset = get_eb_offset_in_page(eb, start);
5120
5121	while (len > 0) {
5122		page = eb->pages[i];
5123
5124		cur = min(len, (PAGE_SIZE - offset));
5125		kaddr = page_address(page);
5126		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
5127			ret = -EFAULT;
5128			break;
5129		}
5130
5131		dst += cur;
5132		len -= cur;
5133		offset = 0;
5134		i++;
5135	}
5136
5137	return ret;
5138}
5139
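/*
 * Compare @len bytes at offset @start inside the extent buffer with the memory
 * at @ptrv. Returns 0 if they are equal, a non-zero memcmp() style result
 * otherwise, or -EINVAL if the range is out of bounds.
 */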
5140int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5141			 unsigned long start, unsigned long len)
5142{
5143	size_t cur;
5144	size_t offset;
5145	struct page *page;
5146	char *kaddr;
5147	char *ptr = (char *)ptrv;
5148	unsigned long i = get_eb_page_index(start);
 
5149	int ret = 0;
5150
5151	if (check_eb_range(eb, start, len))
5152		return -EINVAL;
5153
5154	offset = get_eb_offset_in_page(eb, start);
5155
5156	while (len > 0) {
5157		page = eb->pages[i];
5158
5159		cur = min(len, (PAGE_SIZE - offset));
5160
5161		kaddr = page_address(page);
5162		ret = memcmp(ptr, kaddr + offset, cur);
5163		if (ret)
5164			break;
5165
5166		ptr += cur;
5167		len -= cur;
5168		offset = 0;
5169		i++;
5170	}
5171	return ret;
5172}
5173
5174/*
5175 * Check that the extent buffer is uptodate.
5176 *
5177 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
5178 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
5179 */
5180static void assert_eb_page_uptodate(const struct extent_buffer *eb,
5181				    struct page *page)
5182{
5183	struct btrfs_fs_info *fs_info = eb->fs_info;
5184
5185	/*
5186	 * If we are using the commit root we could potentially clear a page
5187	 * Uptodate while we're using the extent buffer that we've previously
5188	 * looked up.  We don't want to complain in this case, as the page was
5189	 * valid before, we just didn't write it out.  Instead we want to catch
5190	 * the case where we didn't actually read the block properly, which
5191	 * would have !PageUptodate && !PageError, as we clear PageError before
5192	 * reading.
5193	 */
5194	if (fs_info->nodesize < PAGE_SIZE) {
5195		bool uptodate, error;
5196
5197		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
5198						       eb->start, eb->len);
5199		error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
5200		WARN_ON(!uptodate && !error);
5201	} else {
5202		WARN_ON(!PageUptodate(page) && !PageError(page));
5203	}
5204}
5205
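/*
 * Write the chunk tree UUID into the btrfs_header of the extent buffer.  The
 * header always lives in the first page, so only eb->pages[0] is touched;
 * the same applies to write_extent_buffer_fsid() below.
 */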
5206void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
5207		const void *srcv)
5208{
5209	char *kaddr;
5210
5211	assert_eb_page_uptodate(eb, eb->pages[0]);
5212	kaddr = page_address(eb->pages[0]) +
5213		get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
5214						   chunk_tree_uuid));
5215	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
5216}
5217
5218void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
5219{
5220	char *kaddr;
5221
5222	assert_eb_page_uptodate(eb, eb->pages[0]);
5223	kaddr = page_address(eb->pages[0]) +
5224		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
5225	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
5226}
5227
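/*
 * Copy @len bytes from the kernel buffer @srcv into the extent buffer at
 * offset @start.  The touched pages are expected to be uptodate; an invalid
 * range is reported and the write is skipped.
 */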
5228void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
5229			 unsigned long start, unsigned long len)
5230{
5231	size_t cur;
5232	size_t offset;
5233	struct page *page;
5234	char *kaddr;
5235	char *src = (char *)srcv;
5236	unsigned long i = get_eb_page_index(start);
5237
5238	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
5239
5240	if (check_eb_range(eb, start, len))
5241		return;
5242
5243	offset = get_eb_offset_in_page(eb, start);
5244
5245	while (len > 0) {
5246		page = eb->pages[i];
5247		assert_eb_page_uptodate(eb, page);
5248
5249		cur = min(len, PAGE_SIZE - offset);
5250		kaddr = page_address(page);
5251		memcpy(kaddr + offset, src, cur);
5252
5253		src += cur;
5254		len -= cur;
5255		offset = 0;
5256		i++;
5257	}
5258}
5259
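/*
 * Zero @len bytes of the extent buffer starting at offset @start, page by
 * page.
 */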
5260void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
5261		unsigned long len)
5262{
5263	size_t cur;
5264	size_t offset;
5265	struct page *page;
5266	char *kaddr;
5267	unsigned long i = get_eb_page_index(start);
5268
5269	if (check_eb_range(eb, start, len))
5270		return;
5271
5272	offset = get_eb_offset_in_page(eb, start);
5273
5274	while (len > 0) {
5275		page = eb->pages[i];
5276		assert_eb_page_uptodate(eb, page);
5277
5278		cur = min(len, PAGE_SIZE - offset);
5279		kaddr = page_address(page);
5280		memset(kaddr + offset, 0, cur);
5281
5282		len -= cur;
5283		offset = 0;
5284		i++;
5285	}
5286}
5287
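/*
 * Copy the full contents of extent buffer @src into @dst.  Both buffers must
 * have the same length.  For subpage (nodesize < PAGE_SIZE) both ebs live
 * within a single page, so one memcpy() of src->len bytes is enough.
 */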
5288void copy_extent_buffer_full(const struct extent_buffer *dst,
5289			     const struct extent_buffer *src)
5290{
5291	int i;
5292	int num_pages;
5293
5294	ASSERT(dst->len == src->len);
5295
5296	if (dst->fs_info->nodesize >= PAGE_SIZE) {
5297		num_pages = num_extent_pages(dst);
5298		for (i = 0; i < num_pages; i++)
5299			copy_page(page_address(dst->pages[i]),
5300				  page_address(src->pages[i]));
5301	} else {
5302		size_t src_offset = get_eb_offset_in_page(src, 0);
5303		size_t dst_offset = get_eb_offset_in_page(dst, 0);
5304
5305		ASSERT(src->fs_info->nodesize < PAGE_SIZE);
5306		memcpy(page_address(dst->pages[0]) + dst_offset,
5307		       page_address(src->pages[0]) + src_offset,
5308		       src->len);
5309	}
5310}
5311
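/*
 * Copy @len bytes from offset @src_offset of @src into offset @dst_offset of
 * @dst.  Both ranges are validated with check_eb_range() before any data is
 * moved.
 */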
5312void copy_extent_buffer(const struct extent_buffer *dst,
5313			const struct extent_buffer *src,
5314			unsigned long dst_offset, unsigned long src_offset,
5315			unsigned long len)
5316{
5317	u64 dst_len = dst->len;
5318	size_t cur;
5319	size_t offset;
5320	struct page *page;
5321	char *kaddr;
5322	unsigned long i = get_eb_page_index(dst_offset);
5323
5324	if (check_eb_range(dst, dst_offset, len) ||
5325	    check_eb_range(src, src_offset, len))
5326		return;
5327
5328	WARN_ON(src->len != dst_len);
5329
5330	offset = get_eb_offset_in_page(dst, dst_offset);
5331
5332	while (len > 0) {
5333		page = dst->pages[i];
5334		assert_eb_page_uptodate(dst, page);
5335
5336		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5337
5338		kaddr = page_address(page);
5339		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5340
5341		src_offset += cur;
5342		len -= cur;
5343		offset = 0;
5344		i++;
5345	}
5346}
5347
5348/*
5349 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5350 * given bit number
5351 * @eb: the extent buffer
5352 * @start: offset of the bitmap item in the extent buffer
5353 * @nr: bit number
5354 * @page_index: return index of the page in the extent buffer that contains the
5355 * given bit number
5356 * @page_offset: return offset into the page given by page_index
5357 *
5358 * This helper hides the ugliness of finding the byte in an extent buffer which
5359 * contains a given bit.
5360 */
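/*
 * Worked example (illustrative numbers only): with 4K pages and a page
 * aligned eb->start, @start = 100 and @nr = 75 give BIT_BYTE(75) = 9, so
 * offset = 100 + 0 + 9 = 109, i.e. *page_index = 0 and *page_offset = 109.
 */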
5361static inline void eb_bitmap_offset(const struct extent_buffer *eb,
5362				    unsigned long start, unsigned long nr,
5363				    unsigned long *page_index,
5364				    size_t *page_offset)
5365{
5366	size_t byte_offset = BIT_BYTE(nr);
5367	size_t offset;
5368
5369	/*
5370	 * The byte we want is the offset of the extent buffer + the offset of
5371	 * the bitmap item in the extent buffer + the offset of the byte in the
5372	 * bitmap item.
5373	 */
5374	offset = start + offset_in_page(eb->start) + byte_offset;
5375
5376	*page_index = offset >> PAGE_SHIFT;
5377	*page_offset = offset_in_page(offset);
5378}
5379
5380/*
5381 * Determine whether a bit in a bitmap item is set.
5382 *
5383 * @eb:     the extent buffer
5384 * @start:  offset of the bitmap item in the extent buffer
5385 * @nr:     bit number to test
5386 */
5387int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
5388			   unsigned long nr)
5389{
5390	u8 *kaddr;
5391	struct page *page;
5392	unsigned long i;
5393	size_t offset;
5394
5395	eb_bitmap_offset(eb, start, nr, &i, &offset);
5396	page = eb->pages[i];
5397	assert_eb_page_uptodate(eb, page);
5398	kaddr = page_address(page);
5399	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5400}
5401
5402/*
5403 * Set an area of a bitmap to 1.
5404 *
5405 * @eb:     the extent buffer
5406 * @start:  offset of the bitmap item in the extent buffer
5407 * @pos:    bit number of the first bit
5408 * @len:    number of bits to set
5409 */
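/*
 * Illustrative example: @pos = 5 and @len = 10 first set bits 5-7 of the
 * byte containing @pos (mask 0xe0), then bits 0-6 of the following byte
 * (mask 0x7f), for 10 bits in total.
 */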
5410void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
5411			      unsigned long pos, unsigned long len)
5412{
5413	u8 *kaddr;
5414	struct page *page;
5415	unsigned long i;
5416	size_t offset;
5417	const unsigned int size = pos + len;
5418	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5419	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5420
5421	eb_bitmap_offset(eb, start, pos, &i, &offset);
5422	page = eb->pages[i];
5423	assert_eb_page_uptodate(eb, page);
5424	kaddr = page_address(page);
5425
5426	while (len >= bits_to_set) {
5427		kaddr[offset] |= mask_to_set;
5428		len -= bits_to_set;
5429		bits_to_set = BITS_PER_BYTE;
5430		mask_to_set = ~0;
5431		if (++offset >= PAGE_SIZE && len > 0) {
5432			offset = 0;
5433			page = eb->pages[++i];
5434			assert_eb_page_uptodate(eb, page);
5435			kaddr = page_address(page);
5436		}
5437	}
5438	if (len) {
5439		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5440		kaddr[offset] |= mask_to_set;
5441	}
5442}
5443
5444
5445/*
5446 * Clear an area of a bitmap.
5447 *
5448 * @eb:     the extent buffer
5449 * @start:  offset of the bitmap item in the extent buffer
5450 * @pos:    bit number of the first bit
5451 * @len:    number of bits to clear
5452 */
5453void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
5454				unsigned long start, unsigned long pos,
5455				unsigned long len)
5456{
5457	u8 *kaddr;
5458	struct page *page;
5459	unsigned long i;
5460	size_t offset;
5461	const unsigned int size = pos + len;
5462	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5463	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5464
5465	eb_bitmap_offset(eb, start, pos, &i, &offset);
5466	page = eb->pages[i];
5467	assert_eb_page_uptodate(eb, page);
5468	kaddr = page_address(page);
5469
5470	while (len >= bits_to_clear) {
5471		kaddr[offset] &= ~mask_to_clear;
5472		len -= bits_to_clear;
5473		bits_to_clear = BITS_PER_BYTE;
5474		mask_to_clear = ~0;
5475		if (++offset >= PAGE_SIZE && len > 0) {
5476			offset = 0;
5477			page = eb->pages[++i];
5478			assert_eb_page_uptodate(eb, page);
5479			kaddr = page_address(page);
5480		}
5481	}
5482	if (len) {
5483		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5484		kaddr[offset] &= ~mask_to_clear;
5485	}
5486}
5487
5488static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5489{
5490	unsigned long distance = (src > dst) ? src - dst : dst - src;
5491	return distance < len;
5492}
5493
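/*
 * Copy @len bytes from @src_page + @src_off to @dst_page + @dst_off.  When
 * source and destination are the same page and the two ranges overlap,
 * memmove() is used instead of memcpy().
 */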
5494static void copy_pages(struct page *dst_page, struct page *src_page,
5495		       unsigned long dst_off, unsigned long src_off,
5496		       unsigned long len)
5497{
5498	char *dst_kaddr = page_address(dst_page);
5499	char *src_kaddr;
5500	int must_memmove = 0;
5501
5502	if (dst_page != src_page) {
5503		src_kaddr = page_address(src_page);
5504	} else {
5505		src_kaddr = dst_kaddr;
5506		if (areas_overlap(src_off, dst_off, len))
5507			must_memmove = 1;
5508	}
5509
5510	if (must_memmove)
5511		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5512	else
5513		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5514}
5515
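/*
 * Copy @len bytes inside a single extent buffer, from @src_offset to
 * @dst_offset, one page at a time.  For overlapping ranges where the
 * destination is above the source use memmove_extent_buffer() instead.
 */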
5516void memcpy_extent_buffer(const struct extent_buffer *dst,
5517			  unsigned long dst_offset, unsigned long src_offset,
5518			  unsigned long len)
5519{
5520	size_t cur;
5521	size_t dst_off_in_page;
5522	size_t src_off_in_page;
5523	unsigned long dst_i;
5524	unsigned long src_i;
5525
5526	if (check_eb_range(dst, dst_offset, len) ||
5527	    check_eb_range(dst, src_offset, len))
5528		return;
5529
5530	while (len > 0) {
5531		dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
5532		src_off_in_page = get_eb_offset_in_page(dst, src_offset);
5533
5534		dst_i = get_eb_page_index(dst_offset);
5535		src_i = get_eb_page_index(src_offset);
5536
5537		cur = min(len, (unsigned long)(PAGE_SIZE -
5538					       src_off_in_page));
5539		cur = min_t(unsigned long, cur,
5540			(unsigned long)(PAGE_SIZE - dst_off_in_page));
5541
5542		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5543			   dst_off_in_page, src_off_in_page, cur);
5544
5545		src_offset += cur;
5546		dst_offset += cur;
5547		len -= cur;
5548	}
5549}
5550
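/*
 * Overlap-safe version of memcpy_extent_buffer().  When the destination is
 * below the source a plain forward copy is safe; otherwise the range is
 * copied backwards, page by page, starting from the last byte.
 */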
5551void memmove_extent_buffer(const struct extent_buffer *dst,
5552			   unsigned long dst_offset, unsigned long src_offset,
5553			   unsigned long len)
5554{
5555	size_t cur;
5556	size_t dst_off_in_page;
5557	size_t src_off_in_page;
5558	unsigned long dst_end = dst_offset + len - 1;
5559	unsigned long src_end = src_offset + len - 1;
5560	unsigned long dst_i;
5561	unsigned long src_i;
5562
5563	if (check_eb_range(dst, dst_offset, len) ||
5564	    check_eb_range(dst, src_offset, len))
5565		return;
5566	if (dst_offset < src_offset) {
5567		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5568		return;
5569	}
5570	while (len > 0) {
5571		dst_i = get_eb_page_index(dst_end);
5572		src_i = get_eb_page_index(src_end);
5573
5574		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
5575		src_off_in_page = get_eb_offset_in_page(dst, src_end);
5576
5577		cur = min_t(unsigned long, len, src_off_in_page + 1);
5578		cur = min(cur, dst_off_in_page + 1);
5579		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5580			   dst_off_in_page - cur + 1,
5581			   src_off_in_page - cur + 1, cur);
5582
5583		dst_end -= cur;
5584		src_end -= cur;
5585		len -= cur;
5586	}
5587}
5588
5589#define GANG_LOOKUP_SIZE	16
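/*
 * Return the first extent buffer in the radix tree whose start is inside
 * @page and >= @bytenr, or NULL if there is none.  The caller must hold
 * fs_info->buffer_lock.
 */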
5590static struct extent_buffer *get_next_extent_buffer(
5591		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
5592{
5593	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
5594	struct extent_buffer *found = NULL;
5595	u64 page_start = page_offset(page);
5596	u64 cur = page_start;
5597
5598	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
5599	lockdep_assert_held(&fs_info->buffer_lock);
5600
5601	while (cur < page_start + PAGE_SIZE) {
5602		int ret;
5603		int i;
5604
5605		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
5606				(void **)gang, cur >> fs_info->sectorsize_bits,
5607				min_t(unsigned int, GANG_LOOKUP_SIZE,
5608				      PAGE_SIZE / fs_info->nodesize));
5609		if (ret == 0)
5610			goto out;
5611		for (i = 0; i < ret; i++) {
5612			/* Already beyond page end */
5613			if (gang[i]->start >= page_start + PAGE_SIZE)
5614				goto out;
5615			/* Found one */
5616			if (gang[i]->start >= bytenr) {
5617				found = gang[i];
5618				goto out;
5619			}
5620		}
5621		cur = gang[ret - 1]->start + gang[ret - 1]->len;
5622	}
5623out:
5624	return found;
5625}
5626
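/*
 * Subpage variant of try_release_extent_buffer(): walk all extent buffers
 * that live inside @page and try to release each of them.  Returns 1 if the
 * page private got cleared (every eb was released and the page can be freed),
 * 0 otherwise.
 */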
5627static int try_release_subpage_extent_buffer(struct page *page)
5628{
5629	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
5630	u64 cur = page_offset(page);
5631	const u64 end = page_offset(page) + PAGE_SIZE;
5632	int ret;
5633
5634	while (cur < end) {
5635		struct extent_buffer *eb = NULL;
5636
5637		/*
5638		 * Unlike try_release_extent_buffer(), which uses page->private
5639		 * to grab the buffer, for the subpage case we rely on the radix
5640		 * tree, thus we need to ensure radix tree consistency.
5641		 *
5642		 * We also want an atomic snapshot of the radix tree, thus go
5643		 * with spinlock rather than RCU.
5644		 */
5645		spin_lock(&fs_info->buffer_lock);
5646		eb = get_next_extent_buffer(fs_info, page, cur);
5647		if (!eb) {
5648			/* No more eb in the page range after or at cur */
5649			spin_unlock(&fs_info->buffer_lock);
5650			break;
5651		}
5652		cur = eb->start + eb->len;
5653
5654		/*
5655		 * The same as try_release_extent_buffer(), to ensure the eb
5656		 * won't disappear out from under us.
5657		 */
5658		spin_lock(&eb->refs_lock);
5659		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5660			spin_unlock(&eb->refs_lock);
5661			spin_unlock(&fs_info->buffer_lock);
5662			break;
5663		}
5664		spin_unlock(&fs_info->buffer_lock);
5665
5666		/*
5667		 * If the tree ref isn't set then we know the ref on this eb is a
5668		 * real ref, so just return; this eb will likely be freed soon
5669		 * anyway.
5670		 */
5671		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5672			spin_unlock(&eb->refs_lock);
5673			break;
5674		}
5675
5676		/*
5677		 * Here we don't care about the return value; we will always
5678		 * check the page private at the end.  And
5679		 * release_extent_buffer() will release the refs_lock.
5680		 */
5681		release_extent_buffer(eb);
5682	}
5683	/*
5684	 * Finally, check if we have cleared page private; if we have released
5685	 * all ebs in the page, the page private should be cleared by now.
5686	 */
5687	spin_lock(&page->mapping->private_lock);
5688	if (!PagePrivate(page))
5689		ret = 1;
5690	else
5691		ret = 0;
5692	spin_unlock(&page->mapping->private_lock);
5693	return ret;
5694
5695}
5696
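/*
 * Try to release the extent buffer attached to @page so that the page can be
 * freed.  Returns 1 when the page can be freed (no eb attached, or the eb was
 * released), 0 when the eb is still in use.
 */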
5697int try_release_extent_buffer(struct page *page)
5698{
5699	struct extent_buffer *eb;
5700
5701	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
5702		return try_release_subpage_extent_buffer(page);
5703
5704	/*
5705	 * We need to make sure nobody is changing page->private, as we rely on
5706	 * page->private as the pointer to the extent buffer.
5707	 */
5708	spin_lock(&page->mapping->private_lock);
5709	if (!PagePrivate(page)) {
5710		spin_unlock(&page->mapping->private_lock);
5711		return 1;
5712	}
5713
5714	eb = (struct extent_buffer *)page->private;
5715	BUG_ON(!eb);
5716
5717	/*
5718	 * This is a little awful but should be ok, we need to make sure that
5719	 * the eb doesn't disappear out from under us while we're looking at
5720	 * this page.
5721	 */
5722	spin_lock(&eb->refs_lock);
5723	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5724		spin_unlock(&eb->refs_lock);
5725		spin_unlock(&page->mapping->private_lock);
5726		return 0;
5727	}
5728	spin_unlock(&page->mapping->private_lock);
5729
5730	/*
5731	 * If the tree ref isn't set then we know the ref on this eb is a real ref,
5732	 * so just return; this page will likely be freed soon anyway.
5733	 */
5734	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5735		spin_unlock(&eb->refs_lock);
5736		return 0;
5737	}
5738
5739	return release_extent_buffer(eb);
5740}
5741
5742/*
5743 * btrfs_readahead_tree_block - attempt to readahead a child block
5744 * @fs_info:	the fs_info
5745 * @bytenr:	bytenr to read
5746 * @owner_root: objectid of the root that owns this eb
5747 * @gen:	generation for the uptodate check, can be 0
5748 * @level:	level for the eb
5749 *
5750 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
5751 * normal uptodate check of the eb, without checking the generation.  If we have
5752 * to read the block we will not block on anything.
5753 */
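/*
 * For an example of how the arguments are typically derived, see
 * btrfs_readahead_node_child() below, which pulls @bytenr, @owner_root, @gen
 * and @level out of a parent node slot.
 */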
5754void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
5755				u64 bytenr, u64 owner_root, u64 gen, int level)
5756{
5757	struct btrfs_tree_parent_check check = {
5758		.has_first_key = 0,
5759		.level = level,
5760		.transid = gen
5761	};
5762	struct extent_buffer *eb;
5763	int ret;
5764
5765	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
5766	if (IS_ERR(eb))
5767		return;
5768
5769	if (btrfs_buffer_uptodate(eb, gen, 1)) {
5770		free_extent_buffer(eb);
5771		return;
5772	}
5773
5774	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
5775	if (ret < 0)
5776		free_extent_buffer_stale(eb);
5777	else
5778		free_extent_buffer(eb);
5779}
5780
5781/*
5782 * btrfs_readahead_node_child - readahead a node's child block
5783 * @node:	parent node we're reading from
5784 * @slot:	slot in the parent node for the child we want to read
5785 *
5786 * A helper for btrfs_readahead_tree_block(); we simply read the block whose
5787 * bytenr is stored in the given slot of the node provided.
5788 */
5789void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5790{
5791	btrfs_readahead_tree_block(node->fs_info,
5792				   btrfs_node_blockptr(node, slot),
5793				   btrfs_header_owner(node),
5794				   btrfs_node_ptr_generation(node, slot),
5795				   btrfs_header_level(node) - 1);
5796}
v4.6
 
 
   1#include <linux/bitops.h>
   2#include <linux/slab.h>
   3#include <linux/bio.h>
   4#include <linux/mm.h>
   5#include <linux/pagemap.h>
   6#include <linux/page-flags.h>
 
   7#include <linux/spinlock.h>
   8#include <linux/blkdev.h>
   9#include <linux/swap.h>
  10#include <linux/writeback.h>
  11#include <linux/pagevec.h>
  12#include <linux/prefetch.h>
  13#include <linux/cleancache.h>
 
  14#include "extent_io.h"
 
  15#include "extent_map.h"
  16#include "ctree.h"
  17#include "btrfs_inode.h"
  18#include "volumes.h"
  19#include "check-integrity.h"
  20#include "locking.h"
  21#include "rcu-string.h"
  22#include "backref.h"
 
 
 
 
 
 
 
 
 
 
 
  23
  24static struct kmem_cache *extent_state_cache;
  25static struct kmem_cache *extent_buffer_cache;
  26static struct bio_set *btrfs_bioset;
  27
  28static inline bool extent_state_in_tree(const struct extent_state *state)
  29{
  30	return !RB_EMPTY_NODE(&state->rb_node);
  31}
  32
  33#ifdef CONFIG_BTRFS_DEBUG
  34static LIST_HEAD(buffers);
  35static LIST_HEAD(states);
  36
  37static DEFINE_SPINLOCK(leak_lock);
  38
  39static inline
  40void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
  41{
 
  42	unsigned long flags;
  43
  44	spin_lock_irqsave(&leak_lock, flags);
  45	list_add(new, head);
  46	spin_unlock_irqrestore(&leak_lock, flags);
  47}
  48
  49static inline
  50void btrfs_leak_debug_del(struct list_head *entry)
  51{
 
  52	unsigned long flags;
  53
  54	spin_lock_irqsave(&leak_lock, flags);
  55	list_del(entry);
  56	spin_unlock_irqrestore(&leak_lock, flags);
  57}
  58
  59static inline
  60void btrfs_leak_debug_check(void)
  61{
  62	struct extent_state *state;
  63	struct extent_buffer *eb;
 
  64
  65	while (!list_empty(&states)) {
  66		state = list_entry(states.next, struct extent_state, leak_list);
  67		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
  68		       state->start, state->end, state->state,
  69		       extent_state_in_tree(state),
  70		       atomic_read(&state->refs));
  71		list_del(&state->leak_list);
  72		kmem_cache_free(extent_state_cache, state);
  73	}
  74
  75	while (!list_empty(&buffers)) {
  76		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
  77		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
  78		       "refs %d\n",
  79		       eb->start, eb->len, atomic_read(&eb->refs));
 
 
 
 
  80		list_del(&eb->leak_list);
  81		kmem_cache_free(extent_buffer_cache, eb);
  82	}
  83}
  84
  85#define btrfs_debug_check_extent_io_range(tree, start, end)		\
  86	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
  87static inline void __btrfs_debug_check_extent_io_range(const char *caller,
  88		struct extent_io_tree *tree, u64 start, u64 end)
  89{
  90	struct inode *inode;
  91	u64 isize;
  92
  93	if (!tree->mapping)
  94		return;
  95
  96	inode = tree->mapping->host;
  97	isize = i_size_read(inode);
  98	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
  99		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
 100		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
 101				caller, btrfs_ino(inode), isize, start, end);
 102	}
 103}
 104#else
 105#define btrfs_leak_debug_add(new, head)	do {} while (0)
 106#define btrfs_leak_debug_del(entry)	do {} while (0)
 107#define btrfs_leak_debug_check()	do {} while (0)
 108#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 109#endif
 110
 111#define BUFFER_LRU_MAX 64
 112
 113struct tree_entry {
 114	u64 start;
 115	u64 end;
 116	struct rb_node rb_node;
 117};
 118
 119struct extent_page_data {
 120	struct bio *bio;
 121	struct extent_io_tree *tree;
 122	get_extent_t *get_extent;
 123	unsigned long bio_flags;
 
 
 124
 125	/* tells writepage not to lock the state bits for this range
 126	 * it still does the unlocking
 
 
 
 
 127	 */
 128	unsigned int extent_locked:1;
 129
 130	/* tells the submit_bio code to use a WRITE_SYNC */
 131	unsigned int sync_io:1;
 132};
 133
 134static void add_extent_changeset(struct extent_state *state, unsigned bits,
 135				 struct extent_changeset *changeset,
 136				 int set)
 137{
 138	int ret;
 139
 140	if (!changeset)
 141		return;
 142	if (set && (state->state & bits) == bits)
 143		return;
 144	if (!set && (state->state & bits) == 0)
 145		return;
 146	changeset->bytes_changed += state->end - state->start + 1;
 147	ret = ulist_add(changeset->range_changed, state->start, state->end,
 148			GFP_ATOMIC);
 149	/* ENOMEM */
 150	BUG_ON(ret < 0);
 151}
 152
 153static noinline void flush_write_bio(void *data);
 154static inline struct btrfs_fs_info *
 155tree_fs_info(struct extent_io_tree *tree)
 156{
 157	if (!tree->mapping)
 158		return NULL;
 159	return btrfs_sb(tree->mapping->host->i_sb);
 160}
 161
 162int __init extent_io_init(void)
 163{
 164	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 165			sizeof(struct extent_state), 0,
 166			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 167	if (!extent_state_cache)
 168		return -ENOMEM;
 169
 170	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 171			sizeof(struct extent_buffer), 0,
 172			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 173	if (!extent_buffer_cache)
 174		goto free_state_cache;
 175
 176	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
 177				     offsetof(struct btrfs_io_bio, bio));
 178	if (!btrfs_bioset)
 179		goto free_buffer_cache;
 180
 181	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
 182		goto free_bioset;
 183
 184	return 0;
 185
 186free_bioset:
 187	bioset_free(btrfs_bioset);
 188	btrfs_bioset = NULL;
 189
 190free_buffer_cache:
 191	kmem_cache_destroy(extent_buffer_cache);
 192	extent_buffer_cache = NULL;
 193
 194free_state_cache:
 195	kmem_cache_destroy(extent_state_cache);
 196	extent_state_cache = NULL;
 197	return -ENOMEM;
 198}
 199
 200void extent_io_exit(void)
 201{
 202	btrfs_leak_debug_check();
 203
 204	/*
 205	 * Make sure all delayed rcu free are flushed before we
 206	 * destroy caches.
 207	 */
 208	rcu_barrier();
 209	kmem_cache_destroy(extent_state_cache);
 210	kmem_cache_destroy(extent_buffer_cache);
 211	if (btrfs_bioset)
 212		bioset_free(btrfs_bioset);
 213}
 214
 215void extent_io_tree_init(struct extent_io_tree *tree,
 216			 struct address_space *mapping)
 217{
 218	tree->state = RB_ROOT;
 219	tree->ops = NULL;
 220	tree->dirty_bytes = 0;
 221	spin_lock_init(&tree->lock);
 222	tree->mapping = mapping;
 223}
 224
 225static struct extent_state *alloc_extent_state(gfp_t mask)
 226{
 227	struct extent_state *state;
 228
 229	state = kmem_cache_alloc(extent_state_cache, mask);
 230	if (!state)
 231		return state;
 232	state->state = 0;
 233	state->failrec = NULL;
 234	RB_CLEAR_NODE(&state->rb_node);
 235	btrfs_leak_debug_add(&state->leak_list, &states);
 236	atomic_set(&state->refs, 1);
 237	init_waitqueue_head(&state->wq);
 238	trace_alloc_extent_state(state, mask, _RET_IP_);
 239	return state;
 240}
 241
 242void free_extent_state(struct extent_state *state)
 243{
 244	if (!state)
 245		return;
 246	if (atomic_dec_and_test(&state->refs)) {
 247		WARN_ON(extent_state_in_tree(state));
 248		btrfs_leak_debug_del(&state->leak_list);
 249		trace_free_extent_state(state, _RET_IP_);
 250		kmem_cache_free(extent_state_cache, state);
 251	}
 252}
 253
 254static struct rb_node *tree_insert(struct rb_root *root,
 255				   struct rb_node *search_start,
 256				   u64 offset,
 257				   struct rb_node *node,
 258				   struct rb_node ***p_in,
 259				   struct rb_node **parent_in)
 260{
 261	struct rb_node **p;
 262	struct rb_node *parent = NULL;
 263	struct tree_entry *entry;
 264
 265	if (p_in && parent_in) {
 266		p = *p_in;
 267		parent = *parent_in;
 268		goto do_insert;
 269	}
 270
 271	p = search_start ? &search_start : &root->rb_node;
 272	while (*p) {
 273		parent = *p;
 274		entry = rb_entry(parent, struct tree_entry, rb_node);
 275
 276		if (offset < entry->start)
 277			p = &(*p)->rb_left;
 278		else if (offset > entry->end)
 279			p = &(*p)->rb_right;
 280		else
 281			return parent;
 282	}
 283
 284do_insert:
 285	rb_link_node(node, parent, p);
 286	rb_insert_color(node, root);
 287	return NULL;
 288}
 289
 290static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 291				      struct rb_node **prev_ret,
 292				      struct rb_node **next_ret,
 293				      struct rb_node ***p_ret,
 294				      struct rb_node **parent_ret)
 295{
 296	struct rb_root *root = &tree->state;
 297	struct rb_node **n = &root->rb_node;
 298	struct rb_node *prev = NULL;
 299	struct rb_node *orig_prev = NULL;
 300	struct tree_entry *entry;
 301	struct tree_entry *prev_entry = NULL;
 302
 303	while (*n) {
 304		prev = *n;
 305		entry = rb_entry(prev, struct tree_entry, rb_node);
 306		prev_entry = entry;
 307
 308		if (offset < entry->start)
 309			n = &(*n)->rb_left;
 310		else if (offset > entry->end)
 311			n = &(*n)->rb_right;
 312		else
 313			return *n;
 314	}
 315
 316	if (p_ret)
 317		*p_ret = n;
 318	if (parent_ret)
 319		*parent_ret = prev;
 320
 321	if (prev_ret) {
 322		orig_prev = prev;
 323		while (prev && offset > prev_entry->end) {
 324			prev = rb_next(prev);
 325			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 326		}
 327		*prev_ret = prev;
 328		prev = orig_prev;
 
 
 
 
 
 
 
 
 
 329	}
 330
 331	if (next_ret) {
 332		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 333		while (prev && offset < prev_entry->start) {
 334			prev = rb_prev(prev);
 335			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
 336		}
 337		*next_ret = prev;
 338	}
 339	return NULL;
 340}
 341
 342static inline struct rb_node *
 343tree_search_for_insert(struct extent_io_tree *tree,
 344		       u64 offset,
 345		       struct rb_node ***p_ret,
 346		       struct rb_node **parent_ret)
 347{
 348	struct rb_node *prev = NULL;
 349	struct rb_node *ret;
 350
 351	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 352	if (!ret)
 353		return prev;
 354	return ret;
 355}
 356
 357static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 358					  u64 offset)
 359{
 360	return tree_search_for_insert(tree, offset, NULL, NULL);
 361}
 362
 363static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 364		     struct extent_state *other)
 365{
 366	if (tree->ops && tree->ops->merge_extent_hook)
 367		tree->ops->merge_extent_hook(tree->mapping->host, new,
 368					     other);
 369}
 370
 371/*
 372 * utility function to look for merge candidates inside a given range.
 373 * Any extents with matching state are merged together into a single
 374 * extent in the tree.  Extents with EXTENT_IO in their state field
 375 * are not merged because the end_io handlers need to be able to do
 376 * operations on them without sleeping (or doing allocations/splits).
 377 *
 378 * This should be called with the tree lock held.
 379 */
 380static void merge_state(struct extent_io_tree *tree,
 381		        struct extent_state *state)
 382{
 383	struct extent_state *other;
 384	struct rb_node *other_node;
 385
 386	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 387		return;
 388
 389	other_node = rb_prev(&state->rb_node);
 390	if (other_node) {
 391		other = rb_entry(other_node, struct extent_state, rb_node);
 392		if (other->end == state->start - 1 &&
 393		    other->state == state->state) {
 394			merge_cb(tree, state, other);
 395			state->start = other->start;
 396			rb_erase(&other->rb_node, &tree->state);
 397			RB_CLEAR_NODE(&other->rb_node);
 398			free_extent_state(other);
 399		}
 400	}
 401	other_node = rb_next(&state->rb_node);
 402	if (other_node) {
 403		other = rb_entry(other_node, struct extent_state, rb_node);
 404		if (other->start == state->end + 1 &&
 405		    other->state == state->state) {
 406			merge_cb(tree, state, other);
 407			state->end = other->end;
 408			rb_erase(&other->rb_node, &tree->state);
 409			RB_CLEAR_NODE(&other->rb_node);
 410			free_extent_state(other);
 411		}
 412	}
 413}
 414
 415static void set_state_cb(struct extent_io_tree *tree,
 416			 struct extent_state *state, unsigned *bits)
 417{
 418	if (tree->ops && tree->ops->set_bit_hook)
 419		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 420}
 421
 422static void clear_state_cb(struct extent_io_tree *tree,
 423			   struct extent_state *state, unsigned *bits)
 424{
 425	if (tree->ops && tree->ops->clear_bit_hook)
 426		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 427}
 428
 429static void set_state_bits(struct extent_io_tree *tree,
 430			   struct extent_state *state, unsigned *bits,
 431			   struct extent_changeset *changeset);
 432
 433/*
 434 * insert an extent_state struct into the tree.  'bits' are set on the
 435 * struct before it is inserted.
 436 *
 437 * This may return -EEXIST if the extent is already there, in which case the
 438 * state struct is freed.
 439 *
 440 * The tree lock is not taken internally.  This is a utility function and
 441 * probably isn't what you want to call (see set/clear_extent_bit).
 442 */
 443static int insert_state(struct extent_io_tree *tree,
 444			struct extent_state *state, u64 start, u64 end,
 445			struct rb_node ***p,
 446			struct rb_node **parent,
 447			unsigned *bits, struct extent_changeset *changeset)
 448{
 449	struct rb_node *node;
 450
 451	if (end < start)
 452		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 453		       end, start);
 454	state->start = start;
 455	state->end = end;
 456
 457	set_state_bits(tree, state, bits, changeset);
 458
 459	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
 460	if (node) {
 461		struct extent_state *found;
 462		found = rb_entry(node, struct extent_state, rb_node);
 463		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 464		       "%llu %llu\n",
 465		       found->start, found->end, start, end);
 466		return -EEXIST;
 467	}
 468	merge_state(tree, state);
 469	return 0;
 470}
 471
 472static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 473		     u64 split)
 474{
 475	if (tree->ops && tree->ops->split_extent_hook)
 476		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
 477}
 478
 479/*
 480 * split a given extent state struct in two, inserting the preallocated
 481 * struct 'prealloc' as the newly created second half.  'split' indicates an
 482 * offset inside 'orig' where it should be split.
 483 *
 484 * Before calling,
 485 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 486 * are two extent state structs in the tree:
 487 * prealloc: [orig->start, split - 1]
 488 * orig: [ split, orig->end ]
 489 *
 490 * The tree locks are not taken by this function. They need to be held
 491 * by the caller.
 492 */
 493static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 494		       struct extent_state *prealloc, u64 split)
 495{
 496	struct rb_node *node;
 497
 498	split_cb(tree, orig, split);
 499
 500	prealloc->start = orig->start;
 501	prealloc->end = split - 1;
 502	prealloc->state = orig->state;
 503	orig->start = split;
 504
 505	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
 506			   &prealloc->rb_node, NULL, NULL);
 507	if (node) {
 508		free_extent_state(prealloc);
 509		return -EEXIST;
 510	}
 511	return 0;
 512}
 513
 514static struct extent_state *next_state(struct extent_state *state)
 515{
 516	struct rb_node *next = rb_next(&state->rb_node);
 517	if (next)
 518		return rb_entry(next, struct extent_state, rb_node);
 519	else
 520		return NULL;
 521}
 522
 523/*
 524 * utility function to clear some bits in an extent state struct.
 525 * it will optionally wake up any one waiting on this state (wake == 1).
 526 *
 527 * If no bits are set on the state struct after clearing things, the
 528 * struct is freed and removed from the tree
 529 */
 530static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 531					    struct extent_state *state,
 532					    unsigned *bits, int wake,
 533					    struct extent_changeset *changeset)
 534{
 535	struct extent_state *next;
 536	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 537
 538	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 539		u64 range = state->end - state->start + 1;
 540		WARN_ON(range > tree->dirty_bytes);
 541		tree->dirty_bytes -= range;
 542	}
 543	clear_state_cb(tree, state, bits);
 544	add_extent_changeset(state, bits_to_clear, changeset, 0);
 545	state->state &= ~bits_to_clear;
 546	if (wake)
 547		wake_up(&state->wq);
 548	if (state->state == 0) {
 549		next = next_state(state);
 550		if (extent_state_in_tree(state)) {
 551			rb_erase(&state->rb_node, &tree->state);
 552			RB_CLEAR_NODE(&state->rb_node);
 553			free_extent_state(state);
 554		} else {
 555			WARN_ON(1);
 556		}
 557	} else {
 558		merge_state(tree, state);
 559		next = next_state(state);
 560	}
 561	return next;
 562}
 563
 564static struct extent_state *
 565alloc_extent_state_atomic(struct extent_state *prealloc)
 566{
 567	if (!prealloc)
 568		prealloc = alloc_extent_state(GFP_ATOMIC);
 569
 570	return prealloc;
 571}
 572
 573static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 574{
 575	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
 576		    "Extent tree was modified by another "
 577		    "thread while locked.");
 578}
 579
 580/*
 581 * clear some bits on a range in the tree.  This may require splitting
 582 * or inserting elements in the tree, so the gfp mask is used to
 583 * indicate which allocations or sleeping are allowed.
 584 *
 585 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 586 * the given range from the tree regardless of state (ie for truncate).
 587 *
 588 * the range [start, end] is inclusive.
 589 *
 590 * This takes the tree lock, and returns 0 on success and < 0 on error.
 591 */
 592static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 593			      unsigned bits, int wake, int delete,
 594			      struct extent_state **cached_state,
 595			      gfp_t mask, struct extent_changeset *changeset)
 596{
 597	struct extent_state *state;
 598	struct extent_state *cached;
 599	struct extent_state *prealloc = NULL;
 600	struct rb_node *node;
 601	u64 last_end;
 602	int err;
 603	int clear = 0;
 604
 605	btrfs_debug_check_extent_io_range(tree, start, end);
 606
 607	if (bits & EXTENT_DELALLOC)
 608		bits |= EXTENT_NORESERVE;
 609
 610	if (delete)
 611		bits |= ~EXTENT_CTLBITS;
 612	bits |= EXTENT_FIRST_DELALLOC;
 613
 614	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 615		clear = 1;
 616again:
 617	if (!prealloc && gfpflags_allow_blocking(mask)) {
 618		/*
 619		 * Don't care for allocation failure here because we might end
 620		 * up not needing the pre-allocated extent state at all, which
 621		 * is the case if we only have in the tree extent states that
 622		 * cover our input range and don't cover too any other range.
 623		 * If we end up needing a new extent state we allocate it later.
 624		 */
 625		prealloc = alloc_extent_state(mask);
 626	}
 627
 628	spin_lock(&tree->lock);
 629	if (cached_state) {
 630		cached = *cached_state;
 631
 632		if (clear) {
 633			*cached_state = NULL;
 634			cached_state = NULL;
 635		}
 636
 637		if (cached && extent_state_in_tree(cached) &&
 638		    cached->start <= start && cached->end > start) {
 639			if (clear)
 640				atomic_dec(&cached->refs);
 641			state = cached;
 642			goto hit_next;
 643		}
 644		if (clear)
 645			free_extent_state(cached);
 646	}
 647	/*
 648	 * this search will find the extents that end after
 649	 * our range starts
 650	 */
 651	node = tree_search(tree, start);
 652	if (!node)
 653		goto out;
 654	state = rb_entry(node, struct extent_state, rb_node);
 655hit_next:
 656	if (state->start > end)
 657		goto out;
 658	WARN_ON(state->end < start);
 659	last_end = state->end;
 660
 661	/* the state doesn't have the wanted bits, go ahead */
 662	if (!(state->state & bits)) {
 663		state = next_state(state);
 664		goto next;
 665	}
 666
 667	/*
 668	 *     | ---- desired range ---- |
 669	 *  | state | or
 670	 *  | ------------- state -------------- |
 671	 *
 672	 * We need to split the extent we found, and may flip
 673	 * bits on second half.
 674	 *
 675	 * If the extent we found extends past our range, we
 676	 * just split and search again.  It'll get split again
 677	 * the next time though.
 678	 *
 679	 * If the extent we found is inside our range, we clear
 680	 * the desired bit on it.
 681	 */
 682
 683	if (state->start < start) {
 684		prealloc = alloc_extent_state_atomic(prealloc);
 685		BUG_ON(!prealloc);
 686		err = split_state(tree, state, prealloc, start);
 687		if (err)
 688			extent_io_tree_panic(tree, err);
 689
 690		prealloc = NULL;
 691		if (err)
 692			goto out;
 693		if (state->end <= end) {
 694			state = clear_state_bit(tree, state, &bits, wake,
 695						changeset);
 696			goto next;
 697		}
 698		goto search_again;
 699	}
 700	/*
 701	 * | ---- desired range ---- |
 702	 *                        | state |
 703	 * We need to split the extent, and clear the bit
 704	 * on the first half
 705	 */
 706	if (state->start <= end && state->end > end) {
 707		prealloc = alloc_extent_state_atomic(prealloc);
 708		BUG_ON(!prealloc);
 709		err = split_state(tree, state, prealloc, end + 1);
 710		if (err)
 711			extent_io_tree_panic(tree, err);
 712
 713		if (wake)
 714			wake_up(&state->wq);
 715
 716		clear_state_bit(tree, prealloc, &bits, wake, changeset);
 717
 718		prealloc = NULL;
 719		goto out;
 720	}
 721
 722	state = clear_state_bit(tree, state, &bits, wake, changeset);
 723next:
 724	if (last_end == (u64)-1)
 725		goto out;
 726	start = last_end + 1;
 727	if (start <= end && state && !need_resched())
 728		goto hit_next;
 729	goto search_again;
 730
 731out:
 732	spin_unlock(&tree->lock);
 733	if (prealloc)
 734		free_extent_state(prealloc);
 735
 736	return 0;
 737
 738search_again:
 739	if (start > end)
 740		goto out;
 741	spin_unlock(&tree->lock);
 742	if (gfpflags_allow_blocking(mask))
 743		cond_resched();
 744	goto again;
 745}
 746
 747static void wait_on_state(struct extent_io_tree *tree,
 748			  struct extent_state *state)
 749		__releases(tree->lock)
 750		__acquires(tree->lock)
 751{
 752	DEFINE_WAIT(wait);
 753	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
 754	spin_unlock(&tree->lock);
 755	schedule();
 756	spin_lock(&tree->lock);
 757	finish_wait(&state->wq, &wait);
 758}
 759
 760/*
 761 * waits for one or more bits to clear on a range in the state tree.
 762 * The range [start, end] is inclusive.
 763 * The tree lock is taken by this function
 764 */
 765static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 766			    unsigned long bits)
 767{
 768	struct extent_state *state;
 769	struct rb_node *node;
 770
 771	btrfs_debug_check_extent_io_range(tree, start, end);
 772
 773	spin_lock(&tree->lock);
 774again:
 775	while (1) {
 776		/*
 777		 * this search will find all the extents that end after
 778		 * our range starts
 779		 */
 780		node = tree_search(tree, start);
 781process_node:
 782		if (!node)
 783			break;
 784
 785		state = rb_entry(node, struct extent_state, rb_node);
 786
 787		if (state->start > end)
 788			goto out;
 789
 790		if (state->state & bits) {
 791			start = state->start;
 792			atomic_inc(&state->refs);
 793			wait_on_state(tree, state);
 794			free_extent_state(state);
 795			goto again;
 796		}
 797		start = state->end + 1;
 798
 799		if (start > end)
 800			break;
 801
 802		if (!cond_resched_lock(&tree->lock)) {
 803			node = rb_next(node);
 804			goto process_node;
 805		}
 806	}
 807out:
 808	spin_unlock(&tree->lock);
 809}
 810
 811static void set_state_bits(struct extent_io_tree *tree,
 812			   struct extent_state *state,
 813			   unsigned *bits, struct extent_changeset *changeset)
 814{
 815	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 816
 817	set_state_cb(tree, state, bits);
 818	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 819		u64 range = state->end - state->start + 1;
 820		tree->dirty_bytes += range;
 821	}
 822	add_extent_changeset(state, bits_to_set, changeset, 1);
 823	state->state |= bits_to_set;
 824}
 825
 826static void cache_state_if_flags(struct extent_state *state,
 827				 struct extent_state **cached_ptr,
 828				 unsigned flags)
 829{
 830	if (cached_ptr && !(*cached_ptr)) {
 831		if (!flags || (state->state & flags)) {
 832			*cached_ptr = state;
 833			atomic_inc(&state->refs);
 834		}
 835	}
 836}
 837
 838static void cache_state(struct extent_state *state,
 839			struct extent_state **cached_ptr)
 840{
 841	return cache_state_if_flags(state, cached_ptr,
 842				    EXTENT_IOBITS | EXTENT_BOUNDARY);
 843}
 844
 845/*
 846 * set some bits on a range in the tree.  This may require allocations or
 847 * sleeping, so the gfp mask is used to indicate what is allowed.
 848 *
 849 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 850 * part of the range already has the desired bits set.  The start of the
 851 * existing range is returned in failed_start in this case.
 852 *
 853 * [start, end] is inclusive This takes the tree lock.
 854 */
 855
 856static int __must_check
 857__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 858		 unsigned bits, unsigned exclusive_bits,
 859		 u64 *failed_start, struct extent_state **cached_state,
 860		 gfp_t mask, struct extent_changeset *changeset)
 861{
 862	struct extent_state *state;
 863	struct extent_state *prealloc = NULL;
 864	struct rb_node *node;
 865	struct rb_node **p;
 866	struct rb_node *parent;
 867	int err = 0;
 868	u64 last_start;
 869	u64 last_end;
 870
 871	btrfs_debug_check_extent_io_range(tree, start, end);
 872
 873	bits |= EXTENT_FIRST_DELALLOC;
 874again:
 875	if (!prealloc && gfpflags_allow_blocking(mask)) {
 876		prealloc = alloc_extent_state(mask);
 877		BUG_ON(!prealloc);
 878	}
 879
 880	spin_lock(&tree->lock);
 881	if (cached_state && *cached_state) {
 882		state = *cached_state;
 883		if (state->start <= start && state->end > start &&
 884		    extent_state_in_tree(state)) {
 885			node = &state->rb_node;
 886			goto hit_next;
 887		}
 888	}
 889	/*
 890	 * this search will find all the extents that end after
 891	 * our range starts.
 892	 */
 893	node = tree_search_for_insert(tree, start, &p, &parent);
 894	if (!node) {
 895		prealloc = alloc_extent_state_atomic(prealloc);
 896		BUG_ON(!prealloc);
 897		err = insert_state(tree, prealloc, start, end,
 898				   &p, &parent, &bits, changeset);
 899		if (err)
 900			extent_io_tree_panic(tree, err);
 901
 902		cache_state(prealloc, cached_state);
 903		prealloc = NULL;
 904		goto out;
 905	}
 906	state = rb_entry(node, struct extent_state, rb_node);
 907hit_next:
 908	last_start = state->start;
 909	last_end = state->end;
 910
 911	/*
 912	 * | ---- desired range ---- |
 913	 * | state |
 914	 *
 915	 * Just lock what we found and keep going
 916	 */
 917	if (state->start == start && state->end <= end) {
 918		if (state->state & exclusive_bits) {
 919			*failed_start = state->start;
 920			err = -EEXIST;
 921			goto out;
 922		}
 923
 924		set_state_bits(tree, state, &bits, changeset);
 925		cache_state(state, cached_state);
 926		merge_state(tree, state);
 927		if (last_end == (u64)-1)
 928			goto out;
 929		start = last_end + 1;
 930		state = next_state(state);
 931		if (start < end && state && state->start == start &&
 932		    !need_resched())
 933			goto hit_next;
 934		goto search_again;
 935	}
 936
 937	/*
 938	 *     | ---- desired range ---- |
 939	 * | state |
 940	 *   or
 941	 * | ------------- state -------------- |
 942	 *
 943	 * We need to split the extent we found, and may flip bits on
 944	 * second half.
 945	 *
 946	 * If the extent we found extends past our
 947	 * range, we just split and search again.  It'll get split
 948	 * again the next time though.
 949	 *
 950	 * If the extent we found is inside our range, we set the
 951	 * desired bit on it.
 952	 */
 953	if (state->start < start) {
 954		if (state->state & exclusive_bits) {
 955			*failed_start = start;
 956			err = -EEXIST;
 957			goto out;
 958		}
 959
 960		prealloc = alloc_extent_state_atomic(prealloc);
 961		BUG_ON(!prealloc);
 962		err = split_state(tree, state, prealloc, start);
 963		if (err)
 964			extent_io_tree_panic(tree, err);
 965
 966		prealloc = NULL;
 967		if (err)
 968			goto out;
 969		if (state->end <= end) {
 970			set_state_bits(tree, state, &bits, changeset);
 971			cache_state(state, cached_state);
 972			merge_state(tree, state);
 973			if (last_end == (u64)-1)
 974				goto out;
 975			start = last_end + 1;
 976			state = next_state(state);
 977			if (start < end && state && state->start == start &&
 978			    !need_resched())
 979				goto hit_next;
 980		}
 981		goto search_again;
 982	}
 983	/*
 984	 * | ---- desired range ---- |
 985	 *     | state | or               | state |
 986	 *
 987	 * There's a hole, we need to insert something in it and
 988	 * ignore the extent we found.
 989	 */
 990	if (state->start > start) {
 991		u64 this_end;
 992		if (end < last_start)
 993			this_end = end;
 994		else
 995			this_end = last_start - 1;
 996
 997		prealloc = alloc_extent_state_atomic(prealloc);
 998		BUG_ON(!prealloc);
 999
1000		/*
1001		 * Avoid to free 'prealloc' if it can be merged with
1002		 * the later extent.
1003		 */
1004		err = insert_state(tree, prealloc, start, this_end,
1005				   NULL, NULL, &bits, changeset);
1006		if (err)
1007			extent_io_tree_panic(tree, err);
1008
1009		cache_state(prealloc, cached_state);
1010		prealloc = NULL;
1011		start = this_end + 1;
1012		goto search_again;
1013	}
1014	/*
1015	 * | ---- desired range ---- |
1016	 *                        | state |
1017	 * We need to split the extent, and set the bit
1018	 * on the first half
1019	 */
1020	if (state->start <= end && state->end > end) {
1021		if (state->state & exclusive_bits) {
1022			*failed_start = start;
1023			err = -EEXIST;
1024			goto out;
1025		}
1026
1027		prealloc = alloc_extent_state_atomic(prealloc);
1028		BUG_ON(!prealloc);
1029		err = split_state(tree, state, prealloc, end + 1);
1030		if (err)
1031			extent_io_tree_panic(tree, err);
1032
1033		set_state_bits(tree, prealloc, &bits, changeset);
1034		cache_state(prealloc, cached_state);
1035		merge_state(tree, prealloc);
1036		prealloc = NULL;
1037		goto out;
1038	}
1039
1040	goto search_again;
1041
1042out:
1043	spin_unlock(&tree->lock);
1044	if (prealloc)
1045		free_extent_state(prealloc);
1046
1047	return err;
1048
1049search_again:
1050	if (start > end)
1051		goto out;
1052	spin_unlock(&tree->lock);
1053	if (gfpflags_allow_blocking(mask))
1054		cond_resched();
1055	goto again;
1056}
1057
1058int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1059		   unsigned bits, u64 * failed_start,
1060		   struct extent_state **cached_state, gfp_t mask)
1061{
1062	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1063				cached_state, mask, NULL);
1064}
1065
1066
1067/**
1068 * convert_extent_bit - convert all bits in a given range from one bit to
1069 * 			another
1070 * @tree:	the io tree to search
1071 * @start:	the start offset in bytes
1072 * @end:	the end offset in bytes (inclusive)
1073 * @bits:	the bits to set in this range
1074 * @clear_bits:	the bits to clear in this range
1075 * @cached_state:	state that we're going to cache
1076 * @mask:	the allocation mask
1077 *
1078 * This will go through and set bits for the given range.  If any states exist
1079 * already in this range they are set with the given bit and cleared of the
1080 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1081 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1082 * boundary bits like LOCK.
1083 */
1084int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1085		       unsigned bits, unsigned clear_bits,
1086		       struct extent_state **cached_state, gfp_t mask)
1087{
1088	struct extent_state *state;
1089	struct extent_state *prealloc = NULL;
1090	struct rb_node *node;
1091	struct rb_node **p;
1092	struct rb_node *parent;
1093	int err = 0;
1094	u64 last_start;
1095	u64 last_end;
1096	bool first_iteration = true;
1097
1098	btrfs_debug_check_extent_io_range(tree, start, end);
1099
1100again:
1101	if (!prealloc && gfpflags_allow_blocking(mask)) {
1102		/*
1103		 * Best effort, don't worry if extent state allocation fails
1104		 * here for the first iteration. We might have a cached state
1105		 * that matches exactly the target range, in which case no
1106		 * extent state allocations are needed. We'll only know this
1107		 * after locking the tree.
1108		 */
1109		prealloc = alloc_extent_state(mask);
1110		if (!prealloc && !first_iteration)
1111			return -ENOMEM;
1112	}
1113
1114	spin_lock(&tree->lock);
1115	if (cached_state && *cached_state) {
1116		state = *cached_state;
1117		if (state->start <= start && state->end > start &&
1118		    extent_state_in_tree(state)) {
1119			node = &state->rb_node;
1120			goto hit_next;
1121		}
1122	}
1123
1124	/*
1125	 * this search will find all the extents that end after
1126	 * our range starts.
1127	 */
1128	node = tree_search_for_insert(tree, start, &p, &parent);
1129	if (!node) {
1130		prealloc = alloc_extent_state_atomic(prealloc);
1131		if (!prealloc) {
1132			err = -ENOMEM;
1133			goto out;
1134		}
1135		err = insert_state(tree, prealloc, start, end,
1136				   &p, &parent, &bits, NULL);
1137		if (err)
1138			extent_io_tree_panic(tree, err);
1139		cache_state(prealloc, cached_state);
1140		prealloc = NULL;
1141		goto out;
1142	}
1143	state = rb_entry(node, struct extent_state, rb_node);
1144hit_next:
1145	last_start = state->start;
1146	last_end = state->end;
1147
1148	/*
1149	 * | ---- desired range ---- |
1150	 * | state |
1151	 *
1152	 * Just lock what we found and keep going
1153	 */
1154	if (state->start == start && state->end <= end) {
1155		set_state_bits(tree, state, &bits, NULL);
1156		cache_state(state, cached_state);
1157		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1158		if (last_end == (u64)-1)
1159			goto out;
1160		start = last_end + 1;
1161		if (start < end && state && state->start == start &&
1162		    !need_resched())
1163			goto hit_next;
1164		goto search_again;
1165	}
1166
1167	/*
1168	 *     | ---- desired range ---- |
1169	 * | state |
1170	 *   or
1171	 * | ------------- state -------------- |
1172	 *
1173	 * We need to split the extent we found, and may flip bits on
1174	 * second half.
1175	 *
1176	 * If the extent we found extends past our
1177	 * range, we just split and search again.  It'll get split
1178	 * again the next time though.
1179	 *
1180	 * If the extent we found is inside our range, we set the
1181	 * desired bit on it.
1182	 */
1183	if (state->start < start) {
1184		prealloc = alloc_extent_state_atomic(prealloc);
1185		if (!prealloc) {
1186			err = -ENOMEM;
1187			goto out;
1188		}
1189		err = split_state(tree, state, prealloc, start);
1190		if (err)
1191			extent_io_tree_panic(tree, err);
1192		prealloc = NULL;
1193		if (err)
1194			goto out;
1195		if (state->end <= end) {
1196			set_state_bits(tree, state, &bits, NULL);
1197			cache_state(state, cached_state);
1198			state = clear_state_bit(tree, state, &clear_bits, 0,
1199						NULL);
1200			if (last_end == (u64)-1)
1201				goto out;
1202			start = last_end + 1;
1203			if (start < end && state && state->start == start &&
1204			    !need_resched())
1205				goto hit_next;
1206		}
1207		goto search_again;
1208	}
1209	/*
1210	 * | ---- desired range ---- |
1211	 *     | state | or               | state |
1212	 *
1213	 * There's a hole, we need to insert something in it and
1214	 * ignore the extent we found.
1215	 */
1216	if (state->start > start) {
1217		u64 this_end;
1218		if (end < last_start)
1219			this_end = end;
1220		else
1221			this_end = last_start - 1;
1222
1223		prealloc = alloc_extent_state_atomic(prealloc);
1224		if (!prealloc) {
1225			err = -ENOMEM;
1226			goto out;
1227		}
1228
1229		/*
1230		 * Avoid to free 'prealloc' if it can be merged with
1231		 * the later extent.
1232		 */
1233		err = insert_state(tree, prealloc, start, this_end,
1234				   NULL, NULL, &bits, NULL);
1235		if (err)
1236			extent_io_tree_panic(tree, err);
1237		cache_state(prealloc, cached_state);
1238		prealloc = NULL;
1239		start = this_end + 1;
1240		goto search_again;
1241	}
1242	/*
1243	 * | ---- desired range ---- |
1244	 *                        | state |
1245	 * We need to split the extent, and set the bit
1246	 * on the first half
1247	 */
1248	if (state->start <= end && state->end > end) {
1249		prealloc = alloc_extent_state_atomic(prealloc);
1250		if (!prealloc) {
1251			err = -ENOMEM;
1252			goto out;
1253		}
1254
1255		err = split_state(tree, state, prealloc, end + 1);
1256		if (err)
1257			extent_io_tree_panic(tree, err);
1258
1259		set_state_bits(tree, prealloc, &bits, NULL);
1260		cache_state(prealloc, cached_state);
1261		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1262		prealloc = NULL;
1263		goto out;
1264	}
1265
1266	goto search_again;
1267
1268out:
1269	spin_unlock(&tree->lock);
1270	if (prealloc)
1271		free_extent_state(prealloc);
1272
1273	return err;
1274
1275search_again:
1276	if (start > end)
1277		goto out;
1278	spin_unlock(&tree->lock);
1279	if (gfpflags_allow_blocking(mask))
1280		cond_resched();
1281	first_iteration = false;
1282	goto again;
1283}
1284
1285/* wrappers around set/clear extent bit */
1286int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1287			   unsigned bits, gfp_t mask,
1288			   struct extent_changeset *changeset)
1289{
1290	/*
1291	 * We don't support EXTENT_LOCKED yet, as current changeset will
1292	 * record any bits changed, so for EXTENT_LOCKED case, it will
1293	 * either fail with -EEXIST or changeset will record the whole
1294	 * range.
1295	 */
1296	BUG_ON(bits & EXTENT_LOCKED);
1297
1298	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
1299				changeset);
1300}
1301
1302int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1303		     unsigned bits, int wake, int delete,
1304		     struct extent_state **cached, gfp_t mask)
1305{
1306	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1307				  cached, mask, NULL);
1308}
1309
1310int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1311			     unsigned bits, gfp_t mask,
1312			     struct extent_changeset *changeset)
1313{
1314	/*
1315	 * Don't support EXTENT_LOCKED case, same reason as
1316	 * set_record_extent_bits().
1317	 */
1318	BUG_ON(bits & EXTENT_LOCKED);
1319
1320	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
1321				  changeset);
1322}
1323
1324/*
1325 * either insert or lock the state struct between start and end.  Use mask
1326 * to tell us if waiting is desired.
1327 */
1328int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1329		     struct extent_state **cached_state)
1330{
1331	int err;
1332	u64 failed_start;
1333
1334	while (1) {
1335		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1336				       EXTENT_LOCKED, &failed_start,
1337				       cached_state, GFP_NOFS, NULL);
1338		if (err == -EEXIST) {
1339			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1340			start = failed_start;
1341		} else
1342			break;
1343		WARN_ON(start > end);
1344	}
1345	return err;
1346}
1347
1348int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1349{
1350	int err;
1351	u64 failed_start;
1352
1353	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1354			       &failed_start, NULL, GFP_NOFS, NULL);
1355	if (err == -EEXIST) {
1356		if (failed_start > start)
1357			clear_extent_bit(tree, start, failed_start - 1,
1358					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1359		return 0;
1360	}
1361	return 1;
1362}
1363
1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1365{
1366	unsigned long index = start >> PAGE_SHIFT;
1367	unsigned long end_index = end >> PAGE_SHIFT;
1368	struct page *page;
1369
1370	while (index <= end_index) {
1371		page = find_get_page(inode->i_mapping, index);
1372		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1373		clear_page_dirty_for_io(page);
1374		put_page(page);
1375		index++;
1376	}
1377}
1378
1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{
1381	unsigned long index = start >> PAGE_SHIFT;
1382	unsigned long end_index = end >> PAGE_SHIFT;
1383	struct page *page;
1384
1385	while (index <= end_index) {
1386		page = find_get_page(inode->i_mapping, index);
1387		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388		__set_page_dirty_nobuffers(page);
1389		account_page_redirty(page);
1390		put_page(page);
1391		index++;
1392	}
1393}
1394
1395/*
1396 * helper function to set both pages and extents in the tree writeback
1397 */
1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1399{
1400	unsigned long index = start >> PAGE_SHIFT;
1401	unsigned long end_index = end >> PAGE_SHIFT;
1402	struct page *page;
1403
1404	while (index <= end_index) {
1405		page = find_get_page(tree->mapping, index);
1406		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1407		set_page_writeback(page);
1408		put_page(page);
1409		index++;
1410	}
1411}
1412
1413/* find the first state struct with 'bits' set after 'start', and
1414 * return it.  tree->lock must be held.  NULL will be returned if
1415 * nothing was found after 'start'
1416 */
1417static struct extent_state *
1418find_first_extent_bit_state(struct extent_io_tree *tree,
1419			    u64 start, unsigned bits)
1420{
1421	struct rb_node *node;
1422	struct extent_state *state;
1423
1424	/*
1425	 * this search will find all the extents that end after
1426	 * our range starts.
1427	 */
1428	node = tree_search(tree, start);
1429	if (!node)
1430		goto out;
1431
1432	while (1) {
1433		state = rb_entry(node, struct extent_state, rb_node);
1434		if (state->end >= start && (state->state & bits))
1435			return state;
1436
1437		node = rb_next(node);
1438		if (!node)
1439			break;
1440	}
1441out:
1442	return NULL;
1443}
1444
1445/*
1446 * find the first offset in the io tree with 'bits' set. zero is
1447 * returned if we find something, and *start_ret and *end_ret are
1448 * set to reflect the state struct that was found.
1449 *
1450 * If nothing was found, 1 is returned; if something was found, 0 is returned.
1451 */
1452int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1453			  u64 *start_ret, u64 *end_ret, unsigned bits,
1454			  struct extent_state **cached_state)
1455{
1456	struct extent_state *state;
1457	struct rb_node *n;
1458	int ret = 1;
1459
1460	spin_lock(&tree->lock);
1461	if (cached_state && *cached_state) {
1462		state = *cached_state;
1463		if (state->end == start - 1 && extent_state_in_tree(state)) {
1464			n = rb_next(&state->rb_node);
1465			while (n) {
1466				state = rb_entry(n, struct extent_state,
1467						 rb_node);
1468				if (state->state & bits)
1469					goto got_it;
1470				n = rb_next(n);
1471			}
1472			free_extent_state(*cached_state);
1473			*cached_state = NULL;
1474			goto out;
1475		}
1476		free_extent_state(*cached_state);
1477		*cached_state = NULL;
1478	}
1479
1480	state = find_first_extent_bit_state(tree, start, bits);
1481got_it:
1482	if (state) {
1483		cache_state_if_flags(state, cached_state, 0);
1484		*start_ret = state->start;
1485		*end_ret = state->end;
1486		ret = 0;
1487	}
1488out:
1489	spin_unlock(&tree->lock);
1490	return ret;
1491}
1492
1493/*
1494 * find a contiguous range of bytes in the file marked as delalloc, not
1495 * more than 'max_bytes'.  start and end are used to return the range.
1496 *
1497 * 1 is returned if we find something, 0 if nothing was in the tree
1498 */
1499static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1500					u64 *start, u64 *end, u64 max_bytes,
1501					struct extent_state **cached_state)
1502{
1503	struct rb_node *node;
1504	struct extent_state *state;
1505	u64 cur_start = *start;
1506	u64 found = 0;
1507	u64 total_bytes = 0;
1508
1509	spin_lock(&tree->lock);
1510
1511	/*
1512	 * this search will find all the extents that end after
1513	 * our range starts.
1514	 */
1515	node = tree_search(tree, cur_start);
1516	if (!node) {
1517		if (!found)
1518			*end = (u64)-1;
1519		goto out;
1520	}
1521
1522	while (1) {
1523		state = rb_entry(node, struct extent_state, rb_node);
1524		if (found && (state->start != cur_start ||
1525			      (state->state & EXTENT_BOUNDARY))) {
1526			goto out;
1527		}
1528		if (!(state->state & EXTENT_DELALLOC)) {
1529			if (!found)
1530				*end = state->end;
1531			goto out;
1532		}
1533		if (!found) {
1534			*start = state->start;
1535			*cached_state = state;
1536			atomic_inc(&state->refs);
1537		}
1538		found++;
1539		*end = state->end;
1540		cur_start = state->end + 1;
1541		node = rb_next(node);
1542		total_bytes += state->end - state->start + 1;
1543		if (total_bytes >= max_bytes)
1544			break;
1545		if (!node)
1546			break;
1547	}
1548out:
1549	spin_unlock(&tree->lock);
1550	return found;
1551}
1552
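/*
 * helper to unlock all pages in the byte range except locked_page; the
 * page references taken by find_get_pages_contig() are dropped as we go
 */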
1553static noinline void __unlock_for_delalloc(struct inode *inode,
1554					   struct page *locked_page,
1555					   u64 start, u64 end)
1556{
1557	int ret;
1558	struct page *pages[16];
1559	unsigned long index = start >> PAGE_SHIFT;
1560	unsigned long end_index = end >> PAGE_SHIFT;
1561	unsigned long nr_pages = end_index - index + 1;
1562	int i;
1563
1564	if (index == locked_page->index && end_index == index)
1565		return;
1566
1567	while (nr_pages > 0) {
1568		ret = find_get_pages_contig(inode->i_mapping, index,
1569				     min_t(unsigned long, nr_pages,
1570				     ARRAY_SIZE(pages)), pages);
1571		for (i = 0; i < ret; i++) {
1572			if (pages[i] != locked_page)
1573				unlock_page(pages[i]);
1574			put_page(pages[i]);
1575		}
1576		nr_pages -= ret;
1577		index += ret;
1578		cond_resched();
1579	}
1580}
1581
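/*
 * lock every page in the delalloc range except locked_page, which the
 * caller has already locked.  Returns 0 on success or -EAGAIN if a page
 * is no longer dirty or has been removed from the mapping; in that case
 * the pages locked so far are unlocked again.
 */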
1582static noinline int lock_delalloc_pages(struct inode *inode,
1583					struct page *locked_page,
1584					u64 delalloc_start,
1585					u64 delalloc_end)
1586{
1587	unsigned long index = delalloc_start >> PAGE_SHIFT;
1588	unsigned long start_index = index;
1589	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1590	unsigned long pages_locked = 0;
1591	struct page *pages[16];
1592	unsigned long nrpages;
1593	int ret;
1594	int i;
1595
1596	/* the caller is responsible for locking the start index */
1597	if (index == locked_page->index && index == end_index)
1598		return 0;
1599
1600	/* skip the page at the start index */
1601	nrpages = end_index - index + 1;
1602	while (nrpages > 0) {
1603		ret = find_get_pages_contig(inode->i_mapping, index,
1604				     min_t(unsigned long,
1605				     nrpages, ARRAY_SIZE(pages)), pages);
1606		if (ret == 0) {
1607			ret = -EAGAIN;
1608			goto done;
1609		}
1610		/* now we have an array of pages, lock them all */
1611		for (i = 0; i < ret; i++) {
1612			/*
1613			 * the caller is taking responsibility for
1614			 * locked_page
1615			 */
1616			if (pages[i] != locked_page) {
1617				lock_page(pages[i]);
1618				if (!PageDirty(pages[i]) ||
1619				    pages[i]->mapping != inode->i_mapping) {
1620					ret = -EAGAIN;
1621					unlock_page(pages[i]);
1622					put_page(pages[i]);
1623					goto done;
1624				}
1625			}
1626			put_page(pages[i]);
1627			pages_locked++;
1628		}
1629		nrpages -= ret;
1630		index += ret;
1631		cond_resched();
1632	}
1633	ret = 0;
1634done:
1635	if (ret && pages_locked) {
1636		__unlock_for_delalloc(inode, locked_page,
1637			      delalloc_start,
1638			      ((u64)(start_index + pages_locked - 1)) <<
1639			      PAGE_SHIFT);
1640	}
1641	return ret;
1642}
1643
1644/*
1645 * find a contiguous range of bytes in the file marked as delalloc, not
1646 * more than 'max_bytes'.  start and end are used to return the range.
1647 *
1648 * 1 is returned if we find something, 0 if nothing was in the tree
1649 */
1650STATIC u64 find_lock_delalloc_range(struct inode *inode,
1651				    struct extent_io_tree *tree,
1652				    struct page *locked_page, u64 *start,
1653				    u64 *end, u64 max_bytes)
1654{
1655	u64 delalloc_start;
1656	u64 delalloc_end;
1657	u64 found;
1658	struct extent_state *cached_state = NULL;
1659	int ret;
1660	int loops = 0;
1661
1662again:
1663	/* step one, find a bunch of delalloc bytes starting at start */
1664	delalloc_start = *start;
1665	delalloc_end = 0;
1666	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1667				    max_bytes, &cached_state);
1668	if (!found || delalloc_end <= *start) {
1669		*start = delalloc_start;
1670		*end = delalloc_end;
1671		free_extent_state(cached_state);
1672		return 0;
1673	}
1674
1675	/*
1676	 * start comes from the offset of locked_page.  We have to lock
1677	 * pages in order, so we can't process delalloc bytes before
1678	 * locked_page
1679	 */
1680	if (delalloc_start < *start)
1681		delalloc_start = *start;
1682
1683	/*
1684	 * make sure to limit the number of pages we try to lock down
1685	 */
1686	if (delalloc_end + 1 - delalloc_start > max_bytes)
1687		delalloc_end = delalloc_start + max_bytes - 1;
1688
1689	/* step two, lock all the pages after the page that has start */
1690	ret = lock_delalloc_pages(inode, locked_page,
1691				  delalloc_start, delalloc_end);
1692	if (ret == -EAGAIN) {
1693		/* some of the pages are gone, lets avoid looping by
1694		 * shortening the size of the delalloc range we're searching
1695		 */
1696		free_extent_state(cached_state);
1697		cached_state = NULL;
1698		if (!loops) {
1699			max_bytes = PAGE_SIZE;
1700			loops = 1;
1701			goto again;
1702		} else {
1703			found = 0;
1704			goto out_failed;
1705		}
1706	}
1707	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1708
1709	/* step three, lock the state bits for the whole range */
1710	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1711
1712	/* then test to make sure it is all still delalloc */
1713	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1714			     EXTENT_DELALLOC, 1, cached_state);
1715	if (!ret) {
1716		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1717				     &cached_state, GFP_NOFS);
1718		__unlock_for_delalloc(inode, locked_page,
1719			      delalloc_start, delalloc_end);
1720		cond_resched();
1721		goto again;
1722	}
1723	free_extent_state(cached_state);
1724	*start = delalloc_start;
1725	*end = delalloc_end;
1726out_failed:
1727	return found;
1728}
1729
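/*
 * clear the given bits from the io tree for the range and apply the
 * requested page_ops to every page in it, skipping locked_page
 */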
1730void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1731				 struct page *locked_page,
1732				 unsigned clear_bits,
1733				 unsigned long page_ops)
1734{
1735	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1736	int ret;
1737	struct page *pages[16];
1738	unsigned long index = start >> PAGE_SHIFT;
1739	unsigned long end_index = end >> PAGE_SHIFT;
1740	unsigned long nr_pages = end_index - index + 1;
1741	int i;
1742
1743	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1744	if (page_ops == 0)
1745		return;
1746
1747	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1748		mapping_set_error(inode->i_mapping, -EIO);
1749
1750	while (nr_pages > 0) {
1751		ret = find_get_pages_contig(inode->i_mapping, index,
1752				     min_t(unsigned long,
1753				     nr_pages, ARRAY_SIZE(pages)), pages);
1754		for (i = 0; i < ret; i++) {
1755
1756			if (page_ops & PAGE_SET_PRIVATE2)
1757				SetPagePrivate2(pages[i]);
1758
1759			if (pages[i] == locked_page) {
1760				put_page(pages[i]);
1761				continue;
1762			}
1763			if (page_ops & PAGE_CLEAR_DIRTY)
1764				clear_page_dirty_for_io(pages[i]);
1765			if (page_ops & PAGE_SET_WRITEBACK)
1766				set_page_writeback(pages[i]);
1767			if (page_ops & PAGE_SET_ERROR)
1768				SetPageError(pages[i]);
1769			if (page_ops & PAGE_END_WRITEBACK)
1770				end_page_writeback(pages[i]);
1771			if (page_ops & PAGE_UNLOCK)
1772				unlock_page(pages[i]);
1773			put_page(pages[i]);
1774		}
1775		nr_pages -= ret;
1776		index += ret;
1777		cond_resched();
1778	}
1779}
1780
1781/*
1782 * count the number of bytes in the tree that have a given bit(s)
1783 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1784 * cached.  The total number found is returned.
1785 */
1786u64 count_range_bits(struct extent_io_tree *tree,
1787		     u64 *start, u64 search_end, u64 max_bytes,
1788		     unsigned bits, int contig)
1789{
1790	struct rb_node *node;
1791	struct extent_state *state;
1792	u64 cur_start = *start;
1793	u64 total_bytes = 0;
1794	u64 last = 0;
1795	int found = 0;
1796
1797	if (WARN_ON(search_end <= cur_start))
1798		return 0;
1799
1800	spin_lock(&tree->lock);
1801	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1802		total_bytes = tree->dirty_bytes;
1803		goto out;
1804	}
1805	/*
1806	 * this search will find all the extents that end after
1807	 * our range starts.
1808	 */
1809	node = tree_search(tree, cur_start);
1810	if (!node)
1811		goto out;
1812
1813	while (1) {
1814		state = rb_entry(node, struct extent_state, rb_node);
1815		if (state->start > search_end)
1816			break;
1817		if (contig && found && state->start > last + 1)
1818			break;
1819		if (state->end >= cur_start && (state->state & bits) == bits) {
1820			total_bytes += min(search_end, state->end) + 1 -
1821				       max(cur_start, state->start);
1822			if (total_bytes >= max_bytes)
1823				break;
1824			if (!found) {
1825				*start = max(cur_start, state->start);
1826				found = 1;
1827			}
1828			last = state->end;
1829		} else if (contig && found) {
1830			break;
1831		}
1832		node = rb_next(node);
1833		if (!node)
1834			break;
1835	}
1836out:
1837	spin_unlock(&tree->lock);
1838	return total_bytes;
1839}
1840
1841/*
1842 * set the private field for a given byte offset in the tree.  If there isn't
1843 * an extent_state there already, -ENOENT is returned.
1844 */
1845static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1846		struct io_failure_record *failrec)
1847{
1848	struct rb_node *node;
1849	struct extent_state *state;
1850	int ret = 0;
1851
1852	spin_lock(&tree->lock);
1853	/*
1854	 * this search will find all the extents that end after
1855	 * our range starts.
1856	 */
1857	node = tree_search(tree, start);
1858	if (!node) {
1859		ret = -ENOENT;
1860		goto out;
1861	}
1862	state = rb_entry(node, struct extent_state, rb_node);
1863	if (state->start != start) {
1864		ret = -ENOENT;
1865		goto out;
1866	}
1867	state->failrec = failrec;
1868out:
1869	spin_unlock(&tree->lock);
1870	return ret;
1871}
1872
1873static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1874		struct io_failure_record **failrec)
1875{
1876	struct rb_node *node;
1877	struct extent_state *state;
1878	int ret = 0;
1879
1880	spin_lock(&tree->lock);
1881	/*
1882	 * this search will find all the extents that end after
1883	 * our range starts.
1884	 */
1885	node = tree_search(tree, start);
1886	if (!node) {
1887		ret = -ENOENT;
1888		goto out;
1889	}
1890	state = rb_entry(node, struct extent_state, rb_node);
1891	if (state->start != start) {
1892		ret = -ENOENT;
1893		goto out;
1894	}
1895	*failrec = state->failrec;
1896out:
1897	spin_unlock(&tree->lock);
1898	return ret;
1899}
1900
1901/*
1902 * searches a range in the state tree for a given mask.
1903 * If 'filled' == 1, this returns 1 only if every extent in the range
1904 * has the bits set.  Otherwise, 1 is returned if any bit in the
1905 * range is found set.
1906 */
1907int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1908		   unsigned bits, int filled, struct extent_state *cached)
1909{
1910	struct extent_state *state = NULL;
1911	struct rb_node *node;
1912	int bitset = 0;
1913
1914	spin_lock(&tree->lock);
1915	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1916	    cached->end > start)
1917		node = &cached->rb_node;
1918	else
1919		node = tree_search(tree, start);
1920	while (node && start <= end) {
1921		state = rb_entry(node, struct extent_state, rb_node);
1922
1923		if (filled && state->start > start) {
1924			bitset = 0;
1925			break;
1926		}
1927
1928		if (state->start > end)
1929			break;
1930
1931		if (state->state & bits) {
1932			bitset = 1;
1933			if (!filled)
1934				break;
1935		} else if (filled) {
1936			bitset = 0;
1937			break;
1938		}
1939
1940		if (state->end == (u64)-1)
1941			break;
1942
1943		start = state->end + 1;
1944		if (start > end)
1945			break;
1946		node = rb_next(node);
1947		if (!node) {
1948			if (filled)
1949				bitset = 0;
1950			break;
1951		}
1952	}
1953	spin_unlock(&tree->lock);
1954	return bitset;
1955}
1956
1957/*
1958 * helper function to set a given page up to date if all the
1959 * extents in the tree for that page are up to date
1960 */
1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1962{
1963	u64 start = page_offset(page);
1964	u64 end = start + PAGE_SIZE - 1;
1965	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1966		SetPageUptodate(page);
1967}
1968
1969int free_io_failure(struct inode *inode, struct io_failure_record *rec)
1970{
1971	int ret;
1972	int err = 0;
1973	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1974
1975	set_state_failrec(failure_tree, rec->start, NULL);
1976	ret = clear_extent_bits(failure_tree, rec->start,
1977				rec->start + rec->len - 1,
1978				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1979	if (ret)
1980		err = ret;
1981
1982	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1983				rec->start + rec->len - 1,
1984				EXTENT_DAMAGED, GFP_NOFS);
1985	if (ret && !err)
1986		err = ret;
1987
1988	kfree(rec);
1989	return err;
1990}
1991
1992/*
1993 * this bypasses the standard btrfs submit functions deliberately, as
1994 * the standard behavior is to write all copies in a raid setup. here we only
1995 * want to write the one bad copy. so we do the mapping for ourselves and issue
1996 * submit_bio directly.
1997 * to avoid any synchronization issues, wait for the data after writing, which
1998 * actually prevents the read that triggered the error from finishing.
1999 * currently, there can be no more than two copies of every data bit. thus,
2000 * exactly one rewrite is required.
2001 */
2002int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2003		      struct page *page, unsigned int pg_offset, int mirror_num)
2004{
2005	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2006	struct bio *bio;
2007	struct btrfs_device *dev;
2008	u64 map_length = 0;
2009	u64 sector;
2010	struct btrfs_bio *bbio = NULL;
2011	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2012	int ret;
2013
2014	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2015	BUG_ON(!mirror_num);
2016
2017	/* we can't repair anything in raid56 yet */
2018	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2019		return 0;
2020
2021	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2022	if (!bio)
2023		return -EIO;
2024	bio->bi_iter.bi_size = 0;
2025	map_length = length;
2026
2027	ret = btrfs_map_block(fs_info, WRITE, logical,
2028			      &map_length, &bbio, mirror_num);
2029	if (ret) {
2030		bio_put(bio);
2031		return -EIO;
2032	}
2033	BUG_ON(mirror_num != bbio->mirror_num);
2034	sector = bbio->stripes[mirror_num-1].physical >> 9;
2035	bio->bi_iter.bi_sector = sector;
2036	dev = bbio->stripes[mirror_num-1].dev;
2037	btrfs_put_bbio(bbio);
2038	if (!dev || !dev->bdev || !dev->writeable) {
2039		bio_put(bio);
2040		return -EIO;
2041	}
2042	bio->bi_bdev = dev->bdev;
2043	bio_add_page(bio, page, length, pg_offset);
2044
2045	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2046		/* try to remap that extent elsewhere? */
2047		bio_put(bio);
2048		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2049		return -EIO;
2050	}
2051
2052	btrfs_info_rl_in_rcu(fs_info,
2053		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2054				  btrfs_ino(inode), start,
2055				  rcu_str_deref(dev->name), sector);
2056	bio_put(bio);
2057	return 0;
2058}
2059
2060int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2061			 int mirror_num)
2062{
2063	u64 start = eb->start;
2064	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2065	int ret = 0;
2066
2067	if (root->fs_info->sb->s_flags & MS_RDONLY)
2068		return -EROFS;
2069
2070	for (i = 0; i < num_pages; i++) {
2071		struct page *p = eb->pages[i];
2072
2073		ret = repair_io_failure(root->fs_info->btree_inode, start,
2074					PAGE_SIZE, start, p,
2075					start - page_offset(p), mirror_num);
2076		if (ret)
2077			break;
2078		start += PAGE_SIZE;
2079	}
2080
2081	return ret;
2082}
2083
2084/*
2085 * each time an IO finishes, we do a fast check in the IO failure tree
2086 * to see if we need to process or clean up an io_failure_record
2087 */
2088int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2089		     unsigned int pg_offset)
2090{
2091	u64 private;
2092	struct io_failure_record *failrec;
2093	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2094	struct extent_state *state;
2095	int num_copies;
2096	int ret;
2097
2098	private = 0;
2099	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2100				(u64)-1, 1, EXTENT_DIRTY, 0);
2101	if (!ret)
2102		return 0;
2103
2104	ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start,
2105			&failrec);
2106	if (ret)
2107		return 0;
2108
2109	BUG_ON(!failrec->this_mirror);
2110
2111	if (failrec->in_validation) {
2112		/* there was no real error, just free the record */
2113		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2114			 failrec->start);
2115		goto out;
2116	}
2117	if (fs_info->sb->s_flags & MS_RDONLY)
2118		goto out;
2119
2120	spin_lock(&BTRFS_I(inode)->io_tree.lock);
2121	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2122					    failrec->start,
2123					    EXTENT_LOCKED);
2124	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2125
2126	if (state && state->start <= failrec->start &&
2127	    state->end >= failrec->start + failrec->len - 1) {
2128		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2129					      failrec->len);
2130		if (num_copies > 1)  {
2131			repair_io_failure(inode, start, failrec->len,
2132					  failrec->logical, page,
2133					  pg_offset, failrec->failed_mirror);
2134		}
2135	}
2136
2137out:
2138	free_io_failure(inode, failrec);
2139
2140	return 0;
2141}
2142
2143/*
2144 * Can be called when:
2145 * - holding the extent lock
2146 * - under an ordered extent
2147 * - the inode is being freed
2148 */
2149void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2150{
2151	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2152	struct io_failure_record *failrec;
2153	struct extent_state *state, *next;
2154
2155	if (RB_EMPTY_ROOT(&failure_tree->state))
2156		return;
2157
2158	spin_lock(&failure_tree->lock);
2159	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2160	while (state) {
2161		if (state->start > end)
2162			break;
2163
2164		ASSERT(state->end <= end);
2165
2166		next = next_state(state);
2167
2168		failrec = state->failrec;
2169		free_extent_state(state);
2170		kfree(failrec);
2171
2172		state = next;
2173	}
2174	spin_unlock(&failure_tree->lock);
2175}
2176
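/*
 * find the io_failure_record for a given range, or allocate and fill in a
 * new one, recording the logical address and compression type from the
 * extent map and marking the range in the failure tree and the io tree
 */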
2177int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2178		struct io_failure_record **failrec_ret)
2179{
2180	struct io_failure_record *failrec;
2181	struct extent_map *em;
2182	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2183	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2184	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2185	int ret;
2186	u64 logical;
2187
2188	ret = get_state_failrec(failure_tree, start, &failrec);
2189	if (ret) {
2190		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2191		if (!failrec)
2192			return -ENOMEM;
2193
2194		failrec->start = start;
2195		failrec->len = end - start + 1;
2196		failrec->this_mirror = 0;
2197		failrec->bio_flags = 0;
2198		failrec->in_validation = 0;
2199
2200		read_lock(&em_tree->lock);
2201		em = lookup_extent_mapping(em_tree, start, failrec->len);
2202		if (!em) {
2203			read_unlock(&em_tree->lock);
2204			kfree(failrec);
2205			return -EIO;
2206		}
2207
2208		if (em->start > start || em->start + em->len <= start) {
2209			free_extent_map(em);
2210			em = NULL;
2211		}
2212		read_unlock(&em_tree->lock);
2213		if (!em) {
2214			kfree(failrec);
2215			return -EIO;
2216		}
2217
2218		logical = start - em->start;
2219		logical = em->block_start + logical;
2220		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2221			logical = em->block_start;
2222			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2223			extent_set_compress_type(&failrec->bio_flags,
2224						 em->compress_type);
2225		}
2226
2227		pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2228			 logical, start, failrec->len);
2229
2230		failrec->logical = logical;
2231		free_extent_map(em);
2232
2233		/* set the bits in the private failure tree */
2234		ret = set_extent_bits(failure_tree, start, end,
2235					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2236		if (ret >= 0)
2237			ret = set_state_failrec(failure_tree, start, failrec);
2238		/* set the bits in the inode's tree */
2239		if (ret >= 0)
2240			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241						GFP_NOFS);
2242		if (ret < 0) {
2243			kfree(failrec);
2244			return ret;
2245		}
2246	} else {
2247		pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
2248			 failrec->logical, failrec->start, failrec->len,
2249			 failrec->in_validation);
2250		/*
2251		 * when data can be on disk more than twice, add to failrec here
2252		 * (e.g. with a list for failed_mirror) to make
2253		 * clean_io_failure() clean all those errors at once.
2254		 */
2255	}
2256
2257	*failrec_ret = failrec;
2258
2259	return 0;
2260}
2261
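/*
 * decide whether a failed read can be retried from another mirror.
 * Returns 0 if there is only a single copy or no untried mirror is left,
 * otherwise picks the next mirror in failrec->this_mirror and returns 1.
 */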
2262int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2263			   struct io_failure_record *failrec, int failed_mirror)
2264{
2265	int num_copies;
2266
2267	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2268				      failrec->logical, failrec->len);
2269	if (num_copies == 1) {
2270		/*
2271		 * we only have a single copy of the data, so don't bother with
2272		 * all the retry and error correction code that follows. no
2273		 * matter what the error is, it is very likely to persist.
2274		 */
2275		pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2276			 num_copies, failrec->this_mirror, failed_mirror);
2277		return 0;
2278	}
2279
2280	/*
2281	 * there are two premises:
2282	 *	a) deliver good data to the caller
2283	 *	b) correct the bad sectors on disk
2284	 */
2285	if (failed_bio->bi_vcnt > 1) {
2286		/*
2287		 * to fulfill b), we need to know the exact failing sectors, as
2288		 * we don't want to rewrite any more than the failed ones. thus,
2289		 * we need separate read requests for the failed bio
2290		 *
2291		 * if the following BUG_ON triggers, our validation request got
2292		 * merged. we need separate requests for our algorithm to work.
2293		 */
2294		BUG_ON(failrec->in_validation);
2295		failrec->in_validation = 1;
2296		failrec->this_mirror = failed_mirror;
2297	} else {
2298		/*
2299		 * we're ready to fulfill a) and b) alongside. get a good copy
2300		 * of the failed sector and if we succeed, we have setup
2301		 * everything for repair_io_failure to do the rest for us.
2302		 */
2303		if (failrec->in_validation) {
2304			BUG_ON(failrec->this_mirror != failed_mirror);
2305			failrec->in_validation = 0;
2306			failrec->this_mirror = 0;
2307		}
2308		failrec->failed_mirror = failed_mirror;
2309		failrec->this_mirror++;
2310		if (failrec->this_mirror == failed_mirror)
2311			failrec->this_mirror++;
2312	}
2313
2314	if (failrec->this_mirror > num_copies) {
2315		pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2316			 num_copies, failrec->this_mirror, failed_mirror);
2317		return 0;
2318	}
2319
2320	return 1;
2321}
2322
2323
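/*
 * allocate a single page bio aimed at the failed range and, if the
 * original bio carried checksums, copy the checksum for this block into
 * the new bio so the retried read can still be verified
 */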
2324struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2325				    struct io_failure_record *failrec,
2326				    struct page *page, int pg_offset, int icsum,
2327				    bio_end_io_t *endio_func, void *data)
2328{
2329	struct bio *bio;
2330	struct btrfs_io_bio *btrfs_failed_bio;
2331	struct btrfs_io_bio *btrfs_bio;
2332
2333	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2334	if (!bio)
2335		return NULL;
2336
2337	bio->bi_end_io = endio_func;
2338	bio->bi_iter.bi_sector = failrec->logical >> 9;
2339	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2340	bio->bi_iter.bi_size = 0;
2341	bio->bi_private = data;
2342
2343	btrfs_failed_bio = btrfs_io_bio(failed_bio);
2344	if (btrfs_failed_bio->csum) {
2345		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2346		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2347
2348		btrfs_bio = btrfs_io_bio(bio);
2349		btrfs_bio->csum = btrfs_bio->csum_inline;
2350		icsum *= csum_size;
2351		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2352		       csum_size);
2353	}
2354
2355	bio_add_page(bio, page, failrec->len, pg_offset);
2356
2357	return bio;
2358}
2359
2360/*
2361 * this is a generic handler for readpage errors (default
2362 * readpage_io_failed_hook). if other copies exist, read those and write back
2363 * good data to the failed position. It does not try to remap the failed
2364 * extent elsewhere, hoping the device will be smart enough to do this as
2365 * needed.
2366 */
2367
2368static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2369			      struct page *page, u64 start, u64 end,
2370			      int failed_mirror)
2371{
2372	struct io_failure_record *failrec;
2373	struct inode *inode = page->mapping->host;
2374	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2375	struct bio *bio;
2376	int read_mode;
2377	int ret;
2378
2379	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2380
2381	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2382	if (ret)
2383		return ret;
2384
2385	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2386	if (!ret) {
2387		free_io_failure(inode, failrec);
2388		return -EIO;
2389	}
2390
2391	if (failed_bio->bi_vcnt > 1)
2392		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2393	else
2394		read_mode = READ_SYNC;
2395
2396	phy_offset >>= inode->i_sb->s_blocksize_bits;
2397	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2398				      start - page_offset(page),
2399				      (int)phy_offset, failed_bio->bi_end_io,
2400				      NULL);
2401	if (!bio) {
2402		free_io_failure(inode, failrec);
2403		return -EIO;
2404	}
2405
2406	pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2407		 read_mode, failrec->this_mirror, failrec->in_validation);
2408
2409	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2410					 failrec->this_mirror,
2411					 failrec->bio_flags, 0);
2412	if (ret) {
2413		free_io_failure(inode, failrec);
2414		bio_put(bio);
2415	}
2416
2417	return ret;
2418}
2419
2420/* lots and lots of room for performance fixes in the end_bio funcs */
2421
2422void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2423{
2424	int uptodate = (err == 0);
2425	struct extent_io_tree *tree;
2426	int ret = 0;
2427
2428	tree = &BTRFS_I(page->mapping->host)->io_tree;
2429
2430	if (tree->ops && tree->ops->writepage_end_io_hook) {
2431		ret = tree->ops->writepage_end_io_hook(page, start,
2432					       end, NULL, uptodate);
2433		if (ret)
2434			uptodate = 0;
2435	}
2436
2437	if (!uptodate) {
2438		ClearPageUptodate(page);
2439		SetPageError(page);
2440		ret = ret < 0 ? ret : -EIO;
2441		mapping_set_error(page->mapping, ret);
2442	}
2443}
2444
2445/*
2446 * after a writepage IO is done, we need to:
2447 * clear the uptodate bits on error
2448 * clear the writeback bits in the extent tree for this IO
2449 * end_page_writeback if the page has no more pending IO
2450 *
2451 * Scheduling is not allowed, so the extent state tree is expected
2452 * to have one and only one object corresponding to this IO.
2453 */
2454static void end_bio_extent_writepage(struct bio *bio)
2455{
2456	struct bio_vec *bvec;
2457	u64 start;
2458	u64 end;
2459	int i;
2460
2461	bio_for_each_segment_all(bvec, bio, i) {
2462		struct page *page = bvec->bv_page;
2463
2464		/* We always issue full-page reads, but if some block
2465		 * in a page fails to read, blk_update_request() will
2466		 * advance bv_offset and adjust bv_len to compensate.
2467		 * Print a warning for nonzero offsets, and an error
2468		 * if they don't add up to a full page.  */
2469		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2470			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2471				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2472				   "partial page write in btrfs with offset %u and length %u",
2473					bvec->bv_offset, bvec->bv_len);
2474			else
2475				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2476				   "incomplete page write in btrfs with offset %u and "
2477				   "length %u",
2478					bvec->bv_offset, bvec->bv_len);
2479		}
2480
2481		start = page_offset(page);
2482		end = start + bvec->bv_offset + bvec->bv_len - 1;
2483
2484		end_extent_writepage(page, bio->bi_error, start, end);
2485		end_page_writeback(page);
2486	}
2487
2488	bio_put(bio);
2489}
2490
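/*
 * release a range in the io tree after a read completes: mark it uptodate
 * if the read succeeded (and the tree tracks uptodate state) and unlock it
 */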
2491static void
2492endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2493			      int uptodate)
2494{
2495	struct extent_state *cached = NULL;
2496	u64 end = start + len - 1;
2497
2498	if (uptodate && tree->track_uptodate)
2499		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2500	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2501}
2502
2503/*
2504 * after a readpage IO is done, we need to:
2505 * clear the uptodate bits on error
2506 * set the uptodate bits if things worked
2507 * set the page up to date if all extents in the tree are uptodate
2508 * clear the lock bit in the extent tree
2509 * unlock the page if there are no other extents locked for it
2510 *
2511 * Scheduling is not allowed, so the extent state tree is expected
2512 * to have one and only one object corresponding to this IO.
2513 */
2514static void end_bio_extent_readpage(struct bio *bio)
2515{
2516	struct bio_vec *bvec;
2517	int uptodate = !bio->bi_error;
2518	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2519	struct extent_io_tree *tree;
2520	u64 offset = 0;
2521	u64 start;
2522	u64 end;
2523	u64 len;
2524	u64 extent_start = 0;
2525	u64 extent_len = 0;
2526	int mirror;
2527	int ret;
2528	int i;
2529
2530	bio_for_each_segment_all(bvec, bio, i) {
2531		struct page *page = bvec->bv_page;
2532		struct inode *inode = page->mapping->host;
2533
2534		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2535			 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
2536			 bio->bi_error, io_bio->mirror_num);
2537		tree = &BTRFS_I(inode)->io_tree;
2538
2539		/* We always issue full-page reads, but if some block
2540		 * in a page fails to read, blk_update_request() will
2541		 * advance bv_offset and adjust bv_len to compensate.
2542		 * Print a warning for nonzero offsets, and an error
2543		 * if they don't add up to a full page.  */
2544		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2545			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2546				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2547				   "partial page read in btrfs with offset %u and length %u",
2548					bvec->bv_offset, bvec->bv_len);
2549			else
2550				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2551				   "incomplete page read in btrfs with offset %u and "
2552				   "length %u",
2553					bvec->bv_offset, bvec->bv_len);
2554		}
2555
2556		start = page_offset(page);
2557		end = start + bvec->bv_offset + bvec->bv_len - 1;
2558		len = bvec->bv_len;
2559
2560		mirror = io_bio->mirror_num;
2561		if (likely(uptodate && tree->ops &&
2562			   tree->ops->readpage_end_io_hook)) {
2563			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2564							      page, start, end,
2565							      mirror);
2566			if (ret)
2567				uptodate = 0;
2568			else
2569				clean_io_failure(inode, start, page, 0);
2570		}
2571
2572		if (likely(uptodate))
2573			goto readpage_ok;
2574
2575		if (tree->ops && tree->ops->readpage_io_failed_hook) {
2576			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2577			if (!ret && !bio->bi_error)
2578				uptodate = 1;
2579		} else {
2580			/*
2581			 * The generic bio_readpage_error handles errors the
2582			 * following way: If possible, new read requests are
2583			 * created and submitted and will end up in
2584			 * end_bio_extent_readpage as well (if we're lucky, not
2585			 * in the !uptodate case). In that case it returns 0 and
2586			 * we just go on with the next page in our bio. If it
2587			 * can't handle the error it will return -EIO and we
2588			 * remain responsible for that page.
2589			 */
2590			ret = bio_readpage_error(bio, offset, page, start, end,
2591						 mirror);
2592			if (ret == 0) {
2593				uptodate = !bio->bi_error;
2594				offset += len;
2595				continue;
2596			}
2597		}
2598readpage_ok:
2599		if (likely(uptodate)) {
2600			loff_t i_size = i_size_read(inode);
2601			pgoff_t end_index = i_size >> PAGE_SHIFT;
2602			unsigned off;
2603
2604			/* Zero out the end if this page straddles i_size */
2605			off = i_size & (PAGE_SIZE-1);
2606			if (page->index == end_index && off)
2607				zero_user_segment(page, off, PAGE_SIZE);
2608			SetPageUptodate(page);
2609		} else {
2610			ClearPageUptodate(page);
2611			SetPageError(page);
2612		}
2613		unlock_page(page);
2614		offset += len;
2615
2616		if (unlikely(!uptodate)) {
2617			if (extent_len) {
2618				endio_readpage_release_extent(tree,
2619							      extent_start,
2620							      extent_len, 1);
2621				extent_start = 0;
2622				extent_len = 0;
2623			}
2624			endio_readpage_release_extent(tree, start,
2625						      end - start + 1, 0);
2626		} else if (!extent_len) {
2627			extent_start = start;
2628			extent_len = end + 1 - start;
2629		} else if (extent_start + extent_len == start) {
2630			extent_len += end + 1 - start;
2631		} else {
2632			endio_readpage_release_extent(tree, extent_start,
2633						      extent_len, uptodate);
2634			extent_start = start;
2635			extent_len = end + 1 - start;
2636		}
2637	}
2638
2639	if (extent_len)
2640		endio_readpage_release_extent(tree, extent_start, extent_len,
2641					      uptodate);
2642	if (io_bio->end_io)
2643		io_bio->end_io(io_bio, bio->bi_error);
2644	bio_put(bio);
2645}
2646
2647/*
2648 * this allocates from the btrfs_bioset.  We're returning a bio right now
2649 * but you can call btrfs_io_bio for the appropriate container_of magic
2650 */
2651struct bio *
2652btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2653		gfp_t gfp_flags)
2654{
2655	struct btrfs_io_bio *btrfs_bio;
2656	struct bio *bio;
2657
2658	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2659
2660	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2661		while (!bio && (nr_vecs /= 2)) {
2662			bio = bio_alloc_bioset(gfp_flags,
2663					       nr_vecs, btrfs_bioset);
2664		}
2665	}
2666
2667	if (bio) {
2668		bio->bi_bdev = bdev;
2669		bio->bi_iter.bi_sector = first_sector;
2670		btrfs_bio = btrfs_io_bio(bio);
2671		btrfs_bio->csum = NULL;
2672		btrfs_bio->csum_allocated = NULL;
2673		btrfs_bio->end_io = NULL;
2674	}
2675	return bio;
2676}
2677
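/*
 * clone a bio from the btrfs_bioset and reset the btrfs-private csum and
 * end_io fields of the copy
 */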
2678struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2679{
2680	struct btrfs_io_bio *btrfs_bio;
2681	struct bio *new;
2682
2683	new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2684	if (new) {
2685		btrfs_bio = btrfs_io_bio(new);
2686		btrfs_bio->csum = NULL;
2687		btrfs_bio->csum_allocated = NULL;
2688		btrfs_bio->end_io = NULL;
2689
2690#ifdef CONFIG_BLK_CGROUP
2691		/* FIXME, put this into bio_clone_bioset */
2692		if (bio->bi_css)
2693			bio_associate_blkcg(new, bio->bi_css);
2694#endif
2695	}
2696	return new;
2697}
2698
2699/* this also allocates from the btrfs_bioset */
2700struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2701{
2702	struct btrfs_io_bio *btrfs_bio;
2703	struct bio *bio;
2704
2705	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2706	if (bio) {
2707		btrfs_bio = btrfs_io_bio(bio);
2708		btrfs_bio->csum = NULL;
2709		btrfs_bio->csum_allocated = NULL;
2710		btrfs_bio->end_io = NULL;
2711	}
2712	return bio;
2713}
2714
2715
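/*
 * submit a fully built bio, through the tree's submit_bio_hook if one is
 * set, otherwise directly via btrfsic_submit_bio().  The bio_get/bio_put
 * pair keeps the bio alive until submission returns.
 */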
2716static int __must_check submit_one_bio(int rw, struct bio *bio,
2717				       int mirror_num, unsigned long bio_flags)
2718{
2719	int ret = 0;
2720	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2721	struct page *page = bvec->bv_page;
2722	struct extent_io_tree *tree = bio->bi_private;
2723	u64 start;
2724
2725	start = page_offset(page) + bvec->bv_offset;
2726
2727	bio->bi_private = NULL;
2728
2729	bio_get(bio);
2730
2731	if (tree->ops && tree->ops->submit_bio_hook)
2732		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2733					   mirror_num, bio_flags, start);
2734	else
2735		btrfsic_submit_bio(rw, bio);
2736
2737	bio_put(bio);
2738	return ret;
2739}
2740
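/*
 * ask the merge_bio_hook whether this page may be added to the bio being
 * built; a nonzero return means the bio must be submitted first
 */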
2741static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2742		     unsigned long offset, size_t size, struct bio *bio,
2743		     unsigned long bio_flags)
2744{
2745	int ret = 0;
2746	if (tree->ops && tree->ops->merge_bio_hook)
2747		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2748						bio_flags);
2749	BUG_ON(ret < 0);
2750	return ret;
2751
2752}
2753
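/*
 * add a page to the bio being built, submitting the pending bio first if
 * the page is not contiguous with it, the bio flags changed, a submit is
 * forced or the page cannot be added.  A new bio is allocated when there
 * is nothing pending.
 */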
2754static int submit_extent_page(int rw, struct extent_io_tree *tree,
2755			      struct writeback_control *wbc,
2756			      struct page *page, sector_t sector,
2757			      size_t size, unsigned long offset,
2758			      struct block_device *bdev,
2759			      struct bio **bio_ret,
2760			      unsigned long max_pages,
2761			      bio_end_io_t end_io_func,
2762			      int mirror_num,
2763			      unsigned long prev_bio_flags,
2764			      unsigned long bio_flags,
2765			      bool force_bio_submit)
2766{
2767	int ret = 0;
2768	struct bio *bio;
2769	int contig = 0;
2770	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2771	size_t page_size = min_t(size_t, size, PAGE_SIZE);
2772
2773	if (bio_ret && *bio_ret) {
2774		bio = *bio_ret;
2775		if (old_compressed)
2776			contig = bio->bi_iter.bi_sector == sector;
2777		else
2778			contig = bio_end_sector(bio) == sector;
2779
2780		if (prev_bio_flags != bio_flags || !contig ||
2781		    force_bio_submit ||
2782		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2783		    bio_add_page(bio, page, page_size, offset) < page_size) {
2784			ret = submit_one_bio(rw, bio, mirror_num,
2785					     prev_bio_flags);
2786			if (ret < 0) {
2787				*bio_ret = NULL;
2788				return ret;
2789			}
2790			bio = NULL;
2791		} else {
2792			if (wbc)
2793				wbc_account_io(wbc, page, page_size);
2794			return 0;
2795		}
2796	}
2797
2798	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
2799			GFP_NOFS | __GFP_HIGH);
2800	if (!bio)
2801		return -ENOMEM;
2802
2803	bio_add_page(bio, page, page_size, offset);
2804	bio->bi_end_io = end_io_func;
2805	bio->bi_private = tree;
2806	if (wbc) {
2807		wbc_init_bio(wbc, bio);
2808		wbc_account_io(wbc, page, page_size);
2809	}
2810
2811	if (bio_ret)
2812		*bio_ret = bio;
2813	else
2814		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2815
2816	return ret;
2817}
2818
2819static void attach_extent_buffer_page(struct extent_buffer *eb,
2820				      struct page *page)
2821{
2822	if (!PagePrivate(page)) {
2823		SetPagePrivate(page);
2824		get_page(page);
2825		set_page_private(page, (unsigned long)eb);
2826	} else {
2827		WARN_ON(page->private != (unsigned long)eb);
2828	}
2829}
2830
2831void set_page_extent_mapped(struct page *page)
2832{
2833	if (!PagePrivate(page)) {
2834		SetPagePrivate(page);
2835		get_page(page);
2836		set_page_private(page, EXTENT_PAGE_PRIVATE);
2837	}
2838}
2839
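/*
 * return the cached extent map if it still covers 'start', otherwise look
 * up a new one with get_extent() and cache it for the next call
 */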
2840static struct extent_map *
2841__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2842		 u64 start, u64 len, get_extent_t *get_extent,
2843		 struct extent_map **em_cached)
2844{
2845	struct extent_map *em;
2846
2847	if (em_cached && *em_cached) {
2848		em = *em_cached;
2849		if (extent_map_in_tree(em) && start >= em->start &&
2850		    start < extent_map_end(em)) {
2851			atomic_inc(&em->refs);
2852			return em;
2853		}
2854
2855		free_extent_map(em);
2856		*em_cached = NULL;
2857	}
2858
2859	em = get_extent(inode, page, pg_offset, start, len, 0);
2860	if (em_cached && !IS_ERR_OR_NULL(em)) {
2861		BUG_ON(*em_cached);
2862		atomic_inc(&em->refs);
2863		*em_cached = em;
2864	}
2865	return em;
2866}
2867/*
2868 * basic readpage implementation.  Locked extent state structs are inserted
2869 * into the tree and removed when the IO is done (by the end_io
2870 * handlers).
2871 * XXX JDM: This needs looking at to ensure proper page locking
2872 */
2873static int __do_readpage(struct extent_io_tree *tree,
2874			 struct page *page,
2875			 get_extent_t *get_extent,
2876			 struct extent_map **em_cached,
2877			 struct bio **bio, int mirror_num,
2878			 unsigned long *bio_flags, int rw,
2879			 u64 *prev_em_start)
2880{
2881	struct inode *inode = page->mapping->host;
2882	u64 start = page_offset(page);
2883	u64 page_end = start + PAGE_SIZE - 1;
2884	u64 end;
2885	u64 cur = start;
2886	u64 extent_offset;
2887	u64 last_byte = i_size_read(inode);
2888	u64 block_start;
2889	u64 cur_end;
2890	sector_t sector;
2891	struct extent_map *em;
2892	struct block_device *bdev;
2893	int ret;
2894	int nr = 0;
2895	size_t pg_offset = 0;
2896	size_t iosize;
2897	size_t disk_io_size;
2898	size_t blocksize = inode->i_sb->s_blocksize;
2899	unsigned long this_bio_flag = 0;
2900
2901	set_page_extent_mapped(page);
2902
2903	end = page_end;
2904	if (!PageUptodate(page)) {
2905		if (cleancache_get_page(page) == 0) {
2906			BUG_ON(blocksize != PAGE_SIZE);
2907			unlock_extent(tree, start, end);
2908			goto out;
2909		}
2910	}
2911
2912	if (page->index == last_byte >> PAGE_SHIFT) {
2913		char *userpage;
2914		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2915
2916		if (zero_offset) {
2917			iosize = PAGE_SIZE - zero_offset;
2918			userpage = kmap_atomic(page);
2919			memset(userpage + zero_offset, 0, iosize);
2920			flush_dcache_page(page);
2921			kunmap_atomic(userpage);
2922		}
2923	}
2924	while (cur <= end) {
2925		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
2926		bool force_bio_submit = false;
2927
2928		if (cur >= last_byte) {
2929			char *userpage;
2930			struct extent_state *cached = NULL;
2931
2932			iosize = PAGE_SIZE - pg_offset;
2933			userpage = kmap_atomic(page);
2934			memset(userpage + pg_offset, 0, iosize);
2935			flush_dcache_page(page);
2936			kunmap_atomic(userpage);
2937			set_extent_uptodate(tree, cur, cur + iosize - 1,
2938					    &cached, GFP_NOFS);
2939			unlock_extent_cached(tree, cur,
2940					     cur + iosize - 1,
2941					     &cached, GFP_NOFS);
2942			break;
2943		}
2944		em = __get_extent_map(inode, page, pg_offset, cur,
2945				      end - cur + 1, get_extent, em_cached);
2946		if (IS_ERR_OR_NULL(em)) {
2947			SetPageError(page);
2948			unlock_extent(tree, cur, end);
2949			break;
2950		}
2951		extent_offset = cur - em->start;
2952		BUG_ON(extent_map_end(em) <= cur);
2953		BUG_ON(end < cur);
2954
2955		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2956			this_bio_flag |= EXTENT_BIO_COMPRESSED;
2957			extent_set_compress_type(&this_bio_flag,
2958						 em->compress_type);
2959		}
2960
2961		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2962		cur_end = min(extent_map_end(em) - 1, end);
2963		iosize = ALIGN(iosize, blocksize);
2964		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2965			disk_io_size = em->block_len;
2966			sector = em->block_start >> 9;
2967		} else {
2968			sector = (em->block_start + extent_offset) >> 9;
2969			disk_io_size = iosize;
2970		}
2971		bdev = em->bdev;
2972		block_start = em->block_start;
2973		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2974			block_start = EXTENT_MAP_HOLE;
2975
2976		/*
2977		 * If we have a file range that points to a compressed extent
2978		 * and it's followed by a consecutive file range that points to
2979		 * the same compressed extent (possibly with a different
2980		 * offset and/or length, so it either points to the whole extent
2981		 * or only part of it), we must make sure we do not submit a
2982		 * single bio to populate the pages for the 2 ranges because
2983		 * this makes the compressed extent read zero out the pages
2984		 * belonging to the 2nd range. Imagine the following scenario:
2985		 *
2986		 *  File layout
2987		 *  [0 - 8K]                     [8K - 24K]
2988		 *    |                               |
2989		 *    |                               |
2990		 * points to extent X,         points to extent X,
2991		 * offset 4K, length of 8K     offset 0, length 16K
2992		 *
2993		 * [extent X, compressed length = 4K uncompressed length = 16K]
2994		 *
2995		 * If the bio to read the compressed extent covers both ranges,
2996		 * it will decompress extent X into the pages belonging to the
2997		 * first range and then it will stop, zeroing out the remaining
2998		 * pages that belong to the other range that points to extent X.
2999		 * So here we make sure we submit 2 bios, one for the first
3000		 * range and another one for the third range. Both will target
3001		 * the same physical extent from disk, but we can't currently
3002		 * make the compressed bio endio callback populate the pages
3003		 * for both ranges because each compressed bio is tightly
3004		 * coupled with a single extent map, and each range can have
3005		 * an extent map with a different offset value relative to the
3006		 * uncompressed data of our extent and different lengths. This
3007		 * is a corner case so we prioritize correctness over
3008		 * non-optimal behavior (submitting 2 bios for the same extent).
3009		 */
3010		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3011		    prev_em_start && *prev_em_start != (u64)-1 &&
3012		    *prev_em_start != em->orig_start)
3013			force_bio_submit = true;
3014
3015		if (prev_em_start)
3016			*prev_em_start = em->orig_start;
3017
3018		free_extent_map(em);
3019		em = NULL;
3020
3021		/* we've found a hole, just zero and go on */
3022		if (block_start == EXTENT_MAP_HOLE) {
3023			char *userpage;
3024			struct extent_state *cached = NULL;
3025
3026			userpage = kmap_atomic(page);
3027			memset(userpage + pg_offset, 0, iosize);
3028			flush_dcache_page(page);
3029			kunmap_atomic(userpage);
3030
3031			set_extent_uptodate(tree, cur, cur + iosize - 1,
3032					    &cached, GFP_NOFS);
3033			unlock_extent_cached(tree, cur,
3034					     cur + iosize - 1,
3035					     &cached, GFP_NOFS);
3036			cur = cur + iosize;
3037			pg_offset += iosize;
3038			continue;
3039		}
3040		/* the get_extent function already copied the data into the page */
3041		if (test_range_bit(tree, cur, cur_end,
3042				   EXTENT_UPTODATE, 1, NULL)) {
3043			check_page_uptodate(tree, page);
3044			unlock_extent(tree, cur, cur + iosize - 1);
3045			cur = cur + iosize;
3046			pg_offset += iosize;
3047			continue;
3048		}
3049		/* we have an inline extent but it didn't get marked up
3050		 * to date.  Error out
3051		 */
3052		if (block_start == EXTENT_MAP_INLINE) {
3053			SetPageError(page);
3054			unlock_extent(tree, cur, cur + iosize - 1);
3055			cur = cur + iosize;
3056			pg_offset += iosize;
3057			continue;
3058		}
3059
3060		pnr -= page->index;
3061		ret = submit_extent_page(rw, tree, NULL, page,
3062					 sector, disk_io_size, pg_offset,
3063					 bdev, bio, pnr,
3064					 end_bio_extent_readpage, mirror_num,
3065					 *bio_flags,
3066					 this_bio_flag,
3067					 force_bio_submit);
3068		if (!ret) {
3069			nr++;
3070			*bio_flags = this_bio_flag;
3071		} else {
3072			SetPageError(page);
3073			unlock_extent(tree, cur, cur + iosize - 1);
3074		}
3075		cur = cur + iosize;
3076		pg_offset += iosize;
3077	}
3078out:
3079	if (!nr) {
3080		if (!PageError(page))
3081			SetPageUptodate(page);
3082		unlock_page(page);
3083	}
3084	return 0;
3085}
3086
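/*
 * lock the range in the io tree, waiting for any ordered extent covering
 * it to complete, then read each page of the contiguous batch, dropping
 * the page references as we go
 */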
3087static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3088					     struct page *pages[], int nr_pages,
3089					     u64 start, u64 end,
3090					     get_extent_t *get_extent,
3091					     struct extent_map **em_cached,
3092					     struct bio **bio, int mirror_num,
3093					     unsigned long *bio_flags, int rw,
3094					     u64 *prev_em_start)
3095{
3096	struct inode *inode;
3097	struct btrfs_ordered_extent *ordered;
3098	int index;
3099
3100	inode = pages[0]->mapping->host;
3101	while (1) {
3102		lock_extent(tree, start, end);
3103		ordered = btrfs_lookup_ordered_range(inode, start,
3104						     end - start + 1);
3105		if (!ordered)
3106			break;
3107		unlock_extent(tree, start, end);
3108		btrfs_start_ordered_extent(inode, ordered, 1);
3109		btrfs_put_ordered_extent(ordered);
3110	}
3111
3112	for (index = 0; index < nr_pages; index++) {
3113		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
3114			      mirror_num, bio_flags, rw, prev_em_start);
3115		put_page(pages[index]);
3116	}
3117}
3118
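/*
 * group the readahead pages into runs of contiguous file offsets and read
 * each run with __do_contiguous_readpages()
 */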
3119static void __extent_readpages(struct extent_io_tree *tree,
3120			       struct page *pages[],
3121			       int nr_pages, get_extent_t *get_extent,
3122			       struct extent_map **em_cached,
3123			       struct bio **bio, int mirror_num,
3124			       unsigned long *bio_flags, int rw,
3125			       u64 *prev_em_start)
3126{
3127	u64 start = 0;
3128	u64 end = 0;
3129	u64 page_start;
3130	int index;
3131	int first_index = 0;
3132
3133	for (index = 0; index < nr_pages; index++) {
3134		page_start = page_offset(pages[index]);
3135		if (!end) {
3136			start = page_start;
3137			end = start + PAGE_SIZE - 1;
3138			first_index = index;
3139		} else if (end + 1 == page_start) {
3140			end += PAGE_SIZE;
3141		} else {
3142			__do_contiguous_readpages(tree, &pages[first_index],
3143						  index - first_index, start,
3144						  end, get_extent, em_cached,
3145						  bio, mirror_num, bio_flags,
3146						  rw, prev_em_start);
3147			start = page_start;
3148			end = start + PAGE_SIZE - 1;
3149			first_index = index;
3150		}
3151	}
3152
3153	if (end)
3154		__do_contiguous_readpages(tree, &pages[first_index],
3155					  index - first_index, start,
3156					  end, get_extent, em_cached, bio,
3157					  mirror_num, bio_flags, rw,
3158					  prev_em_start);
3159}
3160
3161static int __extent_read_full_page(struct extent_io_tree *tree,
3162				   struct page *page,
3163				   get_extent_t *get_extent,
3164				   struct bio **bio, int mirror_num,
3165				   unsigned long *bio_flags, int rw)
3166{
3167	struct inode *inode = page->mapping->host;
3168	struct btrfs_ordered_extent *ordered;
3169	u64 start = page_offset(page);
3170	u64 end = start + PAGE_SIZE - 1;
 
3171	int ret;
3172
3173	while (1) {
3174		lock_extent(tree, start, end);
3175		ordered = btrfs_lookup_ordered_range(inode, start,
3176						PAGE_SIZE);
3177		if (!ordered)
3178			break;
3179		unlock_extent(tree, start, end);
3180		btrfs_start_ordered_extent(inode, ordered, 1);
3181		btrfs_put_ordered_extent(ordered);
3182	}
3183
3184	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3185			    bio_flags, rw, NULL);
3186	return ret;
3187}
3188
3189int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3190			    get_extent_t *get_extent, int mirror_num)
3191{
3192	struct bio *bio = NULL;
3193	unsigned long bio_flags = 0;
3194	int ret;
3195
3196	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3197				      &bio_flags, READ);
3198	if (bio)
3199		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3200	return ret;
3201}
3202
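/*
 * Account nr_written pages against wbc->nr_to_write and, for cyclic or
 * whole-file writeback, advance the mapping's writeback_index past the
 * pages just handled.
 */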
3203static noinline void update_nr_written(struct page *page,
3204				      struct writeback_control *wbc,
3205				      unsigned long nr_written)
3206{
3207	wbc->nr_to_write -= nr_written;
3208	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3209	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3210		page->mapping->writeback_index = page->index + nr_written;
3211}
3212
3213/*
3214 * helper for __extent_writepage, doing all of the delayed allocation setup.
3215 *
3216 * This returns 1 if our fill_delalloc function did all the work required
3217 * to write the page (copy into inline extent).  In this case the IO has
3218 * been started and the page is already unlocked.
3219 *
3220 * This returns 0 if all went well (page still locked)
3221 * This returns < 0 if there were errors (page still locked)
3222 */
3223static noinline_for_stack int writepage_delalloc(struct inode *inode,
3224			      struct page *page, struct writeback_control *wbc,
3225			      struct extent_page_data *epd,
3226			      u64 delalloc_start,
3227			      unsigned long *nr_written)
3228{
3229	struct extent_io_tree *tree = epd->tree;
3230	u64 page_end = delalloc_start + PAGE_SIZE - 1;
3231	u64 nr_delalloc;
3232	u64 delalloc_to_write = 0;
3233	u64 delalloc_end = 0;
 
3234	int ret;
3235	int page_started = 0;
3236
3237	if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3238		return 0;
 
3239
3240	while (delalloc_end < page_end) {
3241		nr_delalloc = find_lock_delalloc_range(inode, tree,
3242					       page,
3243					       &delalloc_start,
3244					       &delalloc_end,
3245					       BTRFS_MAX_EXTENT_SIZE);
3246		if (nr_delalloc == 0) {
3247			delalloc_start = delalloc_end + 1;
3248			continue;
3249		}
3250		ret = tree->ops->fill_delalloc(inode, page,
3251					       delalloc_start,
3252					       delalloc_end,
3253					       &page_started,
3254					       nr_written);
3255		/* File system has been set read-only */
3256		if (ret) {
3257			SetPageError(page);
3258			/* fill_delalloc should return < 0 on error, but just
3259			 * in case it returns > 0 (meaning the IO was started),
3260			 * we don't want to pass > 0 back to the caller unless
3261			 * things are going well.
3262			 */
3263			ret = ret < 0 ? ret : -EIO;
3264			goto done;
3265		}
3266		/*
3267		 * delalloc_end is already one less than the total length, so
3268		 * we don't subtract one from PAGE_SIZE
3269		 */
3270		delalloc_to_write += (delalloc_end - delalloc_start +
3271				      PAGE_SIZE) >> PAGE_SHIFT;
3272		delalloc_start = delalloc_end + 1;
3273	}
3274	if (wbc->nr_to_write < delalloc_to_write) {
3275		int thresh = 8192;
3276
3277		if (delalloc_to_write < thresh * 2)
3278			thresh = delalloc_to_write;
3279		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3280					 thresh);
3281	}
3282
3283	/* did the fill delalloc function already unlock and start
3284	 * the IO?
3285	 */
3286	if (page_started) {
3287		/*
3288		 * we've unlocked the page, so we can't update
3289		 * the mapping's writeback index, just update
3290		 * nr_to_write.
3291		 */
3292		wbc->nr_to_write -= *nr_written;
3293		return 1;
3294	}
3295
3296	ret = 0;
3297
3298done:
3299	return ret;
3300}
3301
3302/*
3303 * helper for __extent_writepage.  This calls the writepage start hooks,
3304 * and does the loop to map the page into extents and bios.
3305 *
3306 * We return 1 if the IO is started and the page is unlocked,
3307 * 0 if all went well (page still locked)
3308 * < 0 if there were errors (page still locked)
3309 */
3310static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3311				 struct page *page,
3312				 struct writeback_control *wbc,
3313				 struct extent_page_data *epd,
3314				 loff_t i_size,
3315				 unsigned long nr_written,
3316				 int write_flags, int *nr_ret)
3317{
3318	struct extent_io_tree *tree = epd->tree;
3319	u64 start = page_offset(page);
3320	u64 page_end = start + PAGE_SIZE - 1;
3321	u64 end;
3322	u64 cur = start;
3323	u64 extent_offset;
3324	u64 block_start;
3325	u64 iosize;
3326	sector_t sector;
3327	struct extent_state *cached_state = NULL;
3328	struct extent_map *em;
3329	struct block_device *bdev;
3330	size_t pg_offset = 0;
3331	size_t blocksize;
3332	int ret = 0;
3333	int nr = 0;
3334	bool compressed;
3335
3336	if (tree->ops && tree->ops->writepage_start_hook) {
3337		ret = tree->ops->writepage_start_hook(page, start,
3338						      page_end);
3339		if (ret) {
3340			/* Fixup worker will requeue */
3341			if (ret == -EBUSY)
3342				wbc->pages_skipped++;
3343			else
3344				redirty_page_for_writepage(wbc, page);
3345
3346			update_nr_written(page, wbc, nr_written);
3347			unlock_page(page);
3348			ret = 1;
3349			goto done_unlocked;
3350		}
3351	}
3352
3353	/*
3354	 * we don't want to touch the inode after unlocking the page,
3355	 * so we update the mapping writeback index now
3356	 */
3357	update_nr_written(page, wbc, nr_written + 1);
3358
3359	end = page_end;
3360	if (i_size <= start) {
3361		if (tree->ops && tree->ops->writepage_end_io_hook)
3362			tree->ops->writepage_end_io_hook(page, start,
3363							 page_end, NULL, 1);
3364		goto done;
3365	}
3366
3367	blocksize = inode->i_sb->s_blocksize;
3368
 
3369	while (cur <= end) {
 
3370		u64 em_end;
3371		if (cur >= i_size) {
3372			if (tree->ops && tree->ops->writepage_end_io_hook)
3373				tree->ops->writepage_end_io_hook(page, cur,
3374							 page_end, NULL, 1);
3375			break;
3376		}
3377		em = epd->get_extent(inode, page, pg_offset, cur,
3378				     end - cur + 1, 1);
3379		if (IS_ERR_OR_NULL(em)) {
3380			SetPageError(page);
3381			ret = PTR_ERR_OR_ZERO(em);
3382			break;
3383		}
3384
3385		extent_offset = cur - em->start;
3386		em_end = extent_map_end(em);
3387		BUG_ON(em_end <= cur);
3388		BUG_ON(end < cur);
3389		iosize = min(em_end - cur, end - cur + 1);
3390		iosize = ALIGN(iosize, blocksize);
3391		sector = (em->block_start + extent_offset) >> 9;
3392		bdev = em->bdev;
3393		block_start = em->block_start;
3394		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3395		free_extent_map(em);
3396		em = NULL;
3397
3398		/*
3399		 * compressed and inline extents are written through other
3400		 * paths in the FS
3401		 */
3402		if (compressed || block_start == EXTENT_MAP_HOLE ||
3403		    block_start == EXTENT_MAP_INLINE) {
3404			/*
3405			 * end_io notification does not happen here for
3406			 * compressed extents
3407			 */
3408			if (!compressed && tree->ops &&
3409			    tree->ops->writepage_end_io_hook)
3410				tree->ops->writepage_end_io_hook(page, cur,
3411							 cur + iosize - 1,
3412							 NULL, 1);
3413			else if (compressed) {
3414				/* we don't want to end_page_writeback on
3415				 * a compressed extent.  this happens
3416				 * elsewhere
3417				 */
3418				nr++;
3419			}
3420
3421			cur += iosize;
3422			pg_offset += iosize;
3423			continue;
3424		}
3425
3426		if (tree->ops && tree->ops->writepage_io_hook) {
3427			ret = tree->ops->writepage_io_hook(page, cur,
3428						cur + iosize - 1);
3429		} else {
3430			ret = 0;
3431		}
3432		if (ret) {
3433			SetPageError(page);
3434		} else {
3435			unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
3436
3437			set_range_writeback(tree, cur, cur + iosize - 1);
3438			if (!PageWriteback(page)) {
3439				btrfs_err(BTRFS_I(inode)->root->fs_info,
3440					   "page %lu not writeback, cur %llu end %llu",
3441				       page->index, cur, end);
3442			}
3443
3444			ret = submit_extent_page(write_flags, tree, wbc, page,
3445						 sector, iosize, pg_offset,
3446						 bdev, &epd->bio, max_nr,
3447						 end_bio_extent_writepage,
3448						 0, 0, 0, false);
3449			if (ret)
3450				SetPageError(page);
3451		}
3452		cur = cur + iosize;
3453		pg_offset += iosize;
3454		nr++;
3455	}
3456done:
3457	*nr_ret = nr;
3458
3459done_unlocked:
3460
3461	/* drop our reference on any cached states */
3462	free_extent_state(cached_state);
3463	return ret;
3464}
3465
3466/*
3467 * the writepage semantics are similar to regular writepage.  extent
3468 * records are inserted to lock ranges in the tree, and as dirty areas
3469 * are found, they are marked writeback.  Then the lock bits are removed
3470 * and the end_io handler clears the writeback ranges
3471 */
3472static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3473			      void *data)
3474{
 
3475	struct inode *inode = page->mapping->host;
3476	struct extent_page_data *epd = data;
3477	u64 start = page_offset(page);
3478	u64 page_end = start + PAGE_SIZE - 1;
3479	int ret;
3480	int nr = 0;
3481	size_t pg_offset = 0;
3482	loff_t i_size = i_size_read(inode);
3483	unsigned long end_index = i_size >> PAGE_SHIFT;
3484	int write_flags;
3485	unsigned long nr_written = 0;
3486
3487	if (wbc->sync_mode == WB_SYNC_ALL)
3488		write_flags = WRITE_SYNC;
3489	else
3490		write_flags = WRITE;
3491
3492	trace___extent_writepage(page, inode, wbc);
3493
3494	WARN_ON(!PageLocked(page));
3495
3496	ClearPageError(page);
 
3497
3498	pg_offset = i_size & (PAGE_SIZE - 1);
3499	if (page->index > end_index ||
3500	   (page->index == end_index && !pg_offset)) {
3501		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3502		unlock_page(page);
3503		return 0;
3504	}
3505
3506	if (page->index == end_index) {
3507		char *userpage;
3508
3509		userpage = kmap_atomic(page);
3510		memset(userpage + pg_offset, 0,
3511		       PAGE_SIZE - pg_offset);
3512		kunmap_atomic(userpage);
3513		flush_dcache_page(page);
3514	}
3515
3516	pg_offset = 0;
3517
3518	set_page_extent_mapped(page);
3519
3520	ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
 
3521	if (ret == 1)
3522		goto done_unlocked;
3523	if (ret)
3524		goto done;
3525
3526	ret = __extent_writepage_io(inode, page, wbc, epd,
3527				    i_size, nr_written, write_flags, &nr);
3528	if (ret == 1)
3529		goto done_unlocked;
3530
3531done:
3532	if (nr == 0) {
3533		/* make sure the mapping tag for page dirty gets cleared */
3534		set_page_writeback(page);
3535		end_page_writeback(page);
3536	}
3537	if (PageError(page)) {
3538		ret = ret < 0 ? ret : -EIO;
3539		end_extent_writepage(page, ret, start, page_end);
3540	}
3541	unlock_page(page);
3542	return ret;
3543
3544done_unlocked:
3545	return 0;
3546}
3547
3548void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3549{
3550	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3551		       TASK_UNINTERRUPTIBLE);
3552}
3553
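/*
 * Prepare an extent buffer for writeback: take the tree write lock (flushing
 * our pending bio first if we have to block), wait for any writeback already
 * in progress when doing synchronous IO, and move the buffer from dirty to
 * writeback state.  Returns 1 if the buffer was dirty and its pages are now
 * locked for IO, 0 if there is nothing to write.
 */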
3554static noinline_for_stack int
3555lock_extent_buffer_for_io(struct extent_buffer *eb,
3556			  struct btrfs_fs_info *fs_info,
3557			  struct extent_page_data *epd)
3558{
3559	unsigned long i, num_pages;
 
3560	int flush = 0;
3561	int ret = 0;
3562
3563	if (!btrfs_try_tree_write_lock(eb)) {
 
3564		flush = 1;
3565		flush_write_bio(epd);
3566		btrfs_tree_lock(eb);
3567	}
3568
3569	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3570		btrfs_tree_unlock(eb);
3571		if (!epd->sync_io)
3572			return 0;
3573		if (!flush) {
3574			flush_write_bio(epd);
3575			flush = 1;
3576		}
3577		while (1) {
3578			wait_on_extent_buffer_writeback(eb);
3579			btrfs_tree_lock(eb);
3580			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3581				break;
3582			btrfs_tree_unlock(eb);
3583		}
3584	}
3585
3586	/*
3587	 * We need to do this to prevent races with anyone who checks if the eb is
3588	 * under IO since we can end up having no IO bits set for a short period
3589	 * of time.
3590	 */
3591	spin_lock(&eb->refs_lock);
3592	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3593		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3594		spin_unlock(&eb->refs_lock);
3595		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3596		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
3597				     -eb->len,
3598				     fs_info->dirty_metadata_batch);
3599		ret = 1;
3600	} else {
3601		spin_unlock(&eb->refs_lock);
3602	}
3603
3604	btrfs_tree_unlock(eb);
3605
3606	if (!ret)
3607		return ret;
3608
3609	num_pages = num_extent_pages(eb->start, eb->len);
3610	for (i = 0; i < num_pages; i++) {
3611		struct page *p = eb->pages[i];
3612
3613		if (!trylock_page(p)) {
3614			if (!flush) {
3615				flush_write_bio(epd);
3616				flush = 1;
3617			}
3618			lock_page(p);
3619		}
3620	}
3621
3622	return ret;
3623}
3624
3625static void end_extent_buffer_writeback(struct extent_buffer *eb)
3626{
3627	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3628	smp_mb__after_atomic();
3629	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3630}
3631
3632static void set_btree_ioerr(struct page *page)
3633{
3634	struct extent_buffer *eb = (struct extent_buffer *)page->private;
3635	struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3636
3637	SetPageError(page);
3638	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3639		return;
3640
3641	/*
3642	 * If writeback for a btree extent that doesn't belong to a log tree
3643	 * failed, increment the counter transaction->eb_write_errors.
3644	 * We do this because while the transaction is running and before it's
3645	 * committing (when we call filemap_fdata[write|wait]_range against
3646	 * the btree inode), we might have
3647	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3648	 * returns an error or an error happens during writeback, when we're
3649	 * committing the transaction we wouldn't know about it, since the pages
3650	 * can be no longer dirty nor marked anymore for writeback (if a
3651	 * subsequent modification to the extent buffer didn't happen before the
3652	 * transaction commit), which makes filemap_fdata[write|wait]_range not
3653	 * able to find the pages tagged with SetPageError at transaction
3654	 * commit time. So if this happens we must abort the transaction,
3655	 * otherwise we commit a super block with btree roots that point to
3656	 * btree nodes/leafs whose content on disk is invalid - either garbage
3657	 * or the content of some node/leaf from a past generation that got
3658	 * cowed or deleted and is no longer valid.
3659	 *
3660	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3661	 * not be enough - we need to distinguish between log tree extents vs
3662	 * non-log tree extents, and the next filemap_fdatawait_range() call
3663	 * will catch and clear such errors in the mapping - and that call might
3664	 * be from a log sync and not from a transaction commit. Also, checking
3665	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3666	 * not done and would not be reliable - the eb might have been released
3667	 * from memory and reading it back again means that flag would not be
3668	 * set (since it's a runtime flag, not persisted on disk).
3669	 *
3670	 * Using the flags below in the btree inode also covers the case
3671	 * where writepages() returns success after having started writeback
3672	 * for all dirty pages, but by the time filemap_fdatawait_range() is
3673	 * called that writeback has already finished with errors - because
3674	 * we were not using AS_EIO/AS_ENOSPC,
3675	 * filemap_fdatawait_range() would return success, as it could not
3676	 * know that writeback errors happened (the pages were no longer
3677	 * tagged for writeback).
3678	 */
3679	switch (eb->log_index) {
3680	case -1:
3681		set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
3682		break;
3683	case 0:
3684		set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
3685		break;
3686	case 1:
3687		set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
3688		break;
3689	default:
3690		BUG(); /* unexpected, logic error */
3691	}
3692}
3693
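/*
 * Writeback completion for extent buffer bios: for each page, drop the
 * buffer's io_pages count, record IO errors via set_btree_ioerr() and end
 * page writeback; the final page also clears the buffer's writeback bit and
 * wakes any waiters.
 */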
3694static void end_bio_extent_buffer_writepage(struct bio *bio)
3695{
3696	struct bio_vec *bvec;
3697	struct extent_buffer *eb;
3698	int i, done;
 
3699
3700	bio_for_each_segment_all(bvec, bio, i) {
 
3701		struct page *page = bvec->bv_page;
3702
3703		eb = (struct extent_buffer *)page->private;
3704		BUG_ON(!eb);
3705		done = atomic_dec_and_test(&eb->io_pages);
3706
3707		if (bio->bi_error ||
3708		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3709			ClearPageUptodate(page);
3710			set_btree_ioerr(page);
3711		}
3712
3713		end_page_writeback(page);
3714
3715		if (!done)
3716			continue;
3717
3718		end_extent_buffer_writeback(eb);
3719	}
3720
3721	bio_put(bio);
3722}
3723
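/*
 * Write out a single extent buffer: mark each backing page writeback and
 * submit it with submit_extent_page(), unlocking the page afterwards.  If a
 * submission fails, the remaining io_pages are dropped and the untouched
 * pages are cleaned up and unlocked before returning -EIO.
 */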
3724static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3725			struct btrfs_fs_info *fs_info,
3726			struct writeback_control *wbc,
3727			struct extent_page_data *epd)
3728{
3729	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3730	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3731	u64 offset = eb->start;
3732	unsigned long i, num_pages;
3733	unsigned long bio_flags = 0;
3734	int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3735	int ret = 0;
3736
3737	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3738	num_pages = num_extent_pages(eb->start, eb->len);
3739	atomic_set(&eb->io_pages, num_pages);
3740	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3741		bio_flags = EXTENT_BIO_TREE_LOG;
3742
 
3743	for (i = 0; i < num_pages; i++) {
3744		struct page *p = eb->pages[i];
3745
3746		clear_page_dirty_for_io(p);
3747		set_page_writeback(p);
3748		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3749					 PAGE_SIZE, 0, bdev, &epd->bio,
3750					 -1, end_bio_extent_buffer_writepage,
3751					 0, epd->bio_flags, bio_flags, false);
3752		epd->bio_flags = bio_flags;
3753		if (ret) {
3754			set_btree_ioerr(p);
3755			end_page_writeback(p);
 
3756			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3757				end_extent_buffer_writeback(eb);
3758			ret = -EIO;
3759			break;
3760		}
3761		offset += PAGE_SIZE;
3762		update_nr_written(p, wbc, 1);
3763		unlock_page(p);
3764	}
3765
3766	if (unlikely(ret)) {
3767		for (; i < num_pages; i++) {
3768			struct page *p = eb->pages[i];
3769			clear_page_dirty_for_io(p);
3770			unlock_page(p);
3771		}
3772	}
3773
3774	return ret;
3775}
3776
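/*
 * Write back dirty metadata: walk the tagged pages of the btree inode, pick
 * up the extent buffer attached to each page (skipping repeats of the same
 * buffer via prev_eb), lock it for IO and write it out with write_one_eb().
 */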
3777int btree_write_cache_pages(struct address_space *mapping,
3778				   struct writeback_control *wbc)
3779{
3780	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3781	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3782	struct extent_buffer *eb, *prev_eb = NULL;
3783	struct extent_page_data epd = {
3784		.bio = NULL,
3785		.tree = tree,
3786		.extent_locked = 0,
3787		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3788		.bio_flags = 0,
3789	};
 
3790	int ret = 0;
3791	int done = 0;
3792	int nr_to_write_done = 0;
3793	struct pagevec pvec;
3794	int nr_pages;
3795	pgoff_t index;
3796	pgoff_t end;		/* Inclusive */
3797	int scanned = 0;
3798	int tag;
3799
3800	pagevec_init(&pvec, 0);
3801	if (wbc->range_cyclic) {
3802		index = mapping->writeback_index; /* Start from prev offset */
3803		end = -1;
3804	} else {
3805		index = wbc->range_start >> PAGE_SHIFT;
3806		end = wbc->range_end >> PAGE_SHIFT;
3807		scanned = 1;
3808	}
3809	if (wbc->sync_mode == WB_SYNC_ALL)
3810		tag = PAGECACHE_TAG_TOWRITE;
3811	else
3812		tag = PAGECACHE_TAG_DIRTY;
 
3813retry:
3814	if (wbc->sync_mode == WB_SYNC_ALL)
3815		tag_pages_for_writeback(mapping, index, end);
3816	while (!done && !nr_to_write_done && (index <= end) &&
3817	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3818			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3819		unsigned i;
3820
3821		scanned = 1;
3822		for (i = 0; i < nr_pages; i++) {
3823			struct page *page = pvec.pages[i];
3824
3825			if (!PagePrivate(page))
 
3826				continue;
3827
3828			if (!wbc->range_cyclic && page->index > end) {
3829				done = 1;
3830				break;
3831			}
3832
3833			spin_lock(&mapping->private_lock);
3834			if (!PagePrivate(page)) {
3835				spin_unlock(&mapping->private_lock);
3836				continue;
3837			}
3838
3839			eb = (struct extent_buffer *)page->private;
3840
3841			/*
3842			 * Shouldn't happen and normally this would be a BUG_ON
3843			 * but no sense in crashing the user's box for something
3844			 * we can survive anyway.
3845			 */
3846			if (WARN_ON(!eb)) {
3847				spin_unlock(&mapping->private_lock);
3848				continue;
3849			}
3850
3851			if (eb == prev_eb) {
3852				spin_unlock(&mapping->private_lock);
3853				continue;
3854			}
3855
3856			ret = atomic_inc_not_zero(&eb->refs);
3857			spin_unlock(&mapping->private_lock);
3858			if (!ret)
3859				continue;
3860
3861			prev_eb = eb;
3862			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3863			if (!ret) {
3864				free_extent_buffer(eb);
3865				continue;
3866			}
3867
3868			ret = write_one_eb(eb, fs_info, wbc, &epd);
3869			if (ret) {
3870				done = 1;
3871				free_extent_buffer(eb);
3872				break;
3873			}
3874			free_extent_buffer(eb);
3875
3876			/*
3877			 * the filesystem may choose to bump up nr_to_write.
3878			 * We have to make sure to honor the new nr_to_write
3879			 * at any time
3880			 */
3881			nr_to_write_done = wbc->nr_to_write <= 0;
3882		}
3883		pagevec_release(&pvec);
3884		cond_resched();
3885	}
3886	if (!scanned && !done) {
3887		/*
3888		 * We hit the last page and there is more work to be done: wrap
3889		 * back to the start of the file
3890		 */
3891		scanned = 1;
3892		index = 0;
3893		goto retry;
3894	}
3895	flush_write_bio(&epd);
3896	return ret;
3897}
3898
3899/**
3900 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3901 * @mapping: address space structure to write
3902 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3903 * @writepage: function called for each page
3904 * @data: data passed to writepage function
3905 *
3906 * If a page is already under I/O, write_cache_pages() skips it, even
3907 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3908 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3909 * and msync() need to guarantee that all the data which was dirty at the time
3910 * the call was made get new I/O started against them.  If wbc->sync_mode is
3911 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3912 * existing IO to complete.
3913 */
3914static int extent_write_cache_pages(struct extent_io_tree *tree,
3915			     struct address_space *mapping,
3916			     struct writeback_control *wbc,
3917			     writepage_t writepage, void *data,
3918			     void (*flush_fn)(void *))
3919{
3920	struct inode *inode = mapping->host;
3921	int ret = 0;
3922	int done = 0;
3923	int err = 0;
3924	int nr_to_write_done = 0;
3925	struct pagevec pvec;
3926	int nr_pages;
3927	pgoff_t index;
3928	pgoff_t end;		/* Inclusive */
3929	int scanned = 0;
3930	int tag;
3931
3932	/*
3933	 * We have to hold onto the inode so that ordered extents can do their
3934	 * work when the IO finishes.  The alternative to this is failing to add
3935	 * an ordered extent if the igrab() fails there and that is a huge pain
3936	 * to deal with, so instead just hold onto the inode throughout the
3937	 * writepages operation.  If it fails here we are freeing up the inode
3938	 * anyway and we'd rather not waste our time writing out stuff that is
3939	 * going to be truncated anyway.
3940	 */
3941	if (!igrab(inode))
3942		return 0;
3943
3944	pagevec_init(&pvec, 0);
3945	if (wbc->range_cyclic) {
3946		index = mapping->writeback_index; /* Start from prev offset */
3947		end = -1;
3948	} else {
3949		index = wbc->range_start >> PAGE_SHIFT;
3950		end = wbc->range_end >> PAGE_SHIFT;
3951		scanned = 1;
3952	}
3953	if (wbc->sync_mode == WB_SYNC_ALL)
3954		tag = PAGECACHE_TAG_TOWRITE;
3955	else
3956		tag = PAGECACHE_TAG_DIRTY;
3957retry:
3958	if (wbc->sync_mode == WB_SYNC_ALL)
3959		tag_pages_for_writeback(mapping, index, end);
 
3960	while (!done && !nr_to_write_done && (index <= end) &&
3961	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3962			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3963		unsigned i;
3964
3965		scanned = 1;
3966		for (i = 0; i < nr_pages; i++) {
3967			struct page *page = pvec.pages[i];
3968
 
3969			/*
3970			 * At this point we hold neither mapping->tree_lock nor
3971			 * lock on the page itself: the page may be truncated or
3972			 * invalidated (changing page->mapping to NULL), or even
3973			 * swizzled back from swapper_space to tmpfs file
3974			 * mapping
3975			 */
3976			if (!trylock_page(page)) {
3977				flush_fn(data);
3978				lock_page(page);
3979			}
3980
3981			if (unlikely(page->mapping != mapping)) {
3982				unlock_page(page);
3983				continue;
3984			}
3985
3986			if (!wbc->range_cyclic && page->index > end) {
3987				done = 1;
3988				unlock_page(page);
3989				continue;
3990			}
3991
3992			if (wbc->sync_mode != WB_SYNC_NONE) {
3993				if (PageWriteback(page))
3994					flush_fn(data);
3995				wait_on_page_writeback(page);
3996			}
3997
3998			if (PageWriteback(page) ||
3999			    !clear_page_dirty_for_io(page)) {
4000				unlock_page(page);
4001				continue;
4002			}
4003
4004			ret = (*writepage)(page, wbc, data);
4005
4006			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
4007				unlock_page(page);
4008				ret = 0;
4009			}
4010			if (!err && ret < 0)
4011				err = ret;
4012
4013			/*
4014			 * the filesystem may choose to bump up nr_to_write.
4015			 * We have to make sure to honor the new nr_to_write
4016			 * at any time
4017			 */
4018			nr_to_write_done = wbc->nr_to_write <= 0;
4019		}
4020		pagevec_release(&pvec);
4021		cond_resched();
4022	}
4023	if (!scanned && !done && !err) {
4024		/*
4025		 * We hit the last page and there is more work to be done: wrap
4026		 * back to the start of the file
4027		 */
4028		scanned = 1;
4029		index = 0;
4030		goto retry;
4031	}
4032	btrfs_add_delayed_iput(inode);
4033	return err;
4034}
4035
4036static void flush_epd_write_bio(struct extent_page_data *epd)
4037{
4038	if (epd->bio) {
4039		int rw = WRITE;
4040		int ret;
4041
4042		if (epd->sync_io)
4043			rw = WRITE_SYNC;
4044
4045		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
4046		BUG_ON(ret < 0); /* -ENOMEM */
4047		epd->bio = NULL;
4048	}
4049}
4050
4051static noinline void flush_write_bio(void *data)
4052{
4053	struct extent_page_data *epd = data;
4054	flush_epd_write_bio(epd);
4055}
4056
4057int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
4058			  get_extent_t *get_extent,
4059			  struct writeback_control *wbc)
4060{
4061	int ret;
4062	struct extent_page_data epd = {
4063		.bio = NULL,
4064		.tree = tree,
4065		.get_extent = get_extent,
4066		.extent_locked = 0,
4067		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4068		.bio_flags = 0,
4069	};
4070
4071	ret = __extent_writepage(page, wbc, &epd);
4072
4073	flush_epd_write_bio(&epd);
4074	return ret;
4075}
4076
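/*
 * Write back a range whose extent state is already locked (extent_locked is
 * set, so writepage_delalloc is skipped).  Each page that is still dirty
 * goes through __extent_writepage(); pages that are no longer dirty just get
 * their end_io hook called so the end_io accounting still runs.
 */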
4077int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4078			      u64 start, u64 end, get_extent_t *get_extent,
4079			      int mode)
4080{
4081	int ret = 0;
4082	struct address_space *mapping = inode->i_mapping;
4083	struct page *page;
4084	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4085		PAGE_SHIFT;
4086
4087	struct extent_page_data epd = {
4088		.bio = NULL,
4089		.tree = tree,
4090		.get_extent = get_extent,
4091		.extent_locked = 1,
4092		.sync_io = mode == WB_SYNC_ALL,
4093		.bio_flags = 0,
4094	};
4095	struct writeback_control wbc_writepages = {
4096		.sync_mode	= mode,
4097		.nr_to_write	= nr_pages * 2,
4098		.range_start	= start,
4099		.range_end	= end + 1,
4100	};
4101
4102	while (start <= end) {
4103		page = find_get_page(mapping, start >> PAGE_SHIFT);
4104		if (clear_page_dirty_for_io(page))
4105			ret = __extent_writepage(page, &wbc_writepages, &epd);
4106		else {
4107			if (tree->ops && tree->ops->writepage_end_io_hook)
4108				tree->ops->writepage_end_io_hook(page, start,
4109						 start + PAGE_SIZE - 1,
4110						 NULL, 1);
4111			unlock_page(page);
4112		}
4113		put_page(page);
4114		start += PAGE_SIZE;
4115	}
4116
4117	flush_epd_write_bio(&epd);
4118	return ret;
4119}
4120
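/*
 * Writeback entry point for data pages: run extent_write_cache_pages() with
 * __extent_writepage() as the per-page callback, then flush whatever bio was
 * built up along the way.
 */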
4121int extent_writepages(struct extent_io_tree *tree,
4122		      struct address_space *mapping,
4123		      get_extent_t *get_extent,
4124		      struct writeback_control *wbc)
4125{
 
4126	int ret = 0;
4127	struct extent_page_data epd = {
4128		.bio = NULL,
4129		.tree = tree,
4130		.get_extent = get_extent,
4131		.extent_locked = 0,
4132		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4133		.bio_flags = 0,
4134	};
4135
4136	ret = extent_write_cache_pages(tree, mapping, wbc,
4137				       __extent_writepage, &epd,
4138				       flush_write_bio);
4139	flush_epd_write_bio(&epd);
4140	return ret;
4141}
4142
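/*
 * Readahead entry point: add pages to the page cache in batches of up to 16
 * and pass each batch to __extent_readpages(), reusing a single cached
 * extent map and bio across the whole operation.
 */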
4143int extent_readpages(struct extent_io_tree *tree,
4144		     struct address_space *mapping,
4145		     struct list_head *pages, unsigned nr_pages,
4146		     get_extent_t get_extent)
4147{
4148	struct bio *bio = NULL;
4149	unsigned page_idx;
4150	unsigned long bio_flags = 0;
4151	struct page *pagepool[16];
4152	struct page *page;
4153	struct extent_map *em_cached = NULL;
4154	int nr = 0;
4155	u64 prev_em_start = (u64)-1;
 
4156
4157	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4158		page = list_entry(pages->prev, struct page, lru);
 
4159
4160		prefetchw(&page->flags);
4161		list_del(&page->lru);
4162		if (add_to_page_cache_lru(page, mapping,
4163					page->index, GFP_NOFS)) {
4164			put_page(page);
4165			continue;
4166		}
4167
4168		pagepool[nr++] = page;
4169		if (nr < ARRAY_SIZE(pagepool))
4170			continue;
4171		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4172				   &bio, 0, &bio_flags, READ, &prev_em_start);
4173		nr = 0;
4174	}
4175	if (nr)
4176		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4177				   &bio, 0, &bio_flags, READ, &prev_em_start);
4178
4179	if (em_cached)
4180		free_extent_map(em_cached);
4181
4182	BUG_ON(!list_empty(pages));
4183	if (bio)
4184		return submit_one_bio(READ, bio, 0, bio_flags);
4185	return 0;
4186}
4187
4188/*
4189 * basic invalidatepage code, this waits on any locked or writeback
4190 * ranges corresponding to the page, and then deletes any extent state
4191 * records from the tree
4192 */
4193int extent_invalidatepage(struct extent_io_tree *tree,
4194			  struct page *page, unsigned long offset)
4195{
4196	struct extent_state *cached_state = NULL;
4197	u64 start = page_offset(page);
4198	u64 end = start + PAGE_SIZE - 1;
4199	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4200
4201	start += ALIGN(offset, blocksize);
4202	if (start > end)
4203		return 0;
4204
4205	lock_extent_bits(tree, start, end, &cached_state);
4206	wait_on_page_writeback(page);
4207	clear_extent_bit(tree, start, end,
4208			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4209			 EXTENT_DO_ACCOUNTING,
4210			 1, 1, &cached_state, GFP_NOFS);
4211	return 0;
4212}
4213
4214/*
4215 * a helper for releasepage, this tests for areas of the page that
4216 * are locked or under IO and drops the related state bits if it is safe
4217 * to drop the page.
4218 */
4219static int try_release_extent_state(struct extent_map_tree *map,
4220				    struct extent_io_tree *tree,
4221				    struct page *page, gfp_t mask)
4222{
4223	u64 start = page_offset(page);
4224	u64 end = start + PAGE_SIZE - 1;
4225	int ret = 1;
4226
4227	if (test_range_bit(tree, start, end,
4228			   EXTENT_IOBITS, 0, NULL))
4229		ret = 0;
4230	else {
4231		if ((mask & GFP_NOFS) == GFP_NOFS)
4232			mask = GFP_NOFS;
4233		/*
4234		 * at this point we can safely clear everything except the
4235		 * locked bit and the nodatasum bit
4236		 */
4237		ret = clear_extent_bit(tree, start, end,
4238				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4239				 0, 0, NULL, mask);
4240
4241		/* if clear_extent_bit failed for enomem reasons,
4242		 * we can't allow the release to continue.
4243		 */
4244		if (ret < 0)
4245			ret = 0;
4246		else
4247			ret = 1;
4248	}
4249	return ret;
4250}
4251
4252/*
4253 * a helper for releasepage.  As long as there are no locked extents
4254 * in the range corresponding to the page, both state records and extent
4255 * map records are removed
4256 */
4257int try_release_extent_mapping(struct extent_map_tree *map,
4258			       struct extent_io_tree *tree, struct page *page,
4259			       gfp_t mask)
4260{
4261	struct extent_map *em;
4262	u64 start = page_offset(page);
4263	u64 end = start + PAGE_SIZE - 1;
4264
4265	if (gfpflags_allow_blocking(mask) &&
4266	    page->mapping->host->i_size > SZ_16M) {
4267		u64 len;
4268		while (start <= end) {
4269			len = end - start + 1;
4270			write_lock(&map->lock);
4271			em = lookup_extent_mapping(map, start, len);
4272			if (!em) {
4273				write_unlock(&map->lock);
4274				break;
4275			}
4276			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4277			    em->start != start) {
4278				write_unlock(&map->lock);
4279				free_extent_map(em);
4280				break;
4281			}
4282			if (!test_range_bit(tree, em->start,
4283					    extent_map_end(em) - 1,
4284					    EXTENT_LOCKED | EXTENT_WRITEBACK,
4285					    0, NULL)) {
4286				remove_extent_mapping(map, em);
4287				/* once for the rb tree */
4288				free_extent_map(em);
4289			}
4290			start = extent_map_end(em);
4291			write_unlock(&map->lock);
4292
4293			/* once for us */
4294			free_extent_map(em);
4295		}
4296	}
4297	return try_release_extent_state(map, tree, page, mask);
4298}
4299
4300/*
4301 * helper function for fiemap, which doesn't want to see any holes.
4302 * This maps until we find something past 'last'
 
4303 */
4304static struct extent_map *get_extent_skip_holes(struct inode *inode,
4305						u64 offset,
4306						u64 last,
4307						get_extent_t *get_extent)
4308{
4309	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4310	struct extent_map *em;
4311	u64 len;
4312
4313	if (offset >= last)
4314		return NULL;
4315
4316	while (1) {
4317		len = last - offset;
4318		if (len == 0)
4319			break;
4320		len = ALIGN(len, sectorsize);
4321		em = get_extent(inode, NULL, 0, offset, len, 0);
4322		if (IS_ERR_OR_NULL(em))
4323			return em;
4324
4325		/* if this isn't a hole return it */
4326		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4327		    em->block_start != EXTENT_MAP_HOLE) {
4328			return em;
4329		}
4330
4331		/* this is a hole, advance to the next extent */
4332		offset = extent_map_end(em);
4333		free_extent_map(em);
4334		if (offset >= last)
4335			break;
4336	}
4337	return NULL;
4338}
4339
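/*
 * Fill in fiemap data for the requested range: look up the last file extent
 * item to find where allocated data ends, then walk extent maps with
 * get_extent_skip_holes() and translate each one into a fiemap extent,
 * setting flags for inline, delalloc, compressed, preallocated and shared
 * extents along the way.
 */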
4340int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4341		__u64 start, __u64 len, get_extent_t *get_extent)
4342{
4343	int ret = 0;
4344	u64 off = start;
4345	u64 max = start + len;
4346	u32 flags = 0;
4347	u32 found_type;
4348	u64 last;
4349	u64 last_for_get_extent = 0;
4350	u64 disko = 0;
4351	u64 isize = i_size_read(inode);
4352	struct btrfs_key found_key;
4353	struct extent_map *em = NULL;
4354	struct extent_state *cached_state = NULL;
4355	struct btrfs_path *path;
4356	struct btrfs_root *root = BTRFS_I(inode)->root;
4357	int end = 0;
4358	u64 em_start = 0;
4359	u64 em_len = 0;
4360	u64 em_end = 0;
4361
4362	if (len == 0)
4363		return -EINVAL;
4364
4365	path = btrfs_alloc_path();
4366	if (!path)
4367		return -ENOMEM;
4368	path->leave_spinning = 1;
4369
4370	start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4371	len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
 
4372
4373	/*
4374	 * lookup the last file extent.  We're not using i_size here
4375	 * because there might be preallocation past i_size
4376	 */
4377	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4378				       0);
4379	if (ret < 0) {
4380		btrfs_free_path(path);
4381		return ret;
4382	}
4383	WARN_ON(!ret);
4384	path->slots[0]--;
4385	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4386	found_type = found_key.type;
4387
4388	/* No extents, but there might be delalloc bits */
4389	if (found_key.objectid != btrfs_ino(inode) ||
4390	    found_type != BTRFS_EXTENT_DATA_KEY) {
4391		/* have to trust i_size as the end */
4392		last = (u64)-1;
4393		last_for_get_extent = isize;
4394	} else {
4395		/*
4396		 * remember the start of the last extent.  There are a
4397		 * bunch of different factors that go into the length of the
4398		 * extent, so it's much less complex to remember where it started
4399		 */
4400		last = found_key.offset;
4401		last_for_get_extent = last + 1;
4402	}
4403	btrfs_release_path(path);
4404
4405	/*
4406	 * we might have some extents allocated but more delalloc past those
4407	 * extents.  so, we trust isize unless the start of the last extent is
4408	 * beyond isize
 
4409	 */
4410	if (last < isize) {
4411		last = (u64)-1;
4412		last_for_get_extent = isize;
4413	}
4414
4415	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4416			 &cached_state);
4417
4418	em = get_extent_skip_holes(inode, start, last_for_get_extent,
4419				   get_extent);
4420	if (!em)
4421		goto out;
4422	if (IS_ERR(em)) {
4423		ret = PTR_ERR(em);
4424		goto out;
4425	}
4426
4427	while (!end) {
4428		u64 offset_in_extent = 0;
4429
4430		/* break if the extent we found is outside the range */
4431		if (em->start >= max || extent_map_end(em) < off)
4432			break;
4433
4434		/*
4435		 * get_extent may return an extent that starts before our
4436		 * requested range.  We have to make sure the ranges
4437		 * we return to fiemap always move forward and don't
4438		 * overlap, so adjust the offsets here
4439		 */
4440		em_start = max(em->start, off);
4441
4442		/*
4443		 * record the offset from the start of the extent
4444		 * for adjusting the disk offset below.  Only do this if the
4445		 * extent isn't compressed since our in ram offset may be past
4446		 * what we have actually allocated on disk.
4447		 */
4448		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4449			offset_in_extent = em_start - em->start;
4450		em_end = extent_map_end(em);
4451		em_len = em_end - em_start;
4452		disko = 0;
4453		flags = 0;
4454
4455		/*
4456		 * bump off for our next call to get_extent
4457		 */
4458		off = extent_map_end(em);
4459		if (off >= max)
4460			end = 1;
4461
4462		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4463			end = 1;
4464			flags |= FIEMAP_EXTENT_LAST;
4465		} else if (em->block_start == EXTENT_MAP_INLINE) {
4466			flags |= (FIEMAP_EXTENT_DATA_INLINE |
4467				  FIEMAP_EXTENT_NOT_ALIGNED);
4468		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
4469			flags |= (FIEMAP_EXTENT_DELALLOC |
4470				  FIEMAP_EXTENT_UNKNOWN);
4471		} else if (fieinfo->fi_extents_max) {
4472			u64 bytenr = em->block_start -
4473				(em->start - em->orig_start);
4474
4475			disko = em->block_start + offset_in_extent;
4476
4477			/*
4478			 * As btrfs supports shared space, this information
4479			 * can be exported to userspace tools via
4480			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4481			 * then we're just getting a count and we can skip the
4482			 * lookup stuff.
4483			 */
4484			ret = btrfs_check_shared(NULL, root->fs_info,
4485						 root->objectid,
4486						 btrfs_ino(inode), bytenr);
4487			if (ret < 0)
4488				goto out_free;
4489			if (ret)
4490				flags |= FIEMAP_EXTENT_SHARED;
4491			ret = 0;
4492		}
4493		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 
4494			flags |= FIEMAP_EXTENT_ENCODED;
4495		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4496			flags |= FIEMAP_EXTENT_UNWRITTEN;
4497
4498		free_extent_map(em);
4499		em = NULL;
4500		if ((em_start >= last) || em_len == (u64)-1 ||
4501		   (last == (u64)-1 && isize <= em_end)) {
4502			flags |= FIEMAP_EXTENT_LAST;
4503			end = 1;
4504		}
4505
4506		/* now scan forward to see if this is really the last extent. */
4507		em = get_extent_skip_holes(inode, off, last_for_get_extent,
4508					   get_extent);
4509		if (IS_ERR(em)) {
4510			ret = PTR_ERR(em);
4511			goto out;
4512		}
4513		if (!em) {
4514			flags |= FIEMAP_EXTENT_LAST;
4515			end = 1;
4516		}
4517		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4518					      em_len, flags);
4519		if (ret) {
4520			if (ret == 1)
4521				ret = 0;
4522			goto out_free;
4523		}
4524	}
4525out_free:
4526	free_extent_map(em);
4527out:
4528	btrfs_free_path(path);
4529	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4530			     &cached_state, GFP_NOFS);
4531	return ret;
4532}
4533
4534static void __free_extent_buffer(struct extent_buffer *eb)
4535{
4536	btrfs_leak_debug_del(&eb->leak_list);
4537	kmem_cache_free(extent_buffer_cache, eb);
4538}
4539
4540int extent_buffer_under_io(struct extent_buffer *eb)
4541{
4542	return (atomic_read(&eb->io_pages) ||
4543		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4544		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4545}
4546
4547/*
4548 * Helper for releasing extent buffer page.
4549 */
4550static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4551{
4552	unsigned long index;
4553	struct page *page;
4554	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4555
4556	BUG_ON(extent_buffer_under_io(eb));
4557
4558	index = num_extent_pages(eb->start, eb->len);
4559	if (index == 0)
 
4560		return;
 
4561
4562	do {
4563		index--;
4564		page = eb->pages[index];
4565		if (!page)
4566			continue;
4567		if (mapped)
4568			spin_lock(&page->mapping->private_lock);
4569		/*
4570		 * We do this since we'll remove the pages after we've
4571		 * removed the eb from the radix tree, so we could race
4572		 * and have this page now attached to the new eb.  So
4573		 * only clear page_private if it's still connected to
4574		 * this eb.
4575		 */
4576		if (PagePrivate(page) &&
4577		    page->private == (unsigned long)eb) {
4578			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4579			BUG_ON(PageDirty(page));
4580			BUG_ON(PageWriteback(page));
4581			/*
4582			 * We need to make sure we haven't been attached
4583			 * to a new eb.
4584			 */
4585			ClearPagePrivate(page);
4586			set_page_private(page, 0);
4587			/* One for the page private */
4588			put_page(page);
4589		}
4590
4591		if (mapped)
4592			spin_unlock(&page->mapping->private_lock);
4593
4594		/* One for when we allocated the page */
4595		put_page(page);
4596	} while (index != 0);
4597}
4598
4599/*
4600 * Helper for releasing the extent buffer.
4601 */
4602static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4603{
4604	btrfs_release_extent_buffer_page(eb);
 
4605	__free_extent_buffer(eb);
4606}
4607
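/*
 * Allocate and initialize a bare extent_buffer (locks, refcounts, wait
 * queues) for the given logical start and length; backing pages are attached
 * by the callers.
 */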
4608static struct extent_buffer *
4609__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4610		      unsigned long len)
4611{
4612	struct extent_buffer *eb = NULL;
4613
4614	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4615	eb->start = start;
4616	eb->len = len;
4617	eb->fs_info = fs_info;
4618	eb->bflags = 0;
4619	rwlock_init(&eb->lock);
4620	atomic_set(&eb->write_locks, 0);
4621	atomic_set(&eb->read_locks, 0);
4622	atomic_set(&eb->blocking_readers, 0);
4623	atomic_set(&eb->blocking_writers, 0);
4624	atomic_set(&eb->spinning_readers, 0);
4625	atomic_set(&eb->spinning_writers, 0);
4626	eb->lock_nested = 0;
4627	init_waitqueue_head(&eb->write_lock_wq);
4628	init_waitqueue_head(&eb->read_lock_wq);
4629
4630	btrfs_leak_debug_add(&eb->leak_list, &buffers);
 
4631
4632	spin_lock_init(&eb->refs_lock);
4633	atomic_set(&eb->refs, 1);
4634	atomic_set(&eb->io_pages, 0);
4635
4636	/*
4637	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4638	 */
4639	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4640		> MAX_INLINE_EXTENT_BUFFER_SIZE);
4641	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4642
4643	return eb;
4644}
4645
4646struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4647{
4648	unsigned long i;
4649	struct page *p;
4650	struct extent_buffer *new;
4651	unsigned long num_pages = num_extent_pages(src->start, src->len);
 
4652
4653	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4654	if (new == NULL)
4655		return NULL;
4656
4657	for (i = 0; i < num_pages; i++) {
4658		p = alloc_page(GFP_NOFS);
4659		if (!p) {
4660			btrfs_release_extent_buffer(new);
4661			return NULL;
4662		}
4663		attach_extent_buffer_page(new, p);
4664		WARN_ON(PageDirty(p));
4665		SetPageUptodate(p);
4666		new->pages[i] = p;
4667	}
4668
4669	copy_extent_buffer(new, src, 0, 0, src->len);
4670	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4671	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4672
4673	return new;
4674}
4675
4676struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4677						  u64 start, unsigned long len)
4678{
4679	struct extent_buffer *eb;
4680	unsigned long num_pages;
4681	unsigned long i;
4682
4683	num_pages = num_extent_pages(start, len);
4684
4685	eb = __alloc_extent_buffer(fs_info, start, len);
4686	if (!eb)
4687		return NULL;
4688
4689	for (i = 0; i < num_pages; i++) {
4690		eb->pages[i] = alloc_page(GFP_NOFS);
4691		if (!eb->pages[i])
4692			goto err;
4693	}
 
4694	set_extent_buffer_uptodate(eb);
4695	btrfs_set_header_nritems(eb, 0);
4696	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4697
4698	return eb;
4699err:
4700	for (; i > 0; i--)
4701		__free_page(eb->pages[i - 1]);
4702	__free_extent_buffer(eb);
4703	return NULL;
4704}
4705
4706struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4707						u64 start)
4708{
4709	unsigned long len;
4710
4711	if (!fs_info) {
4712		/*
4713		 * Called only from tests that don't always have a fs_info
4714		 * available, but we know that nodesize is 4096
4715		 */
4716		len = 4096;
4717	} else {
4718		len = fs_info->tree_root->nodesize;
4719	}
4720
4721	return __alloc_dummy_extent_buffer(fs_info, start, len);
4722}
4723
4724static void check_buffer_tree_ref(struct extent_buffer *eb)
4725{
4726	int refs;
4727	/* the ref bit is tricky.  We have to make sure it is set
4728	 * if we have the buffer dirty.   Otherwise the
4729	 * code to free a buffer can end up dropping a dirty
4730	 * page
4731	 *
4732	 * Once the ref bit is set, it won't go away while the
4733	 * buffer is dirty or in writeback, and it also won't
4734	 * go away while we have the reference count on the
4735	 * eb bumped.
4736	 *
4737	 * We can't just set the ref bit without bumping the
4738	 * ref on the eb because free_extent_buffer might
4739	 * see the ref bit and try to clear it.  If this happens
4740	 * free_extent_buffer might end up dropping our original
4741	 * ref by mistake and freeing the page before we are able
4742	 * to add one more ref.
4743	 *
4744	 * So bump the ref count first, then set the bit.  If someone
4745	 * beat us to it, drop the ref we added.
4746	 */
4747	refs = atomic_read(&eb->refs);
4748	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4749		return;
4750
4751	spin_lock(&eb->refs_lock);
4752	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4753		atomic_inc(&eb->refs);
4754	spin_unlock(&eb->refs_lock);
4755}
4756
4757static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4758		struct page *accessed)
4759{
4760	unsigned long num_pages, i;
4761
4762	check_buffer_tree_ref(eb);
4763
4764	num_pages = num_extent_pages(eb->start, eb->len);
4765	for (i = 0; i < num_pages; i++) {
4766		struct page *p = eb->pages[i];
4767
4768		if (p != accessed)
4769			mark_page_accessed(p);
4770	}
4771}
4772
4773struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4774					 u64 start)
4775{
4776	struct extent_buffer *eb;
4777
4778	rcu_read_lock();
4779	eb = radix_tree_lookup(&fs_info->buffer_radix,
4780			       start >> PAGE_SHIFT);
4781	if (eb && atomic_inc_not_zero(&eb->refs)) {
4782		rcu_read_unlock();
4783		/*
4784		 * Lock our eb's refs_lock to avoid races with
4785		 * free_extent_buffer. When we get our eb it might be flagged
4786		 * with EXTENT_BUFFER_STALE and another task running
4787		 * free_extent_buffer might have seen that flag set,
4788		 * eb->refs == 2, that the buffer isn't under IO (dirty and
4789		 * writeback flags not set) and it's still in the tree (flag
4790		 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4791		 * of decrementing the extent buffer's reference count twice.
4792		 * So here we could race and increment the eb's reference count,
4793		 * clear its stale flag, mark it as dirty and drop our reference
4794		 * before the other task finishes executing free_extent_buffer,
4795		 * which would later result in an attempt to free an extent
4796		 * buffer that is dirty.
4797		 */
4798		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4799			spin_lock(&eb->refs_lock);
4800			spin_unlock(&eb->refs_lock);
4801		}
4802		mark_extent_buffer_accessed(eb, NULL);
4803		return eb;
4804	}
4805	rcu_read_unlock();
4806
4807	return NULL;
4808}
4809
4810#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4811struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4812					       u64 start)
4813{
4814	struct extent_buffer *eb, *exists = NULL;
4815	int ret;
4816
4817	eb = find_extent_buffer(fs_info, start);
4818	if (eb)
4819		return eb;
4820	eb = alloc_dummy_extent_buffer(fs_info, start);
4821	if (!eb)
4822		return NULL;
4823	eb->fs_info = fs_info;
4824again:
4825	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4826	if (ret)
 
4827		goto free_eb;
 
4828	spin_lock(&fs_info->buffer_lock);
4829	ret = radix_tree_insert(&fs_info->buffer_radix,
4830				start >> PAGE_SHIFT, eb);
4831	spin_unlock(&fs_info->buffer_lock);
4832	radix_tree_preload_end();
4833	if (ret == -EEXIST) {
4834		exists = find_extent_buffer(fs_info, start);
4835		if (exists)
4836			goto free_eb;
4837		else
4838			goto again;
4839	}
4840	check_buffer_tree_ref(eb);
4841	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4842
4843	/*
4844	 * We will free dummy extent buffers if they come into
4845	 * free_extent_buffer with a ref count of 2, but if we are using this we
4846	 * want the buffers to stay in memory until we're done with them, so
4847	 * bump the ref count again.
4848	 */
4849	atomic_inc(&eb->refs);
4850	return eb;
4851free_eb:
4852	btrfs_release_extent_buffer(eb);
4853	return exists;
4854}
4855#endif
4856
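/*
 * Find or create the extent buffer for a tree block at @start: return a
 * cached buffer if one exists, otherwise allocate one, attach btree inode
 * pages to it and insert it into the buffer radix tree, retrying on -EEXIST
 * races with other allocators.
 */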
4857struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4858					  u64 start)
4859{
4860	unsigned long len = fs_info->tree_root->nodesize;
4861	unsigned long num_pages = num_extent_pages(start, len);
4862	unsigned long i;
4863	unsigned long index = start >> PAGE_SHIFT;
4864	struct extent_buffer *eb;
4865	struct extent_buffer *exists = NULL;
4866	struct page *p;
4867	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 
4868	int uptodate = 1;
4869	int ret;
4870
4871	eb = find_extent_buffer(fs_info, start);
4872	if (eb)
4873		return eb;
4874
4875	eb = __alloc_extent_buffer(fs_info, start, len);
4876	if (!eb)
4877		return NULL;
4878
 
4879	for (i = 0; i < num_pages; i++, index++) {
4880		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4881		if (!p)
 
4882			goto free_eb;
 
4883
4884		spin_lock(&mapping->private_lock);
4885		if (PagePrivate(p)) {
4886			/*
4887			 * We could have already allocated an eb for this page
4888			 * and attached one so lets see if we can get a ref on
4889			 * the existing eb, and if we can we know it's good and
4890			 * we can just return that one, else we know we can just
4891			 * overwrite page->private.
4892			 */
4893			exists = (struct extent_buffer *)p->private;
4894			if (atomic_inc_not_zero(&exists->refs)) {
4895				spin_unlock(&mapping->private_lock);
4896				unlock_page(p);
4897				put_page(p);
4898				mark_extent_buffer_accessed(exists, p);
4899				goto free_eb;
4900			}
4901			exists = NULL;
4902
4903			/*
4904			 * Do this so attach doesn't complain and we need to
4905			 * drop the ref the old guy had.
4906			 */
4907			ClearPagePrivate(p);
4908			WARN_ON(PageDirty(p));
4909			put_page(p);
4910		}
4911		attach_extent_buffer_page(eb, p);
4912		spin_unlock(&mapping->private_lock);
4913		WARN_ON(PageDirty(p));
 
4914		eb->pages[i] = p;
4915		if (!PageUptodate(p))
4916			uptodate = 0;
4917
4918		/*
4919		 * see below about how we avoid a nasty race with release page
4920		 * and why we unlock later
4921		 */
4922	}
4923	if (uptodate)
4924		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4925again:
4926	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4927	if (ret)
 
4928		goto free_eb;
4929
4930	spin_lock(&fs_info->buffer_lock);
4931	ret = radix_tree_insert(&fs_info->buffer_radix,
4932				start >> PAGE_SHIFT, eb);
4933	spin_unlock(&fs_info->buffer_lock);
4934	radix_tree_preload_end();
4935	if (ret == -EEXIST) {
4936		exists = find_extent_buffer(fs_info, start);
4937		if (exists)
4938			goto free_eb;
4939		else
4940			goto again;
4941	}
4942	/* add one reference for the tree */
4943	check_buffer_tree_ref(eb);
4944	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4945
4946	/*
4947	 * There is a race where releasepage may have
4948	 * tried to find this extent buffer in the radix
4949	 * tree but failed.  It will tell the VM it is safe to
4950	 * reclaim the page, and it will clear the page private bit.
4951	 * We must make sure to set the page private bit properly
4952	 * after the extent buffer is in the radix tree so
4953	 * it doesn't get lost.
4954	 */
4955	SetPageChecked(eb->pages[0]);
4956	for (i = 1; i < num_pages; i++) {
4957		p = eb->pages[i];
4958		ClearPageChecked(p);
4959		unlock_page(p);
4960	}
4961	unlock_page(eb->pages[0]);
4962	return eb;
4963
4964free_eb:
4965	WARN_ON(!atomic_dec_and_test(&eb->refs));
4966	for (i = 0; i < num_pages; i++) {
4967		if (eb->pages[i])
4968			unlock_page(eb->pages[i]);
4969	}
4970
4971	btrfs_release_extent_buffer(eb);
4972	return exists;
4973}
4974
4975static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4976{
4977	struct extent_buffer *eb =
4978			container_of(head, struct extent_buffer, rcu_head);
4979
4980	__free_extent_buffer(eb);
4981}
4982
4983/* Expects to have eb->eb_lock already held */
4984static int release_extent_buffer(struct extent_buffer *eb)
4985{
4986	WARN_ON(atomic_read(&eb->refs) == 0);
4987	if (atomic_dec_and_test(&eb->refs)) {
4988		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4989			struct btrfs_fs_info *fs_info = eb->fs_info;
4990
4991			spin_unlock(&eb->refs_lock);
4992
4993			spin_lock(&fs_info->buffer_lock);
4994			radix_tree_delete(&fs_info->buffer_radix,
4995					  eb->start >> PAGE_SHIFT);
4996			spin_unlock(&fs_info->buffer_lock);
4997		} else {
4998			spin_unlock(&eb->refs_lock);
4999		}
5000
5001		/* Should be safe to release our pages at this point */
5002		btrfs_release_extent_buffer_page(eb);
5003#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5004		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
5005			__free_extent_buffer(eb);
5006			return 1;
5007		}
5008#endif
5009		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5010		return 1;
5011	}
5012	spin_unlock(&eb->refs_lock);
5013
5014	return 0;
5015}
5016
5017void free_extent_buffer(struct extent_buffer *eb)
5018{
5019	int refs;
5020	int old;
5021	if (!eb)
5022		return;
5023
5024	while (1) {
5025		refs = atomic_read(&eb->refs);
5026		if (refs <= 3)
5027			break;
5028		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5029		if (old == refs)
5030			return;
5031	}
5032
5033	spin_lock(&eb->refs_lock);
5034	if (atomic_read(&eb->refs) == 2 &&
5035	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
5036		atomic_dec(&eb->refs);
5037
5038	if (atomic_read(&eb->refs) == 2 &&
5039	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5040	    !extent_buffer_under_io(eb) &&
5041	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5042		atomic_dec(&eb->refs);
5043
5044	/*
5045	 * I know this is terrible, but it's temporary until we stop tracking
5046	 * the uptodate bits and such for the extent buffers.
5047	 */
5048	release_extent_buffer(eb);
5049}
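/*
 * Editorial note (not part of the kernel source): free_extent_buffer() drops
 * a reference locklessly with cmpxchg while the count stays above 3; only
 * when the count is low enough that the dummy/stale special cases under
 * eb->refs_lock might apply does it fall back to the locked path.  A minimal
 * userspace sketch of that pattern follows; the sketch_* name and the bare
 * threshold of 3 are assumptions made purely for illustration.
 */
#if 0	/* illustrative sketch only, plain C11 */
#include <stdatomic.h>
#include <stdbool.h>

/* Drop one reference; return true when the caller must take the slow path. */
static bool sketch_put_ref_fastpath(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old > 3) {
		/* Try to move from old to old - 1 without any lock. */
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return false;	/* fast path: reference dropped */
		/* The failed CAS reloaded 'old'; retry. */
	}
	return true;	/* low count: recheck and drop under refs_lock */
}
#endif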
5050
5051void free_extent_buffer_stale(struct extent_buffer *eb)
5052{
5053	if (!eb)
5054		return;
5055
5056	spin_lock(&eb->refs_lock);
5057	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5058
5059	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5060	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5061		atomic_dec(&eb->refs);
5062	release_extent_buffer(eb);
5063}
5064
5065void clear_extent_buffer_dirty(struct extent_buffer *eb)
5066{
5067	unsigned long i;
5068	unsigned long num_pages;
5069	struct page *page;
5070
5071	num_pages = num_extent_pages(eb->start, eb->len);
5072
5073	for (i = 0; i < num_pages; i++) {
5074		page = eb->pages[i];
5075		if (!PageDirty(page))
5076			continue;
5077
5078		lock_page(page);
5079		WARN_ON(!PagePrivate(page));
5080
5081		clear_page_dirty_for_io(page);
5082		spin_lock_irq(&page->mapping->tree_lock);
5083		if (!PageDirty(page)) {
5084			radix_tree_tag_clear(&page->mapping->page_tree,
5085						page_index(page),
5086						PAGECACHE_TAG_DIRTY);
5087		}
5088		spin_unlock_irq(&page->mapping->tree_lock);
5089		ClearPageError(page);
5090		unlock_page(page);
5091	}
5092	WARN_ON(atomic_read(&eb->refs) == 0);
5093}
5094
5095int set_extent_buffer_dirty(struct extent_buffer *eb)
5096{
5097	unsigned long i;
5098	unsigned long num_pages;
5099	int was_dirty = 0;
5100
5101	check_buffer_tree_ref(eb);
5102
5103	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5104
5105	num_pages = num_extent_pages(eb->start, eb->len);
5106	WARN_ON(atomic_read(&eb->refs) == 0);
5107	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5108
5109	for (i = 0; i < num_pages; i++)
5110		set_page_dirty(eb->pages[i]);
5111	return was_dirty;
5112}
5113
5114void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5115{
5116	unsigned long i;
5117	struct page *page;
5118	unsigned long num_pages;
5119
5120	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5121	num_pages = num_extent_pages(eb->start, eb->len);
5122	for (i = 0; i < num_pages; i++) {
5123		page = eb->pages[i];
5124		if (page)
5125			ClearPageUptodate(page);
5126	}
5127}
5128
5129void set_extent_buffer_uptodate(struct extent_buffer *eb)
5130{
5131	unsigned long i;
5132	struct page *page;
5133	unsigned long num_pages;
5134
5135	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5136	num_pages = num_extent_pages(eb->start, eb->len);
5137	for (i = 0; i < num_pages; i++) {
5138		page = eb->pages[i];
5139		SetPageUptodate(page);
5140	}
5141}
5142
5143int extent_buffer_uptodate(struct extent_buffer *eb)
5144{
5145	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5146}
5147
5148int read_extent_buffer_pages(struct extent_io_tree *tree,
5149			     struct extent_buffer *eb, u64 start, int wait,
5150			     get_extent_t *get_extent, int mirror_num)
5151{
5152	unsigned long i;
5153	unsigned long start_i;
5154	struct page *page;
5155	int err;
5156	int ret = 0;
5157	int locked_pages = 0;
5158	int all_uptodate = 1;
5159	unsigned long num_pages;
5160	unsigned long num_reads = 0;
5161	struct bio *bio = NULL;
5162	unsigned long bio_flags = 0;
5163
5164	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5165		return 0;
5166
5167	if (start) {
5168		WARN_ON(start < eb->start);
5169		start_i = (start >> PAGE_SHIFT) -
5170			(eb->start >> PAGE_SHIFT);
5171	} else {
5172		start_i = 0;
5173	}
5174
5175	num_pages = num_extent_pages(eb->start, eb->len);
5176	for (i = start_i; i < num_pages; i++) {
5177		page = eb->pages[i];
5178		if (wait == WAIT_NONE) {
5179			if (!trylock_page(page))
5180				goto unlock_exit;
5181		} else {
5182			lock_page(page);
5183		}
5184		locked_pages++;
5185		if (!PageUptodate(page)) {
5186			num_reads++;
5187			all_uptodate = 0;
5188		}
5189	}
5190	if (all_uptodate) {
5191		if (start_i == 0)
5192			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5193		goto unlock_exit;
5194	}
5195
5196	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5197	eb->read_mirror = 0;
5198	atomic_set(&eb->io_pages, num_reads);
5199	for (i = start_i; i < num_pages; i++) {
5200		page = eb->pages[i];
5201		if (!PageUptodate(page)) {
5202			ClearPageError(page);
5203			err = __extent_read_full_page(tree, page,
5204						      get_extent, &bio,
5205						      mirror_num, &bio_flags,
5206						      READ | REQ_META);
5207			if (err)
5208				ret = err;
5209		} else {
5210			unlock_page(page);
5211		}
5212	}
5213
5214	if (bio) {
5215		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
5216				     bio_flags);
5217		if (err)
5218			return err;
5219	}
5220
5221	if (ret || wait != WAIT_COMPLETE)
5222		return ret;
5223
5224	for (i = start_i; i < num_pages; i++) {
5225		page = eb->pages[i];
5226		wait_on_page_locked(page);
5227		if (!PageUptodate(page))
5228			ret = -EIO;
5229	}
5230
5231	return ret;
5232
5233unlock_exit:
5234	i = start_i;
5235	while (locked_pages > 0) {
5236		page = eb->pages[i];
5237		i++;
5238		unlock_page(page);
5239		locked_pages--;
5240	}
5241	return ret;
5242}
5243
5244void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5245			unsigned long start,
5246			unsigned long len)
5247{
5248	size_t cur;
5249	size_t offset;
5250	struct page *page;
5251	char *kaddr;
5252	char *dst = (char *)dstv;
5253	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5254	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5255
5256	WARN_ON(start > eb->len);
5257	WARN_ON(start + len > eb->start + eb->len);
5258
5259	offset = (start_offset + start) & (PAGE_SIZE - 1);
5260
5261	while (len > 0) {
5262		page = eb->pages[i];
5263
5264		cur = min(len, (PAGE_SIZE - offset));
5265		kaddr = page_address(page);
5266		memcpy(dst, kaddr + offset, cur);
5267
5268		dst += cur;
5269		len -= cur;
5270		offset = 0;
5271		i++;
5272	}
5273}
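/*
 * Editorial note (not part of the kernel source): read_extent_buffer() walks
 * the eb's pages, copying up to a page boundary at a time; only the first
 * chunk starts at a non-zero in-page offset.  A self-contained userspace
 * sketch of the same arithmetic follows; the sketch_* names and the
 * 4096-byte page size are assumptions made for the example.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096UL

/* Copy len bytes at logical offset 'start' out of an array of pages. */
static void sketch_read(char * const pages[], size_t start_offset,
			size_t start, size_t len, char *dst)
{
	size_t i = (start_offset + start) / SKETCH_PAGE_SIZE;
	size_t offset = (start_offset + start) % SKETCH_PAGE_SIZE;

	while (len > 0) {
		size_t cur = len < SKETCH_PAGE_SIZE - offset ?
			     len : SKETCH_PAGE_SIZE - offset;

		memcpy(dst, pages[i] + offset, cur);
		dst += cur;
		len -= cur;
		offset = 0;	/* later pages are read from their start */
		i++;
	}
}
#endif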
5274
5275int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5276			unsigned long start,
5277			unsigned long len)
5278{
5279	size_t cur;
5280	size_t offset;
5281	struct page *page;
5282	char *kaddr;
5283	char __user *dst = (char __user *)dstv;
5284	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5285	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5286	int ret = 0;
5287
5288	WARN_ON(start > eb->len);
5289	WARN_ON(start + len > eb->start + eb->len);
5290
5291	offset = (start_offset + start) & (PAGE_SIZE - 1);
5292
5293	while (len > 0) {
5294		page = eb->pages[i];
5295
5296		cur = min(len, (PAGE_SIZE - offset));
5297		kaddr = page_address(page);
5298		if (copy_to_user(dst, kaddr + offset, cur)) {
5299			ret = -EFAULT;
5300			break;
5301		}
5302
5303		dst += cur;
5304		len -= cur;
5305		offset = 0;
5306		i++;
5307	}
5308
5309	return ret;
5310}
5311
5312int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5313			       unsigned long min_len, char **map,
5314			       unsigned long *map_start,
5315			       unsigned long *map_len)
5316{
5317	size_t offset = start & (PAGE_SIZE - 1);
5318	char *kaddr;
5319	struct page *p;
5320	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5321	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5322	unsigned long end_i = (start_offset + start + min_len - 1) >>
5323		PAGE_SHIFT;
5324
5325	if (i != end_i)
5326		return -EINVAL;
5327
5328	if (i == 0) {
5329		offset = start_offset;
5330		*map_start = 0;
5331	} else {
5332		offset = 0;
5333		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5334	}
5335
5336	if (start + min_len > eb->len) {
5337		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
5338		       "wanted %lu %lu\n",
5339		       eb->start, eb->len, start, min_len);
5340		return -EINVAL;
5341	}
5342
5343	p = eb->pages[i];
5344	kaddr = page_address(p);
5345	*map = kaddr + offset;
5346	*map_len = PAGE_SIZE - offset;
5347	return 0;
5348}
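/*
 * Editorial note (not part of the kernel source): map_private_extent_buffer()
 * hands back a direct pointer into one page, so the requested range must sit
 * entirely inside a single page; otherwise it returns -EINVAL and callers
 * typically fall back to the copying helpers such as read_extent_buffer().
 * A small sketch of just that check follows; the sketch_* name and 4K pages
 * are assumptions for the example.
 */
#if 0	/* illustrative sketch only */
#include <stdbool.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12	/* assumed 4096-byte pages */

/* True if [start, start + min_len) lies within a single page of the eb. */
static bool sketch_fits_in_one_page(size_t start_offset, size_t start,
				    size_t min_len)
{
	size_t first = (start_offset + start) >> SKETCH_PAGE_SHIFT;
	size_t last = (start_offset + start + min_len - 1) >> SKETCH_PAGE_SHIFT;

	return first == last;
}
#endif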
5349
5350int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5351			  unsigned long start,
5352			  unsigned long len)
5353{
5354	size_t cur;
5355	size_t offset;
5356	struct page *page;
5357	char *kaddr;
5358	char *ptr = (char *)ptrv;
5359	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5360	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5361	int ret = 0;
5362
5363	WARN_ON(start > eb->len);
5364	WARN_ON(start + len > eb->start + eb->len);
5365
5366	offset = (start_offset + start) & (PAGE_SIZE - 1);
5367
5368	while (len > 0) {
5369		page = eb->pages[i];
5370
5371		cur = min(len, (PAGE_SIZE - offset));
5372
5373		kaddr = page_address(page);
5374		ret = memcmp(ptr, kaddr + offset, cur);
5375		if (ret)
5376			break;
5377
5378		ptr += cur;
5379		len -= cur;
5380		offset = 0;
5381		i++;
5382	}
5383	return ret;
5384}
5385
5386void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5387			 unsigned long start, unsigned long len)
5388{
5389	size_t cur;
5390	size_t offset;
5391	struct page *page;
5392	char *kaddr;
5393	char *src = (char *)srcv;
5394	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5395	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5396
5397	WARN_ON(start > eb->len);
5398	WARN_ON(start + len > eb->start + eb->len);
5399
5400	offset = (start_offset + start) & (PAGE_SIZE - 1);
5401
5402	while (len > 0) {
5403		page = eb->pages[i];
5404		WARN_ON(!PageUptodate(page));
5405
5406		cur = min(len, PAGE_SIZE - offset);
5407		kaddr = page_address(page);
5408		memcpy(kaddr + offset, src, cur);
5409
5410		src += cur;
5411		len -= cur;
5412		offset = 0;
5413		i++;
5414	}
5415}
5416
5417void memset_extent_buffer(struct extent_buffer *eb, char c,
5418			  unsigned long start, unsigned long len)
5419{
5420	size_t cur;
5421	size_t offset;
5422	struct page *page;
5423	char *kaddr;
5424	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5425	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5426
5427	WARN_ON(start > eb->len);
5428	WARN_ON(start + len > eb->start + eb->len);
5429
5430	offset = (start_offset + start) & (PAGE_SIZE - 1);
5431
5432	while (len > 0) {
5433		page = eb->pages[i];
5434		WARN_ON(!PageUptodate(page));
5435
5436		cur = min(len, PAGE_SIZE - offset);
5437		kaddr = page_address(page);
5438		memset(kaddr + offset, c, cur);
5439
5440		len -= cur;
5441		offset = 0;
5442		i++;
5443	}
5444}
5445
5446void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5447			unsigned long dst_offset, unsigned long src_offset,
5448			unsigned long len)
5449{
5450	u64 dst_len = dst->len;
5451	size_t cur;
5452	size_t offset;
5453	struct page *page;
5454	char *kaddr;
5455	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5456	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5457
5458	WARN_ON(src->len != dst_len);
5459
5460	offset = (start_offset + dst_offset) &
5461		(PAGE_SIZE - 1);
5462
5463	while (len > 0) {
5464		page = dst->pages[i];
5465		WARN_ON(!PageUptodate(page));
5466
5467		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5468
5469		kaddr = page_address(page);
5470		read_extent_buffer(src, kaddr + offset, src_offset, cur);
5471
5472		src_offset += cur;
5473		len -= cur;
5474		offset = 0;
5475		i++;
5476	}
5477}
5478
5479/*
5480 * The extent buffer bitmap operations are done with byte granularity because
5481 * bitmap items are not guaranteed to be aligned to a word and therefore a
5482 * single word in a bitmap may straddle two pages in the extent buffer.
5483 */
5484#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
5485#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
5486#define BITMAP_FIRST_BYTE_MASK(start) \
5487	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
5488#define BITMAP_LAST_BYTE_MASK(nbits) \
5489	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
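/*
 * Editorial note (not part of the kernel source): as a worked example with
 * BITS_PER_BYTE == 8, BITMAP_FIRST_BYTE_MASK(3) is 0xf8 (bits 3..7 of the
 * first byte) and BITMAP_LAST_BYTE_MASK(11) is 0x07 (an 11-bit area ends on
 * bits 0..2 of its second byte).  A tiny sanity check:
 */
#if 0	/* illustrative sketch only */
#include <assert.h>

static void sketch_byte_mask_check(void)
{
	assert(BITMAP_FIRST_BYTE_MASK(3) == 0xf8);
	assert(BITMAP_LAST_BYTE_MASK(11) == 0x07);
}
#endif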
5490
5491/*
5492 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5493 * given bit number
5494 * @eb: the extent buffer
5495 * @start: offset of the bitmap item in the extent buffer
5496 * @nr: bit number
5497 * @page_index: return index of the page in the extent buffer that contains the
5498 * given bit number
5499 * @page_offset: return offset into the page given by page_index
5500 *
5501 * This helper hides the ugliness of finding the byte in an extent buffer which
5502 * contains a given bit.
5503 */
5504static inline void eb_bitmap_offset(struct extent_buffer *eb,
5505				    unsigned long start, unsigned long nr,
5506				    unsigned long *page_index,
5507				    size_t *page_offset)
5508{
5509	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5510	size_t byte_offset = BIT_BYTE(nr);
5511	size_t offset;
5512
5513	/*
5514	 * The byte we want is the offset of the extent buffer + the offset of
5515	 * the bitmap item in the extent buffer + the offset of the byte in the
5516	 * bitmap item.
5517	 */
5518	offset = start_offset + start + byte_offset;
5519
5520	*page_index = offset >> PAGE_SHIFT;
5521	*page_offset = offset & (PAGE_SIZE - 1);
5522}
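/*
 * Editorial note (not part of the kernel source): a worked example with
 * assumed values -- 4096-byte pages, a page-aligned eb (start_offset == 0),
 * a bitmap item at byte offset 8100 inside the eb, and bit number 37:
 *
 *	byte_offset  = 37 / 8       = 4
 *	offset       = 0 + 8100 + 4 = 8104
 *	*page_index  = 8104 >> 12   = 1
 *	*page_offset = 8104 & 4095  = 4008
 *
 * i.e. the bit lives in byte 4008 of the eb's second page.
 */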
5523
5524/**
5525 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5526 * @eb: the extent buffer
5527 * @start: offset of the bitmap item in the extent buffer
5528 * @nr: bit number to test
5529 */
5530int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5531			   unsigned long nr)
5532{
5533	char *kaddr;
5534	struct page *page;
5535	unsigned long i;
5536	size_t offset;
5537
5538	eb_bitmap_offset(eb, start, nr, &i, &offset);
5539	page = eb->pages[i];
5540	WARN_ON(!PageUptodate(page));
5541	kaddr = page_address(page);
5542	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5543}
5544
5545/**
5546 * extent_buffer_bitmap_set - set an area of a bitmap
5547 * @eb: the extent buffer
5548 * @start: offset of the bitmap item in the extent buffer
5549 * @pos: bit number of the first bit
5550 * @len: number of bits to set
5551 */
5552void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5553			      unsigned long pos, unsigned long len)
5554{
5555	char *kaddr;
5556	struct page *page;
5557	unsigned long i;
5558	size_t offset;
5559	const unsigned int size = pos + len;
5560	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5561	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5562
5563	eb_bitmap_offset(eb, start, pos, &i, &offset);
5564	page = eb->pages[i];
5565	WARN_ON(!PageUptodate(page));
5566	kaddr = page_address(page);
5567
5568	while (len >= bits_to_set) {
5569		kaddr[offset] |= mask_to_set;
5570		len -= bits_to_set;
5571		bits_to_set = BITS_PER_BYTE;
5572		mask_to_set = ~0U;
5573		if (++offset >= PAGE_SIZE && len > 0) {
5574			offset = 0;
5575			page = eb->pages[++i];
5576			WARN_ON(!PageUptodate(page));
5577			kaddr = page_address(page);
5578		}
5579	}
5580	if (len) {
5581		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5582		kaddr[offset] |= mask_to_set;
5583	}
5584}
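/*
 * Editorial note (not part of the kernel source): ignoring page crossings,
 * the loop above is the usual "partial first byte, whole bytes, partial last
 * byte" bitmap fill.  A flat-array sketch of the same algorithm follows; the
 * sketch_* name is made up for the example, and the masks are the ones
 * defined above.
 */
#if 0	/* illustrative sketch only */
static void sketch_bitmap_set(unsigned char *map, unsigned long pos,
			      unsigned long len)
{
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
	unsigned long offset = pos / BITS_PER_BYTE;

	while (len >= bits_to_set) {
		map[offset] |= mask_to_set;	/* fill up to a byte boundary */
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0U;
		offset++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		map[offset] |= mask_to_set;	/* partial last byte */
	}
}
#endif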
5585
5586
5587/**
5588 * extent_buffer_bitmap_clear - clear an area of a bitmap
5589 * @eb: the extent buffer
5590 * @start: offset of the bitmap item in the extent buffer
5591 * @pos: bit number of the first bit
5592 * @len: number of bits to clear
5593 */
5594void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5595				unsigned long pos, unsigned long len)
5596{
5597	char *kaddr;
5598	struct page *page;
5599	unsigned long i;
5600	size_t offset;
5601	const unsigned int size = pos + len;
5602	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5603	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5604
5605	eb_bitmap_offset(eb, start, pos, &i, &offset);
5606	page = eb->pages[i];
5607	WARN_ON(!PageUptodate(page));
5608	kaddr = page_address(page);
5609
5610	while (len >= bits_to_clear) {
5611		kaddr[offset] &= ~mask_to_clear;
5612		len -= bits_to_clear;
5613		bits_to_clear = BITS_PER_BYTE;
5614		mask_to_clear = ~0U;
5615		if (++offset >= PAGE_SIZE && len > 0) {
5616			offset = 0;
5617			page = eb->pages[++i];
5618			WARN_ON(!PageUptodate(page));
5619			kaddr = page_address(page);
5620		}
5621	}
5622	if (len) {
5623		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5624		kaddr[offset] &= ~mask_to_clear;
5625	}
5626}
5627
5628static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5629{
5630	unsigned long distance = (src > dst) ? src - dst : dst - src;
5631	return distance < len;
5632}
5633
5634static void copy_pages(struct page *dst_page, struct page *src_page,
5635		       unsigned long dst_off, unsigned long src_off,
5636		       unsigned long len)
5637{
5638	char *dst_kaddr = page_address(dst_page);
5639	char *src_kaddr;
5640	int must_memmove = 0;
5641
5642	if (dst_page != src_page) {
5643		src_kaddr = page_address(src_page);
5644	} else {
5645		src_kaddr = dst_kaddr;
5646		if (areas_overlap(src_off, dst_off, len))
5647			must_memmove = 1;
5648	}
5649
5650	if (must_memmove)
5651		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5652	else
5653		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5654}
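/*
 * Editorial note (not part of the kernel source): memcpy() has undefined
 * behaviour when source and destination overlap, so copy_pages() only needs
 * the memmove() fallback when both offsets land on the same page and the
 * ranges are closer together than 'len'.  A self-contained illustration
 * follows; the sketch_* names are made up for the example.
 */
#if 0	/* illustrative sketch only */
#include <stdbool.h>
#include <string.h>

static bool sketch_overlap(unsigned long src, unsigned long dst,
			   unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

/* Copy within one buffer, picking memmove() only when it is really needed. */
static void sketch_copy_in_page(char *page, unsigned long dst_off,
				unsigned long src_off, unsigned long len)
{
	if (sketch_overlap(src_off, dst_off, len))
		memmove(page + dst_off, page + src_off, len);
	else
		memcpy(page + dst_off, page + src_off, len);
}
#endif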
5655
5656void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5657			   unsigned long src_offset, unsigned long len)
5658{
5659	size_t cur;
5660	size_t dst_off_in_page;
5661	size_t src_off_in_page;
5662	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5663	unsigned long dst_i;
5664	unsigned long src_i;
5665
5666	if (src_offset + len > dst->len) {
5667		btrfs_err(dst->fs_info,
5668			"memmove bogus src_offset %lu move "
5669		       "len %lu dst len %lu", src_offset, len, dst->len);
5670		BUG_ON(1);
5671	}
5672	if (dst_offset + len > dst->len) {
5673		btrfs_err(dst->fs_info,
5674			"memmove bogus dst_offset %lu move "
5675		       "len %lu dst len %lu", dst_offset, len, dst->len);
5676		BUG_ON(1);
5677	}
5678
5679	while (len > 0) {
5680		dst_off_in_page = (start_offset + dst_offset) &
5681			(PAGE_SIZE - 1);
5682		src_off_in_page = (start_offset + src_offset) &
5683			(PAGE_SIZE - 1);
5684
5685		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5686		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5687
5688		cur = min(len, (unsigned long)(PAGE_SIZE -
5689					       src_off_in_page));
5690		cur = min_t(unsigned long, cur,
5691			(unsigned long)(PAGE_SIZE - dst_off_in_page));
5692
5693		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5694			   dst_off_in_page, src_off_in_page, cur);
5695
5696		src_offset += cur;
5697		dst_offset += cur;
5698		len -= cur;
5699	}
5700}
5701
5702void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5703			   unsigned long src_offset, unsigned long len)
5704{
5705	size_t cur;
5706	size_t dst_off_in_page;
5707	size_t src_off_in_page;
5708	unsigned long dst_end = dst_offset + len - 1;
5709	unsigned long src_end = src_offset + len - 1;
5710	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5711	unsigned long dst_i;
5712	unsigned long src_i;
5713
5714	if (src_offset + len > dst->len) {
5715		btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
5716		       "len %lu len %lu", src_offset, len, dst->len);
5717		BUG_ON(1);
5718	}
5719	if (dst_offset + len > dst->len) {
5720		btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
5721		       "len %lu len %lu", dst_offset, len, dst->len);
5722		BUG_ON(1);
5723	}
5724	if (dst_offset < src_offset) {
5725		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5726		return;
5727	}
5728	while (len > 0) {
5729		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5730		src_i = (start_offset + src_end) >> PAGE_SHIFT;
5731
5732		dst_off_in_page = (start_offset + dst_end) &
5733			(PAGE_SIZE - 1);
5734		src_off_in_page = (start_offset + src_end) &
5735			(PAGE_SIZE - 1);
5736
5737		cur = min_t(unsigned long, len, src_off_in_page + 1);
5738		cur = min(cur, dst_off_in_page + 1);
5739		copy_pages(dst->pages[dst_i], dst->pages[src_i],
5740			   dst_off_in_page - cur + 1,
5741			   src_off_in_page - cur + 1, cur);
5742
5743		dst_end -= cur;
5744		src_end -= cur;
5745		len -= cur;
5746	}
5747}
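/*
 * Editorial note (not part of the kernel source): when the destination range
 * sits above an overlapping source range, a front-to-back copy would clobber
 * source bytes before they are read, which is why the loop above walks both
 * ranges from their last byte backwards.  A flat-buffer sketch of that idea
 * follows; the sketch_* name is made up for the example, and the dst < src
 * case is simply delegated to memcpy_extent_buffer() in the function above.
 */
#if 0	/* illustrative sketch only */
static void sketch_move_up(char *buf, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;

	while (len > 0) {
		buf[dst_end--] = buf[src_end--];	/* copy back to front */
		len--;
	}
}
#endif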
5748
5749int try_release_extent_buffer(struct page *page)
5750{
5751	struct extent_buffer *eb;
5752
5753	/*
5754	 * We need to make sure nobody is attaching this page to an eb right
5755	 * now.
5756	 */
5757	spin_lock(&page->mapping->private_lock);
5758	if (!PagePrivate(page)) {
5759		spin_unlock(&page->mapping->private_lock);
5760		return 1;
5761	}
5762
5763	eb = (struct extent_buffer *)page->private;
5764	BUG_ON(!eb);
5765
5766	/*
5767	 * This is a little awful but should be ok, we need to make sure that
5768	 * the eb doesn't disappear out from under us while we're looking at
5769	 * this page.
5770	 */
5771	spin_lock(&eb->refs_lock);
5772	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5773		spin_unlock(&eb->refs_lock);
5774		spin_unlock(&page->mapping->private_lock);
5775		return 0;
5776	}
5777	spin_unlock(&page->mapping->private_lock);
5778
5779	/*
5780	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5781	 * so just return, this page will likely be freed soon anyway.
5782	 */
5783	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5784		spin_unlock(&eb->refs_lock);
5785		return 0;
5786	}
5787
5788	return release_extent_buffer(eb);
5789}