   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/blkdev.h>
   8#include <linux/radix-tree.h>
   9#include <linux/writeback.h>
  10#include <linux/workqueue.h>
  11#include <linux/kthread.h>
  12#include <linux/slab.h>
  13#include <linux/migrate.h>
  14#include <linux/ratelimit.h>
  15#include <linux/uuid.h>
  16#include <linux/semaphore.h>
  17#include <linux/error-injection.h>
  18#include <linux/crc32c.h>
  19#include <linux/sched/mm.h>
  20#include <linux/unaligned.h>
  21#include <crypto/hash.h>
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "bio.h"
  27#include "print-tree.h"
  28#include "locking.h"
  29#include "tree-log.h"
  30#include "free-space-cache.h"
  31#include "free-space-tree.h"
  32#include "dev-replace.h"
  33#include "raid56.h"
  34#include "sysfs.h"
  35#include "qgroup.h"
  36#include "compression.h"
  37#include "tree-checker.h"
  38#include "ref-verify.h"
  39#include "block-group.h"
  40#include "discard.h"
  41#include "space-info.h"
  42#include "zoned.h"
  43#include "subpage.h"
  44#include "fs.h"
  45#include "accessors.h"
  46#include "extent-tree.h"
  47#include "root-tree.h"
  48#include "defrag.h"
  49#include "uuid-tree.h"
  50#include "relocation.h"
  51#include "scrub.h"
  52#include "super.h"
  53
  54#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
  55				 BTRFS_HEADER_FLAG_RELOC |\
  56				 BTRFS_SUPER_FLAG_ERROR |\
  57				 BTRFS_SUPER_FLAG_SEEDING |\
  58				 BTRFS_SUPER_FLAG_METADUMP |\
  59				 BTRFS_SUPER_FLAG_METADUMP_V2)
  60
  61static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
  62static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
  63
  64static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
  65{
  66	if (fs_info->csum_shash)
  67		crypto_free_shash(fs_info->csum_shash);
  68}
  69
  70/*
  71 * Compute the csum of a btree block and store the result in the provided buffer.
  72 */
  73static void csum_tree_block(struct extent_buffer *buf, u8 *result)
  74{
  75	struct btrfs_fs_info *fs_info = buf->fs_info;
  76	int num_pages;
  77	u32 first_page_part;
  78	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
  79	char *kaddr;
  80	int i;
  81
  82	shash->tfm = fs_info->csum_shash;
  83	crypto_shash_init(shash);
  84
  85	if (buf->addr) {
  86		/* Pages are contiguous, handle them as a big one. */
  87		kaddr = buf->addr;
  88		first_page_part = fs_info->nodesize;
  89		num_pages = 1;
  90	} else {
  91		kaddr = folio_address(buf->folios[0]);
  92		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
  93		num_pages = num_extent_pages(buf);
  94	}
  95
  96	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
  97			    first_page_part - BTRFS_CSUM_SIZE);
  98
  99	/*
 100	 * Only the case of multiple single-page folios reaches this loop.
 101	 *
 102	 * The nodesize <= PAGE_SIZE and large folio cases were already fully
 103	 * handled by the crypto_shash_update() call above.
 104	 */
 105	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
 106		kaddr = folio_address(buf->folios[i]);
 107		crypto_shash_update(shash, kaddr, PAGE_SIZE);
 108	}
 109	memset(result, 0, BTRFS_CSUM_SIZE);
 110	crypto_shash_final(shash, result);
 111}
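
/*
 * Editorial sketch (not part of the upstream file): the checksum of a tree
 * block covers everything after the csum field in the header, i.e. for a
 * nodesize-sized block:
 *
 *   [0 .. BTRFS_CSUM_SIZE)           on-disk csum; skipped as input, the
 *                                    computed result is written back here
 *   [BTRFS_CSUM_SIZE .. nodesize)    data fed to crypto_shash_update()
 */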
 112
 113/*
 114 * we can't consider a given block up to date unless the transid of the
 115 * block matches the transid in the parent node's pointer.  This is how we
 116 * detect blocks that either didn't get written at all or got written
 117 * in the wrong place.
 118 */
 119int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
 120{
 121	if (!extent_buffer_uptodate(eb))
 122		return 0;
 123
 124	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 125		return 1;
 126
 127	if (atomic)
 128		return -EAGAIN;
 129
 130	if (!extent_buffer_uptodate(eb) ||
 131	    btrfs_header_generation(eb) != parent_transid) {
 132		btrfs_err_rl(eb->fs_info,
 133"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
 134			eb->start, eb->read_mirror,
 135			parent_transid, btrfs_header_generation(eb));
 136		clear_extent_buffer_uptodate(eb);
 137		return 0;
 138	}
 139	return 1;
 140}
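
/*
 * Usage sketch (editorial, hypothetical helper, not upstream code): a
 * non-atomic caller holding the generation from the parent's block pointer
 * can validate a child block like this; a return of 1 means uptodate with
 * a matching transid.
 */
#if 0 /* illustration only */
static int example_check_child(struct extent_buffer *eb, u64 parent_gen)
{
	return btrfs_buffer_uptodate(eb, parent_gen, 0) == 1 ? 0 : -EIO;
}
#endif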
 141
 142static bool btrfs_supported_super_csum(u16 csum_type)
 143{
 144	switch (csum_type) {
 145	case BTRFS_CSUM_TYPE_CRC32:
 146	case BTRFS_CSUM_TYPE_XXHASH:
 147	case BTRFS_CSUM_TYPE_SHA256:
 148	case BTRFS_CSUM_TYPE_BLAKE2:
 149		return true;
 150	default:
 151		return false;
 152	}
 153}
 154
 155/*
 156 * Return 0 if the superblock checksum type matches the checksum value of that
 157 * algorithm. Pass the raw disk superblock data.
 158 */
 159int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
 160			   const struct btrfs_super_block *disk_sb)
 161{
 162	char result[BTRFS_CSUM_SIZE];
 163	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 164
 165	shash->tfm = fs_info->csum_shash;
 166
 167	/*
 168	 * The super_block structure does not span the whole
 169	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
 170	 * filled with zeros and is included in the checksum.
 171	 */
 172	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
 173			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
 174
 175	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
 176		return 1;
 177
 178	return 0;
 179}
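
/*
 * Editorial note: the checksummed region is everything after the csum
 * field and deliberately spans the full BTRFS_SUPER_INFO_SIZE, so stray
 * bytes in the nominally unused tail also invalidate the checksum:
 *
 *   [0 .. BTRFS_CSUM_SIZE)                      stored csum
 *   [BTRFS_CSUM_SIZE .. BTRFS_SUPER_INFO_SIZE)  input to the csum
 */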
 180
 181static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
 182				      int mirror_num)
 183{
 184	struct btrfs_fs_info *fs_info = eb->fs_info;
 185	int num_folios = num_extent_folios(eb);
 186	int ret = 0;
 187
 188	if (sb_rdonly(fs_info->sb))
 189		return -EROFS;
 190
 191	for (int i = 0; i < num_folios; i++) {
 192		struct folio *folio = eb->folios[i];
 193		u64 start = max_t(u64, eb->start, folio_pos(folio));
 194		u64 end = min_t(u64, eb->start + eb->len,
 195				folio_pos(folio) + eb->folio_size);
 196		u32 len = end - start;
 197
 198		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
 199					      start, folio, offset_in_folio(folio, start),
 200					      mirror_num);
 201		if (ret)
 202			break;
 203	}
 204
 205	return ret;
 206}
 207
 208/*
 209 * helper to read a given tree block, doing retries as required when
 210 * the checksums don't match and we have alternate mirrors to try.
 211 *
 212 * @check:		expected tree parentness check, see the comments of the
 213 *			structure for details.
 214 */
 215int btrfs_read_extent_buffer(struct extent_buffer *eb,
 216			     const struct btrfs_tree_parent_check *check)
 217{
 218	struct btrfs_fs_info *fs_info = eb->fs_info;
 219	int failed = 0;
 220	int ret;
 221	int num_copies = 0;
 222	int mirror_num = 0;
 223	int failed_mirror = 0;
 224
 225	ASSERT(check);
 226
 227	while (1) {
 228		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 229		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
 230		if (!ret)
 231			break;
 232
 233		num_copies = btrfs_num_copies(fs_info,
 234					      eb->start, eb->len);
 235		if (num_copies == 1)
 236			break;
 237
 238		if (!failed_mirror) {
 239			failed = 1;
 240			failed_mirror = eb->read_mirror;
 241		}
 242
 243		mirror_num++;
 244		if (mirror_num == failed_mirror)
 245			mirror_num++;
 246
 247		if (mirror_num > num_copies)
 248			break;
 249	}
 250
 251	if (failed && !ret && failed_mirror)
 252		btrfs_repair_eb_io_failure(eb, failed_mirror);
 253
 254	return ret;
 255}
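
/*
 * Editorial walk-through of the retry loop above, assuming two copies
 * (e.g. RAID1 metadata) and the initial read failing on mirror 1:
 *
 *   pass 1: mirror_num = 0 (default) fails; failed_mirror becomes 1,
 *           mirror_num is bumped to 1, collides with failed_mirror, so 2
 *   pass 2: mirror_num = 2 succeeds; the loop breaks, and since a good
 *           copy exists btrfs_repair_eb_io_failure() rewrites the bad one
 */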
 256
 257/*
 258 * Checksum a dirty tree block before IO.
 259 */
 260blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
 261{
 262	struct extent_buffer *eb = bbio->private;
 263	struct btrfs_fs_info *fs_info = eb->fs_info;
 264	u64 found_start = btrfs_header_bytenr(eb);
 265	u64 last_trans;
 266	u8 result[BTRFS_CSUM_SIZE];
 267	int ret;
 268
 269	/* Btree blocks are always contiguous on disk. */
 270	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
 271		return BLK_STS_IOERR;
 272	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
 273		return BLK_STS_IOERR;
 274
 275	/*
 276	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
 277	 * checksum it but zero-out its content. This is done to preserve
 278	 * ordering of I/O without unnecessarily writing out data.
 279	 */
 280	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
 281		memzero_extent_buffer(eb, 0, eb->len);
 282		return BLK_STS_OK;
 283	}
 284
 285	if (WARN_ON_ONCE(found_start != eb->start))
 286		return BLK_STS_IOERR;
 287	if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
 288					       eb->start, eb->len)))
 289		return BLK_STS_IOERR;
 290
 291	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
 292				    offsetof(struct btrfs_header, fsid),
 293				    BTRFS_FSID_SIZE) == 0);
 294	csum_tree_block(eb, result);
 295
 296	if (btrfs_header_level(eb))
 297		ret = btrfs_check_node(eb);
 298	else
 299		ret = btrfs_check_leaf(eb);
 300
 301	if (ret < 0)
 302		goto error;
 303
 304	/*
 305	 * Also check the generation: an eb that reaches here must be newer than
 306	 * the last committed one, or something seriously wrong has happened.
 307	 */
 308	last_trans = btrfs_get_last_trans_committed(fs_info);
 309	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
 310		ret = -EUCLEAN;
 311		btrfs_err(fs_info,
 312			"block=%llu bad generation, have %llu expect > %llu",
 313			  eb->start, btrfs_header_generation(eb), last_trans);
 314		goto error;
 315	}
 316	write_extent_buffer(eb, result, 0, fs_info->csum_size);
 317	return BLK_STS_OK;
 318
 319error:
 320	btrfs_print_tree(eb, 0);
 321	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
 322		  eb->start);
 323	/*
 324	 * Be noisy if this is an extent buffer from a log tree. We don't abort
 325	 * a transaction in case there's a bad log tree extent buffer, we just
 326	 * fall back to a transaction commit. Still we want to know when there is
 327	 * a bad log tree extent buffer, as that may signal a bug somewhere.
 328	 */
 329	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
 330		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
 331	return errno_to_blk_status(ret);
 332}
 333
 334static bool check_tree_block_fsid(struct extent_buffer *eb)
 335{
 336	struct btrfs_fs_info *fs_info = eb->fs_info;
 337	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
 338	u8 fsid[BTRFS_FSID_SIZE];
 339
 340	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
 341			   BTRFS_FSID_SIZE);
 342
 343	/*
 344	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
 345	 * This is then overwritten by the metadata_uuid, if present, in
 346	 * device_list_add(). The same is true for a seed device. So use of
 347	 * fs_devices::metadata_uuid is appropriate here.
 348	 */
 349	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
 350		return false;
 351
 352	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
 353		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
 354			return false;
 355
 356	return true;
 357}
 358
 359/* Do basic extent buffer checks at read time */
 360int btrfs_validate_extent_buffer(struct extent_buffer *eb,
 361				 const struct btrfs_tree_parent_check *check)
 362{
 363	struct btrfs_fs_info *fs_info = eb->fs_info;
 364	u64 found_start;
 365	const u32 csum_size = fs_info->csum_size;
 366	u8 found_level;
 367	u8 result[BTRFS_CSUM_SIZE];
 368	const u8 *header_csum;
 369	int ret = 0;
 370	const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
 371
 372	ASSERT(check);
 373
 374	found_start = btrfs_header_bytenr(eb);
 375	if (found_start != eb->start) {
 376		btrfs_err_rl(fs_info,
 377			"bad tree block start, mirror %u want %llu have %llu",
 378			     eb->read_mirror, eb->start, found_start);
 379		ret = -EIO;
 380		goto out;
 381	}
 382	if (check_tree_block_fsid(eb)) {
 383		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
 384			     eb->start, eb->read_mirror);
 385		ret = -EIO;
 386		goto out;
 387	}
 388	found_level = btrfs_header_level(eb);
 389	if (found_level >= BTRFS_MAX_LEVEL) {
 390		btrfs_err(fs_info,
 391			"bad tree block level, mirror %u level %d on logical %llu",
 392			eb->read_mirror, btrfs_header_level(eb), eb->start);
 393		ret = -EIO;
 394		goto out;
 395	}
 396
 397	csum_tree_block(eb, result);
 398	header_csum = folio_address(eb->folios[0]) +
 399		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
 400
 401	if (memcmp(result, header_csum, csum_size) != 0) {
 402		btrfs_warn_rl(fs_info,
 403"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
 404			      eb->start, eb->read_mirror,
 405			      CSUM_FMT_VALUE(csum_size, header_csum),
 406			      CSUM_FMT_VALUE(csum_size, result),
 407			      btrfs_header_level(eb),
 408			      ignore_csum ? ", ignored" : "");
 409		if (!ignore_csum) {
 410			ret = -EUCLEAN;
 411			goto out;
 412		}
 413	}
 414
 415	if (found_level != check->level) {
 416		btrfs_err(fs_info,
 417		"level verify failed on logical %llu mirror %u wanted %u found %u",
 418			  eb->start, eb->read_mirror, check->level, found_level);
 419		ret = -EIO;
 420		goto out;
 421	}
 422	if (unlikely(check->transid &&
 423		     btrfs_header_generation(eb) != check->transid)) {
 424		btrfs_err_rl(eb->fs_info,
 425"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
 426				eb->start, eb->read_mirror, check->transid,
 427				btrfs_header_generation(eb));
 428		ret = -EIO;
 429		goto out;
 430	}
 431	if (check->has_first_key) {
 432		const struct btrfs_key *expect_key = &check->first_key;
 433		struct btrfs_key found_key;
 434
 435		if (found_level)
 436			btrfs_node_key_to_cpu(eb, &found_key, 0);
 437		else
 438			btrfs_item_key_to_cpu(eb, &found_key, 0);
 439		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
 440			btrfs_err(fs_info,
 441"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
 442				  eb->start, check->transid,
 443				  expect_key->objectid,
 444				  expect_key->type, expect_key->offset,
 445				  found_key.objectid, found_key.type,
 446				  found_key.offset);
 447			ret = -EUCLEAN;
 448			goto out;
 449		}
 450	}
 451	if (check->owner_root) {
 452		ret = btrfs_check_eb_owner(eb, check->owner_root);
 453		if (ret < 0)
 454			goto out;
 455	}
 456
 457	/*
 458	 * If this is a leaf block and it is corrupt, set the corrupt bit so
 459	 * that we don't try and read the other copies of this block, just
 460	 * return -EIO.
 461	 */
 462	if (found_level == 0 && btrfs_check_leaf(eb)) {
 463		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 464		ret = -EIO;
 465	}
 466
 467	if (found_level > 0 && btrfs_check_node(eb))
 468		ret = -EIO;
 469
 470	if (ret)
 471		btrfs_err(fs_info,
 472		"read time tree block corruption detected on logical %llu mirror %u",
 473			  eb->start, eb->read_mirror);
 474out:
 475	return ret;
 476}
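
/*
 * Editorial summary of the read-time checks above, in order: bytenr must
 * match eb->start, the fsid must belong to this filesystem (or a seed),
 * the level must be below BTRFS_MAX_LEVEL, the checksum must match
 * (unless IGNOREMETACSUMS), the level and transid must match the parent's
 * expectation, the first key must match when provided, the owner root
 * must check out, and finally the leaf or node must pass the tree checker.
 */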
 477
 478#ifdef CONFIG_MIGRATION
 479static int btree_migrate_folio(struct address_space *mapping,
 480		struct folio *dst, struct folio *src, enum migrate_mode mode)
 481{
 482	/*
 483	 * We can't safely write a btree page from here;
 484	 * we haven't done the locking hook.
 485	 */
 486	if (folio_test_dirty(src))
 487		return -EAGAIN;
 488	/*
 489	 * Buffers may be managed in a filesystem specific way.
 490	 * We must have no buffers or drop them.
 491	 */
 492	if (folio_get_private(src) &&
 493	    !filemap_release_folio(src, GFP_KERNEL))
 494		return -EAGAIN;
 495	return migrate_folio(mapping, dst, src, mode);
 496}
 497#else
 498#define btree_migrate_folio NULL
 499#endif
 500
 501static int btree_writepages(struct address_space *mapping,
 502			    struct writeback_control *wbc)
 503{
 504	int ret;
 505
 506	if (wbc->sync_mode == WB_SYNC_NONE) {
 507		struct btrfs_fs_info *fs_info;
 508
 509		if (wbc->for_kupdate)
 510			return 0;
 511
 512		fs_info = inode_to_fs_info(mapping->host);
 513		/* this is a bit racy, but that's ok */
 514		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
 515					     BTRFS_DIRTY_METADATA_THRESH,
 516					     fs_info->dirty_metadata_batch);
 517		if (ret < 0)
 518			return 0;
 519	}
 520	return btree_write_cache_pages(mapping, wbc);
 521}
 522
 523static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
 524{
 525	if (folio_test_writeback(folio) || folio_test_dirty(folio))
 526		return false;
 527
 528	return try_release_extent_buffer(folio);
 529}
 530
 531static void btree_invalidate_folio(struct folio *folio, size_t offset,
 532				 size_t length)
 533{
 534	struct extent_io_tree *tree;
 535
 536	tree = &folio_to_inode(folio)->io_tree;
 537	extent_invalidate_folio(tree, folio, offset);
 538	btree_release_folio(folio, GFP_NOFS);
 539	if (folio_get_private(folio)) {
 540		btrfs_warn(folio_to_fs_info(folio),
 541			   "folio private not zero on folio %llu",
 542			   (unsigned long long)folio_pos(folio));
 543		folio_detach_private(folio);
 544	}
 545}
 546
 547#ifdef DEBUG
 548static bool btree_dirty_folio(struct address_space *mapping,
 549		struct folio *folio)
 550{
 551	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
 552	struct btrfs_subpage_info *spi = fs_info->subpage_info;
 553	struct btrfs_subpage *subpage;
 554	struct extent_buffer *eb;
 555	int cur_bit = 0;
 556	u64 page_start = folio_pos(folio);
 557
 558	if (fs_info->sectorsize == PAGE_SIZE) {
 559		eb = folio_get_private(folio);
 560		BUG_ON(!eb);
 561		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 562		BUG_ON(!atomic_read(&eb->refs));
 563		btrfs_assert_tree_write_locked(eb);
 564		return filemap_dirty_folio(mapping, folio);
 565	}
 566
 567	ASSERT(spi);
 568	subpage = folio_get_private(folio);
 569
 570	for (cur_bit = spi->dirty_offset;
 571	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
 572	     cur_bit++) {
 573		unsigned long flags;
 574		u64 cur;
 575
 576		spin_lock_irqsave(&subpage->lock, flags);
 577		if (!test_bit(cur_bit, subpage->bitmaps)) {
 578			spin_unlock_irqrestore(&subpage->lock, flags);
 579			continue;
 580		}
 581		spin_unlock_irqrestore(&subpage->lock, flags);
 582		cur = page_start + cur_bit * fs_info->sectorsize;
 583
 584		eb = find_extent_buffer(fs_info, cur);
 585		ASSERT(eb);
 586		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 587		ASSERT(atomic_read(&eb->refs));
 588		btrfs_assert_tree_write_locked(eb);
 589		free_extent_buffer(eb);
 590
 591		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
 592	}
 593	return filemap_dirty_folio(mapping, folio);
 594}
 595#else
 596#define btree_dirty_folio filemap_dirty_folio
 597#endif
 598
 599static const struct address_space_operations btree_aops = {
 600	.writepages	= btree_writepages,
 601	.release_folio	= btree_release_folio,
 602	.invalidate_folio = btree_invalidate_folio,
 603	.migrate_folio	= btree_migrate_folio,
 604	.dirty_folio	= btree_dirty_folio,
 605};
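
/*
 * Editorial note (sketch, assuming the btree inode has been set up
 * elsewhere): these address space operations are attached to the btree
 * inode's mapping during fs_info initialization, roughly:
 *
 *	inode->i_mapping->a_ops = &btree_aops;
 */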
 606
 607struct extent_buffer *btrfs_find_create_tree_block(
 608						struct btrfs_fs_info *fs_info,
 609						u64 bytenr, u64 owner_root,
 610						int level)
 611{
 612	if (btrfs_is_testing(fs_info))
 613		return alloc_test_extent_buffer(fs_info, bytenr);
 614	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
 615}
 616
 617/*
 618 * Read the tree block at logical address @bytenr and perform various basic
 619 * but critical verifications.
 620 *
 621 * @check:		expected tree parentness check, see comments of the
 622 *			structure for details.
 623 */
 624struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 625				      struct btrfs_tree_parent_check *check)
 626{
 627	struct extent_buffer *buf = NULL;
 628	int ret;
 629
 630	ASSERT(check);
 631
 632	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
 633					   check->level);
 634	if (IS_ERR(buf))
 635		return buf;
 636
 637	ret = btrfs_read_extent_buffer(buf, check);
 638	if (ret) {
 639		free_extent_buffer_stale(buf);
 640		return ERR_PTR(ret);
 641	}
 642	return buf;
 643
 644}
 645
 646static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 647			 u64 objectid)
 648{
 649	bool dummy = btrfs_is_testing(fs_info);
 650
 651	memset(&root->root_key, 0, sizeof(root->root_key));
 652	memset(&root->root_item, 0, sizeof(root->root_item));
 653	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
 654	root->fs_info = fs_info;
 655	root->root_key.objectid = objectid;
 656	root->node = NULL;
 657	root->commit_root = NULL;
 658	root->state = 0;
 659	RB_CLEAR_NODE(&root->rb_node);
 660
 661	btrfs_set_root_last_trans(root, 0);
 662	root->free_objectid = 0;
 663	root->nr_delalloc_inodes = 0;
 664	root->nr_ordered_extents = 0;
 665	xa_init(&root->inodes);
 666	xa_init(&root->delayed_nodes);
 667
 668	btrfs_init_root_block_rsv(root);
 669
 670	INIT_LIST_HEAD(&root->dirty_list);
 671	INIT_LIST_HEAD(&root->root_list);
 672	INIT_LIST_HEAD(&root->delalloc_inodes);
 673	INIT_LIST_HEAD(&root->delalloc_root);
 674	INIT_LIST_HEAD(&root->ordered_extents);
 675	INIT_LIST_HEAD(&root->ordered_root);
 676	INIT_LIST_HEAD(&root->reloc_dirty_list);
 677	spin_lock_init(&root->delalloc_lock);
 678	spin_lock_init(&root->ordered_extent_lock);
 679	spin_lock_init(&root->accounting_lock);
 680	spin_lock_init(&root->qgroup_meta_rsv_lock);
 681	mutex_init(&root->objectid_mutex);
 682	mutex_init(&root->log_mutex);
 683	mutex_init(&root->ordered_extent_mutex);
 684	mutex_init(&root->delalloc_mutex);
 685	init_waitqueue_head(&root->qgroup_flush_wait);
 686	init_waitqueue_head(&root->log_writer_wait);
 687	init_waitqueue_head(&root->log_commit_wait[0]);
 688	init_waitqueue_head(&root->log_commit_wait[1]);
 689	INIT_LIST_HEAD(&root->log_ctxs[0]);
 690	INIT_LIST_HEAD(&root->log_ctxs[1]);
 691	atomic_set(&root->log_commit[0], 0);
 692	atomic_set(&root->log_commit[1], 0);
 693	atomic_set(&root->log_writers, 0);
 694	atomic_set(&root->log_batch, 0);
 695	refcount_set(&root->refs, 1);
 696	atomic_set(&root->snapshot_force_cow, 0);
 697	atomic_set(&root->nr_swapfiles, 0);
 698	btrfs_set_root_log_transid(root, 0);
 699	root->log_transid_committed = -1;
 700	btrfs_set_root_last_log_commit(root, 0);
 701	root->anon_dev = 0;
 702	if (!dummy) {
 703		extent_io_tree_init(fs_info, &root->dirty_log_pages,
 704				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
 705		extent_io_tree_init(fs_info, &root->log_csum_range,
 706				    IO_TREE_LOG_CSUM_RANGE);
 707	}
 708
 709	spin_lock_init(&root->root_item_lock);
 710	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 711#ifdef CONFIG_BTRFS_DEBUG
 712	INIT_LIST_HEAD(&root->leak_list);
 713	spin_lock(&fs_info->fs_roots_radix_lock);
 714	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
 715	spin_unlock(&fs_info->fs_roots_radix_lock);
 716#endif
 717}
 718
 719static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 720					   u64 objectid, gfp_t flags)
 721{
 722	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
 723	if (root)
 724		__setup_root(root, fs_info, objectid);
 725	return root;
 726}
 727
 728#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 729/* Should only be used by the testing infrastructure */
 730struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
 731{
 732	struct btrfs_root *root;
 733
 734	if (!fs_info)
 735		return ERR_PTR(-EINVAL);
 736
 737	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
 738	if (!root)
 739		return ERR_PTR(-ENOMEM);
 740
 741	/* We don't use the stripesize in selftest, set it as sectorsize */
 742	root->alloc_bytenr = 0;
 743
 744	return root;
 745}
 746#endif
 747
 748static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
 749{
 750	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
 751	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
 752
 753	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
 754}
 755
 756static int global_root_key_cmp(const void *k, const struct rb_node *node)
 757{
 758	const struct btrfs_key *key = k;
 759	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
 760
 761	return btrfs_comp_cpu_keys(key, &root->root_key);
 762}
 763
 764int btrfs_global_root_insert(struct btrfs_root *root)
 765{
 766	struct btrfs_fs_info *fs_info = root->fs_info;
 767	struct rb_node *tmp;
 768	int ret = 0;
 769
 770	write_lock(&fs_info->global_root_lock);
 771	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
 772	write_unlock(&fs_info->global_root_lock);
 773
 774	if (tmp) {
 775		ret = -EEXIST;
 776		btrfs_warn(fs_info, "global root %llu %llu already exists",
 777			   btrfs_root_id(root), root->root_key.offset);
 778	}
 779	return ret;
 780}
 781
 782void btrfs_global_root_delete(struct btrfs_root *root)
 783{
 784	struct btrfs_fs_info *fs_info = root->fs_info;
 785
 786	write_lock(&fs_info->global_root_lock);
 787	rb_erase(&root->rb_node, &fs_info->global_root_tree);
 788	write_unlock(&fs_info->global_root_lock);
 789}
 790
 791struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
 792				     struct btrfs_key *key)
 793{
 794	struct rb_node *node;
 795	struct btrfs_root *root = NULL;
 796
 797	read_lock(&fs_info->global_root_lock);
 798	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
 799	if (node)
 800		root = container_of(node, struct btrfs_root, rb_node);
 801	read_unlock(&fs_info->global_root_lock);
 802
 803	return root;
 804}
 805
 806static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
 807{
 808	struct btrfs_block_group *block_group;
 809	u64 ret;
 810
 811	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
 812		return 0;
 813
 814	if (bytenr)
 815		block_group = btrfs_lookup_block_group(fs_info, bytenr);
 816	else
 817		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
 818	ASSERT(block_group);
 819	if (!block_group)
 820		return 0;
 821	ret = block_group->global_root_id;
 822	btrfs_put_block_group(block_group);
 823
 824	return ret;
 825}
 826
 827struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 828{
 829	struct btrfs_key key = {
 830		.objectid = BTRFS_CSUM_TREE_OBJECTID,
 831		.type = BTRFS_ROOT_ITEM_KEY,
 832		.offset = btrfs_global_root_id(fs_info, bytenr),
 833	};
 834
 835	return btrfs_global_root(fs_info, &key);
 836}
 837
 838struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 839{
 840	struct btrfs_key key = {
 841		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
 842		.type = BTRFS_ROOT_ITEM_KEY,
 843		.offset = btrfs_global_root_id(fs_info, bytenr),
 844	};
 845
 846	return btrfs_global_root(fs_info, &key);
 847}
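
/*
 * Usage sketch (editorial): both helpers above resolve a per-block-group
 * global root. Without EXTENT_TREE_V2 the id is always 0, so e.g.
 * btrfs_csum_root(fs_info, 0) yields the single csum tree; with it, the
 * bytenr selects the block group whose global_root_id picks the root.
 */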
 848
 849struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 850				     u64 objectid)
 851{
 852	struct btrfs_fs_info *fs_info = trans->fs_info;
 853	struct extent_buffer *leaf;
 854	struct btrfs_root *tree_root = fs_info->tree_root;
 855	struct btrfs_root *root;
 856	struct btrfs_key key;
 857	unsigned int nofs_flag;
 858	int ret = 0;
 859
 860	/*
 861	 * We're holding a transaction handle, so use a NOFS memory allocation
 862	 * context to avoid deadlock if reclaim happens.
 863	 */
 864	nofs_flag = memalloc_nofs_save();
 865	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
 866	memalloc_nofs_restore(nofs_flag);
 867	if (!root)
 868		return ERR_PTR(-ENOMEM);
 869
 870	root->root_key.objectid = objectid;
 871	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 872	root->root_key.offset = 0;
 873
 874	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
 875				      0, BTRFS_NESTING_NORMAL);
 876	if (IS_ERR(leaf)) {
 877		ret = PTR_ERR(leaf);
 878		leaf = NULL;
 879		goto fail;
 880	}
 881
 882	root->node = leaf;
 883	btrfs_mark_buffer_dirty(trans, leaf);
 884
 885	root->commit_root = btrfs_root_node(root);
 886	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
 887
 888	btrfs_set_root_flags(&root->root_item, 0);
 889	btrfs_set_root_limit(&root->root_item, 0);
 890	btrfs_set_root_bytenr(&root->root_item, leaf->start);
 891	btrfs_set_root_generation(&root->root_item, trans->transid);
 892	btrfs_set_root_level(&root->root_item, 0);
 893	btrfs_set_root_refs(&root->root_item, 1);
 894	btrfs_set_root_used(&root->root_item, leaf->len);
 895	btrfs_set_root_last_snapshot(&root->root_item, 0);
 896	btrfs_set_root_dirid(&root->root_item, 0);
 897	if (is_fstree(objectid))
 898		generate_random_guid(root->root_item.uuid);
 899	else
 900		export_guid(root->root_item.uuid, &guid_null);
 901	btrfs_set_root_drop_level(&root->root_item, 0);
 902
 903	btrfs_tree_unlock(leaf);
 904
 905	key.objectid = objectid;
 906	key.type = BTRFS_ROOT_ITEM_KEY;
 907	key.offset = 0;
 908	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
 909	if (ret)
 910		goto fail;
 911
 912	return root;
 913
 914fail:
 915	btrfs_put_root(root);
 916
 917	return ERR_PTR(ret);
 918}
 919
 920static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info)
 921{
 922	struct btrfs_root *root;
 923
 924	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
 925	if (!root)
 926		return ERR_PTR(-ENOMEM);
 927
 928	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
 929	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 930	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
 931
 932	return root;
 933}
 934
 935int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
 936			      struct btrfs_root *root)
 937{
 938	struct extent_buffer *leaf;
 939
 940	/*
 941	 * DON'T set SHAREABLE bit for log trees.
 942	 *
 943	 * Log trees are not exposed to user space thus can't be snapshotted,
 944	 * and they go away before a real commit is actually done.
 945	 *
 946	 * They do store pointers to file data extents, and those reference
 947	 * counts still get updated (along with back refs to the log tree).
 948	 */
 949
 950	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
 951			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
 952	if (IS_ERR(leaf))
 953		return PTR_ERR(leaf);
 954
 955	root->node = leaf;
 956
 957	btrfs_mark_buffer_dirty(trans, root->node);
 958	btrfs_tree_unlock(root->node);
 959
 960	return 0;
 961}
 962
 963int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 964			     struct btrfs_fs_info *fs_info)
 965{
 966	struct btrfs_root *log_root;
 967
 968	log_root = alloc_log_tree(fs_info);
 969	if (IS_ERR(log_root))
 970		return PTR_ERR(log_root);
 971
 972	if (!btrfs_is_zoned(fs_info)) {
 973		int ret = btrfs_alloc_log_tree_node(trans, log_root);
 974
 975		if (ret) {
 976			btrfs_put_root(log_root);
 977			return ret;
 978		}
 979	}
 980
 981	WARN_ON(fs_info->log_root_tree);
 982	fs_info->log_root_tree = log_root;
 983	return 0;
 984}
 985
 986int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 987		       struct btrfs_root *root)
 988{
 989	struct btrfs_fs_info *fs_info = root->fs_info;
 990	struct btrfs_root *log_root;
 991	struct btrfs_inode_item *inode_item;
 992	int ret;
 993
 994	log_root = alloc_log_tree(fs_info);
 995	if (IS_ERR(log_root))
 996		return PTR_ERR(log_root);
 997
 998	ret = btrfs_alloc_log_tree_node(trans, log_root);
 999	if (ret) {
1000		btrfs_put_root(log_root);
1001		return ret;
1002	}
1003
1004	btrfs_set_root_last_trans(log_root, trans->transid);
1005	log_root->root_key.offset = btrfs_root_id(root);
1006
1007	inode_item = &log_root->root_item.inode;
1008	btrfs_set_stack_inode_generation(inode_item, 1);
1009	btrfs_set_stack_inode_size(inode_item, 3);
1010	btrfs_set_stack_inode_nlink(inode_item, 1);
1011	btrfs_set_stack_inode_nbytes(inode_item,
1012				     fs_info->nodesize);
1013	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1014
1015	btrfs_set_root_node(&log_root->root_item, log_root->node);
1016
1017	WARN_ON(root->log_root);
1018	root->log_root = log_root;
1019	btrfs_set_root_log_transid(root, 0);
1020	root->log_transid_committed = -1;
1021	btrfs_set_root_last_log_commit(root, 0);
1022	return 0;
1023}
1024
1025static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
1026					      struct btrfs_path *path,
1027					      const struct btrfs_key *key)
1028{
1029	struct btrfs_root *root;
1030	struct btrfs_tree_parent_check check = { 0 };
1031	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1032	u64 generation;
1033	int ret;
1034	int level;
1035
1036	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1037	if (!root)
1038		return ERR_PTR(-ENOMEM);
1039
1040	ret = btrfs_find_root(tree_root, key, path,
1041			      &root->root_item, &root->root_key);
1042	if (ret) {
1043		if (ret > 0)
1044			ret = -ENOENT;
1045		goto fail;
1046	}
1047
1048	generation = btrfs_root_generation(&root->root_item);
1049	level = btrfs_root_level(&root->root_item);
1050	check.level = level;
1051	check.transid = generation;
1052	check.owner_root = key->objectid;
1053	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
1054				     &check);
1055	if (IS_ERR(root->node)) {
1056		ret = PTR_ERR(root->node);
1057		root->node = NULL;
1058		goto fail;
1059	}
1060	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1061		ret = -EIO;
1062		goto fail;
1063	}
1064
1065	/*
1066	 * For real fs, and not log/reloc trees, root owner must
1067	 * match its root node owner
1068	 */
1069	if (!btrfs_is_testing(fs_info) &&
1070	    btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1071	    btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
1072	    btrfs_root_id(root) != btrfs_header_owner(root->node)) {
1073		btrfs_crit(fs_info,
1074"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
1075			   btrfs_root_id(root), root->node->start,
1076			   btrfs_header_owner(root->node),
1077			   btrfs_root_id(root));
1078		ret = -EUCLEAN;
1079		goto fail;
1080	}
1081	root->commit_root = btrfs_root_node(root);
1082	return root;
1083fail:
1084	btrfs_put_root(root);
1085	return ERR_PTR(ret);
1086}
1087
1088struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1089					const struct btrfs_key *key)
1090{
1091	struct btrfs_root *root;
1092	struct btrfs_path *path;
1093
1094	path = btrfs_alloc_path();
1095	if (!path)
1096		return ERR_PTR(-ENOMEM);
1097	root = read_tree_root_path(tree_root, path, key);
1098	btrfs_free_path(path);
1099
1100	return root;
1101}
1102
1103/*
1104 * Initialize subvolume root in-memory structure
1105 *
1106 * @anon_dev:	anonymous device to attach to the root; if zero, allocate a new one
1107 */
1108static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1109{
1110	int ret;
1111
1112	btrfs_drew_lock_init(&root->snapshot_lock);
1113
1114	if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1115	    !btrfs_is_data_reloc_root(root) &&
1116	    is_fstree(btrfs_root_id(root))) {
1117		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1118		btrfs_check_and_init_root_item(&root->root_item);
1119	}
1120
1121	/*
1122	 * Don't assign an anonymous block device to roots that are not exposed
1123	 * to userspace; the id pool is limited to 1M.
1124	 */
1125	if (is_fstree(btrfs_root_id(root)) &&
1126	    btrfs_root_refs(&root->root_item) > 0) {
1127		if (!anon_dev) {
1128			ret = get_anon_bdev(&root->anon_dev);
1129			if (ret)
1130				goto fail;
1131		} else {
1132			root->anon_dev = anon_dev;
1133		}
1134	}
1135
1136	mutex_lock(&root->objectid_mutex);
1137	ret = btrfs_init_root_free_objectid(root);
1138	if (ret) {
1139		mutex_unlock(&root->objectid_mutex);
1140		goto fail;
1141	}
1142
1143	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
1144
1145	mutex_unlock(&root->objectid_mutex);
1146
1147	return 0;
1148fail:
1149	/* The caller is responsible for calling btrfs_free_fs_root */
1150	return ret;
1151}
1152
1153static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1154					       u64 root_id)
1155{
1156	struct btrfs_root *root;
1157
1158	spin_lock(&fs_info->fs_roots_radix_lock);
1159	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1160				 (unsigned long)root_id);
1161	root = btrfs_grab_root(root);
1162	spin_unlock(&fs_info->fs_roots_radix_lock);
1163	return root;
1164}
1165
1166static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
1167						u64 objectid)
1168{
1169	struct btrfs_key key = {
1170		.objectid = objectid,
1171		.type = BTRFS_ROOT_ITEM_KEY,
1172		.offset = 0,
1173	};
1174
1175	switch (objectid) {
1176	case BTRFS_ROOT_TREE_OBJECTID:
1177		return btrfs_grab_root(fs_info->tree_root);
1178	case BTRFS_EXTENT_TREE_OBJECTID:
1179		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1180	case BTRFS_CHUNK_TREE_OBJECTID:
1181		return btrfs_grab_root(fs_info->chunk_root);
1182	case BTRFS_DEV_TREE_OBJECTID:
1183		return btrfs_grab_root(fs_info->dev_root);
1184	case BTRFS_CSUM_TREE_OBJECTID:
1185		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1186	case BTRFS_QUOTA_TREE_OBJECTID:
1187		return btrfs_grab_root(fs_info->quota_root);
1188	case BTRFS_UUID_TREE_OBJECTID:
1189		return btrfs_grab_root(fs_info->uuid_root);
1190	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
1191		return btrfs_grab_root(fs_info->block_group_root);
1192	case BTRFS_FREE_SPACE_TREE_OBJECTID:
1193		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1194	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
1195		return btrfs_grab_root(fs_info->stripe_root);
1196	default:
1197		return NULL;
1198	}
1199}
1200
1201int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1202			 struct btrfs_root *root)
1203{
1204	int ret;
1205
1206	ret = radix_tree_preload(GFP_NOFS);
1207	if (ret)
1208		return ret;
1209
1210	spin_lock(&fs_info->fs_roots_radix_lock);
1211	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1212				(unsigned long)btrfs_root_id(root),
1213				root);
1214	if (ret == 0) {
1215		btrfs_grab_root(root);
1216		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1217	}
1218	spin_unlock(&fs_info->fs_roots_radix_lock);
1219	radix_tree_preload_end();
1220
1221	return ret;
1222}
1223
1224void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
1225{
1226#ifdef CONFIG_BTRFS_DEBUG
1227	struct btrfs_root *root;
1228
1229	while (!list_empty(&fs_info->allocated_roots)) {
1230		char buf[BTRFS_ROOT_NAME_BUF_LEN];
1231
1232		root = list_first_entry(&fs_info->allocated_roots,
1233					struct btrfs_root, leak_list);
1234		btrfs_err(fs_info, "leaked root %s refcount %d",
1235			  btrfs_root_name(&root->root_key, buf),
1236			  refcount_read(&root->refs));
1237		WARN_ON_ONCE(1);
1238		while (refcount_read(&root->refs) > 1)
1239			btrfs_put_root(root);
1240		btrfs_put_root(root);
1241	}
1242#endif
1243}
1244
1245static void free_global_roots(struct btrfs_fs_info *fs_info)
1246{
1247	struct btrfs_root *root;
1248	struct rb_node *node;
1249
1250	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
1251		root = rb_entry(node, struct btrfs_root, rb_node);
1252		rb_erase(&root->rb_node, &fs_info->global_root_tree);
1253		btrfs_put_root(root);
1254	}
1255}
1256
1257void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1258{
1259	struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
1260
1261	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1262	percpu_counter_destroy(&fs_info->delalloc_bytes);
1263	percpu_counter_destroy(&fs_info->ordered_bytes);
1264	if (percpu_counter_initialized(em_counter))
1265		ASSERT(percpu_counter_sum_positive(em_counter) == 0);
1266	percpu_counter_destroy(em_counter);
1267	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1268	btrfs_free_csum_hash(fs_info);
1269	btrfs_free_stripe_hash_table(fs_info);
1270	btrfs_free_ref_cache(fs_info);
1271	kfree(fs_info->balance_ctl);
1272	kfree(fs_info->delayed_root);
1273	free_global_roots(fs_info);
1274	btrfs_put_root(fs_info->tree_root);
1275	btrfs_put_root(fs_info->chunk_root);
1276	btrfs_put_root(fs_info->dev_root);
1277	btrfs_put_root(fs_info->quota_root);
1278	btrfs_put_root(fs_info->uuid_root);
1279	btrfs_put_root(fs_info->fs_root);
1280	btrfs_put_root(fs_info->data_reloc_root);
1281	btrfs_put_root(fs_info->block_group_root);
1282	btrfs_put_root(fs_info->stripe_root);
1283	btrfs_check_leaked_roots(fs_info);
1284	btrfs_extent_buffer_leak_debug_check(fs_info);
1285	kfree(fs_info->super_copy);
1286	kfree(fs_info->super_for_commit);
1287	kvfree(fs_info);
1288}
1289
1290
1291/*
1292 * Get an in-memory reference of a root structure.
1293 *
1294 * For essential trees like root/extent tree, we grab it from fs_info directly.
1295 * For subvolume trees, we check the cached filesystem roots first. If not
1296 * found, then read it from disk and add it to cached fs roots.
1297 *
1298 * Caller should release the root by calling btrfs_put_root() after the usage.
1299 *
1300 * NOTE: Reloc and log trees can't be read by this function as they share the
1301 *	 same root objectid.
1302 *
1303 * @objectid:	root id
1304 * @anon_dev:	preallocated anonymous block device number for new roots,
1305 *		pass NULL for a new allocation.
1306 * @check_ref:	whether to check root item references; if true, return -ENOENT
1307 *		for orphan roots
1308 */
1309static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1310					     u64 objectid, dev_t *anon_dev,
1311					     bool check_ref)
1312{
1313	struct btrfs_root *root;
1314	struct btrfs_path *path;
1315	struct btrfs_key key;
1316	int ret;
1317
1318	root = btrfs_get_global_root(fs_info, objectid);
1319	if (root)
1320		return root;
1321
1322	/*
1323	 * If we're called for non-subvolume trees, and above function didn't
1324	 * find one, do not try to read it from disk.
1325	 *
1326	 * This is namely for free-space-tree and quota tree, which can change
1327	 * at runtime and should only be grabbed from fs_info.
1328	 */
1329	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1330		return ERR_PTR(-ENOENT);
1331again:
1332	root = btrfs_lookup_fs_root(fs_info, objectid);
1333	if (root) {
1334		/*
1335		 * Some other caller may have read out the newly inserted
1336		 * subvolume already (for things like backref walk etc).  Not
1337		 * that common but still possible.  In that case, we just need
1338		 * to free the anon_dev.
1339		 */
1340		if (unlikely(anon_dev && *anon_dev)) {
1341			free_anon_bdev(*anon_dev);
1342			*anon_dev = 0;
1343		}
1344
1345		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1346			btrfs_put_root(root);
1347			return ERR_PTR(-ENOENT);
1348		}
1349		return root;
1350	}
1351
1352	key.objectid = objectid;
1353	key.type = BTRFS_ROOT_ITEM_KEY;
1354	key.offset = (u64)-1;
1355	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1356	if (IS_ERR(root))
1357		return root;
1358
1359	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1360		ret = -ENOENT;
1361		goto fail;
1362	}
1363
1364	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
1365	if (ret)
1366		goto fail;
1367
1368	path = btrfs_alloc_path();
1369	if (!path) {
1370		ret = -ENOMEM;
1371		goto fail;
1372	}
1373	key.objectid = BTRFS_ORPHAN_OBJECTID;
1374	key.type = BTRFS_ORPHAN_ITEM_KEY;
1375	key.offset = objectid;
1376
1377	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1378	btrfs_free_path(path);
1379	if (ret < 0)
1380		goto fail;
1381	if (ret == 0)
1382		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1383
1384	ret = btrfs_insert_fs_root(fs_info, root);
1385	if (ret) {
1386		if (ret == -EEXIST) {
1387			btrfs_put_root(root);
1388			goto again;
1389		}
1390		goto fail;
1391	}
1392	return root;
1393fail:
1394	/*
1395	 * If our caller provided us an anonymous device, then it's the caller's
1396	 * responsibility to free it in case we fail. So we have to set our
1397	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
1398	 * and once again by our caller.
1399	 */
1400	if (anon_dev && *anon_dev)
1401		root->anon_dev = 0;
1402	btrfs_put_root(root);
1403	return ERR_PTR(ret);
1404}
1405
1406/*
1407 * Get in-memory reference of a root structure
1408 *
1409 * @objectid:	tree objectid
1410 * @check_ref:	if set, verify that the tree exists and the item has at least
1411 *		one reference
1412 */
1413struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1414				     u64 objectid, bool check_ref)
1415{
1416	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
1417}
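
/*
 * Usage sketch (editorial, hypothetical caller, not upstream code): every
 * successful lookup must be paired with btrfs_put_root(), as noted in the
 * comment above btrfs_get_root_ref().
 */
#if 0 /* illustration only */
static int example_with_fs_root(struct btrfs_fs_info *fs_info, u64 objectid)
{
	struct btrfs_root *root = btrfs_get_fs_root(fs_info, objectid, true);

	if (IS_ERR(root))
		return PTR_ERR(root);
	/* ... use the root ... */
	btrfs_put_root(root);
	return 0;
}
#endif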
1418
1419/*
1420 * Get in-memory reference of a root structure, created as new, optionally pass
1421 * the anonymous block device id
1422 *
1423 * @objectid:	tree objectid
1424 * @anon_dev:	if NULL, allocate a new anonymous block device or use the
1425 *		parameter value if not NULL
1426 */
1427struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1428					 u64 objectid, dev_t *anon_dev)
1429{
1430	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1431}
1432
1433/*
1434 * Return a root for the given objectid.
1435 *
1436 * @fs_info:	the fs_info
1437 * @objectid:	the objectid we need to lookup
1438 *
1439 * This is exclusively used for backref walking, and exists specifically because
1440 * of how qgroups does lookups.  Qgroups will do a backref lookup at delayed ref
1441 * creation time, which means we may have to read the tree_root in order to look
1442 * up a fs root that is not in memory.  If the root is not in memory we will
1443 * read the tree root commit root and look up the fs root from there.  This is a
1444 * temporary root, it will not be inserted into the radix tree as it doesn't
1445 * have the most uptodate information, it'll simply be discarded once the
1446 * backref code is finished using the root.
1447 */
1448struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
1449						 struct btrfs_path *path,
1450						 u64 objectid)
1451{
1452	struct btrfs_root *root;
1453	struct btrfs_key key;
1454
1455	ASSERT(path->search_commit_root && path->skip_locking);
1456
1457	/*
1458	 * This can return -ENOENT if we ask for a root that doesn't exist, but
1459	 * since this is called via the backref walking code we won't be looking
1460	 * up a root that doesn't exist, unless there's corruption.  So if root
1461	 * != NULL just return it.
1462	 */
1463	root = btrfs_get_global_root(fs_info, objectid);
1464	if (root)
1465		return root;
1466
1467	root = btrfs_lookup_fs_root(fs_info, objectid);
1468	if (root)
1469		return root;
1470
1471	key.objectid = objectid;
1472	key.type = BTRFS_ROOT_ITEM_KEY;
1473	key.offset = (u64)-1;
1474	root = read_tree_root_path(fs_info->tree_root, path, &key);
1475	btrfs_release_path(path);
1476
1477	return root;
1478}
1479
1480static int cleaner_kthread(void *arg)
1481{
1482	struct btrfs_fs_info *fs_info = arg;
1483	int again;
1484
1485	while (1) {
1486		again = 0;
1487
1488		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1489
1490		/* Make the cleaner go to sleep early. */
1491		if (btrfs_need_cleaner_sleep(fs_info))
1492			goto sleep;
1493
1494		/*
1495		 * Do not do anything if we might cause open_ctree() to block
1496		 * before we have finished mounting the filesystem.
1497		 */
1498		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1499			goto sleep;
1500
1501		if (!mutex_trylock(&fs_info->cleaner_mutex))
1502			goto sleep;
1503
1504		/*
1505		 * Avoid the problem that we change the status of the fs
1506		 * during the above check and trylock.
1507		 */
1508		if (btrfs_need_cleaner_sleep(fs_info)) {
1509			mutex_unlock(&fs_info->cleaner_mutex);
1510			goto sleep;
1511		}
1512
1513		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
1514			btrfs_sysfs_feature_update(fs_info);
1515
1516		btrfs_run_delayed_iputs(fs_info);
1517
1518		again = btrfs_clean_one_deleted_snapshot(fs_info);
1519		mutex_unlock(&fs_info->cleaner_mutex);
1520
1521		/*
1522		 * The defragger has dealt with the R/O remount and umount; we
1523		 * needn't do anything special here.
1524		 */
1525		btrfs_run_defrag_inodes(fs_info);
1526
1527		/*
1528		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
1529		 * with relocation (btrfs_relocate_chunk) and relocation
1530		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1531		 * after acquiring fs_info->reclaim_bgs_lock. So we
1532		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1533		 * unused block groups.
1534		 */
1535		btrfs_delete_unused_bgs(fs_info);
1536
1537		/*
1538		 * Reclaim block groups in the reclaim_bgs list after we deleted
1539		 * all unused block_groups. This possibly gives us some more free
1540		 * space.
1541		 */
1542		btrfs_reclaim_bgs(fs_info);
1543sleep:
1544		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1545		if (kthread_should_park())
1546			kthread_parkme();
1547		if (kthread_should_stop())
1548			return 0;
1549		if (!again) {
1550			set_current_state(TASK_INTERRUPTIBLE);
1551			schedule();
1552			__set_current_state(TASK_RUNNING);
1553		}
1554	}
1555}
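
/*
 * Editorial note: 'again' is set when btrfs_clean_one_deleted_snapshot()
 * reports more work to do; in that case the loop re-runs immediately
 * instead of sleeping, so large snapshot deletions keep making progress.
 */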
1556
1557static int transaction_kthread(void *arg)
1558{
1559	struct btrfs_root *root = arg;
1560	struct btrfs_fs_info *fs_info = root->fs_info;
1561	struct btrfs_trans_handle *trans;
1562	struct btrfs_transaction *cur;
1563	u64 transid;
1564	time64_t delta;
1565	unsigned long delay;
1566	bool cannot_commit;
1567
1568	do {
1569		cannot_commit = false;
1570		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
1571		mutex_lock(&fs_info->transaction_kthread_mutex);
1572
1573		spin_lock(&fs_info->trans_lock);
1574		cur = fs_info->running_transaction;
1575		if (!cur) {
1576			spin_unlock(&fs_info->trans_lock);
1577			goto sleep;
1578		}
1579
1580		delta = ktime_get_seconds() - cur->start_time;
1581		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1582		    cur->state < TRANS_STATE_COMMIT_PREP &&
1583		    delta < fs_info->commit_interval) {
1584			spin_unlock(&fs_info->trans_lock);
1585			delay -= msecs_to_jiffies((delta - 1) * 1000);
1586			delay = min(delay,
1587				    msecs_to_jiffies(fs_info->commit_interval * 1000));
1588			goto sleep;
1589		}
1590		transid = cur->transid;
1591		spin_unlock(&fs_info->trans_lock);
1592
1593		/* If the file system is aborted, this will always fail. */
1594		trans = btrfs_attach_transaction(root);
1595		if (IS_ERR(trans)) {
1596			if (PTR_ERR(trans) != -ENOENT)
1597				cannot_commit = true;
1598			goto sleep;
1599		}
1600		if (transid == trans->transid) {
1601			btrfs_commit_transaction(trans);
1602		} else {
1603			btrfs_end_transaction(trans);
1604		}
1605sleep:
1606		wake_up_process(fs_info->cleaner_kthread);
1607		mutex_unlock(&fs_info->transaction_kthread_mutex);
1608
1609		if (BTRFS_FS_ERROR(fs_info))
1610			btrfs_cleanup_transaction(fs_info);
1611		if (!kthread_should_stop() &&
1612				(!btrfs_transaction_blocked(fs_info) ||
1613				 cannot_commit))
1614			schedule_timeout_interruptible(delay);
1615	} while (!kthread_should_stop());
1616	return 0;
1617}
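
/*
 * Editorial example of the delay math above: with commit_interval = 30s
 * and a running transaction that is 12s old, delay starts at 30s, is
 * reduced by (12 - 1)s to 19s, and is clamped to at most one full
 * commit_interval before the kthread sleeps and re-checks.
 */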
1618
1619/*
1620 * This will find the highest generation in the array of root backups.  The
1621 * index of the newest entry is returned, or -EINVAL if we can't find
1622 * anything.
1623 *
1624 * We check to make sure the array is valid by comparing the
1625 * generation of the latest root in the array with the generation
1626 * in the super block.  If they don't match we pitch it.
1627 */
1628static int find_newest_super_backup(struct btrfs_fs_info *info)
1629{
1630	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1631	u64 cur;
1632	struct btrfs_root_backup *root_backup;
1633	int i;
1634
1635	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1636		root_backup = info->super_copy->super_roots + i;
1637		cur = btrfs_backup_tree_root_gen(root_backup);
1638		if (cur == newest_gen)
1639			return i;
1640	}
1641
1642	return -EINVAL;
1643}
1644
1645/*
1646 * Copy all the root pointers into the super backup array.
1647 * This will bump the backup pointer by one when it is
1648 * done.
1649 */
1650static void backup_super_roots(struct btrfs_fs_info *info)
1651{
1652	const int next_backup = info->backup_root_index;
1653	struct btrfs_root_backup *root_backup;
1654
1655	root_backup = info->super_for_commit->super_roots + next_backup;
1656
1657	/*
1658	 * make sure all of our padding and empty slots get zero filled
1659	 * regardless of which ones we use today
1660	 */
1661	memset(root_backup, 0, sizeof(*root_backup));
1662
1663	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1664
1665	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1666	btrfs_set_backup_tree_root_gen(root_backup,
1667			       btrfs_header_generation(info->tree_root->node));
1668
1669	btrfs_set_backup_tree_root_level(root_backup,
1670			       btrfs_header_level(info->tree_root->node));
1671
1672	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1673	btrfs_set_backup_chunk_root_gen(root_backup,
1674			       btrfs_header_generation(info->chunk_root->node));
1675	btrfs_set_backup_chunk_root_level(root_backup,
1676			       btrfs_header_level(info->chunk_root->node));
1677
1678	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
1679		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1680		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1681
1682		btrfs_set_backup_extent_root(root_backup,
1683					     extent_root->node->start);
1684		btrfs_set_backup_extent_root_gen(root_backup,
1685				btrfs_header_generation(extent_root->node));
1686		btrfs_set_backup_extent_root_level(root_backup,
1687					btrfs_header_level(extent_root->node));
1688
1689		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1690		btrfs_set_backup_csum_root_gen(root_backup,
1691					       btrfs_header_generation(csum_root->node));
1692		btrfs_set_backup_csum_root_level(root_backup,
1693						 btrfs_header_level(csum_root->node));
1694	}
1695
1696	/*
1697	 * we might commit during log recovery, which happens before we set
1698	 * the fs_root.  Make sure it is valid before we fill it in.
1699	 */
1700	if (info->fs_root && info->fs_root->node) {
1701		btrfs_set_backup_fs_root(root_backup,
1702					 info->fs_root->node->start);
1703		btrfs_set_backup_fs_root_gen(root_backup,
1704			       btrfs_header_generation(info->fs_root->node));
1705		btrfs_set_backup_fs_root_level(root_backup,
1706			       btrfs_header_level(info->fs_root->node));
1707	}
1708
1709	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1710	btrfs_set_backup_dev_root_gen(root_backup,
1711			       btrfs_header_generation(info->dev_root->node));
1712	btrfs_set_backup_dev_root_level(root_backup,
1713				       btrfs_header_level(info->dev_root->node));
1714
1715	btrfs_set_backup_total_bytes(root_backup,
1716			     btrfs_super_total_bytes(info->super_copy));
1717	btrfs_set_backup_bytes_used(root_backup,
1718			     btrfs_super_bytes_used(info->super_copy));
1719	btrfs_set_backup_num_devices(root_backup,
1720			     btrfs_super_num_devices(info->super_copy));
1721
1722	/*
1723	 * if we don't copy this out to the super_copy, it won't get remembered
1724	 * for the next commit
1725	 */
1726	memcpy(&info->super_copy->super_roots,
1727	       &info->super_for_commit->super_roots,
1728	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1729}
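
/*
 * Editorial example: the backup slots form a ring of BTRFS_NUM_BACKUP_ROOTS
 * entries. With 4 slots and backup_root_index == 3, this commit fills
 * slot 3 and the index advances to (3 + 1) % 4 == 0.
 */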
1730
1731/*
1732 * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
1733 * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1734 *
1735 * @fs_info:  filesystem whose backup roots need to be read
1736 * @priority: priority of backup root required
1737 *
1738 * Returns backup root index on success and -EINVAL otherwise.
1739 */
1740static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1741{
1742	int backup_index = find_newest_super_backup(fs_info);
1743	struct btrfs_super_block *super = fs_info->super_copy;
1744	struct btrfs_root_backup *root_backup;
1745
1746	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1747		if (priority == 0)
1748			return backup_index;
1749
1750		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1751		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1752	} else {
1753		return -EINVAL;
1754	}
1755
1756	root_backup = super->super_roots + backup_index;
1757
1758	btrfs_set_super_generation(super,
1759				   btrfs_backup_tree_root_gen(root_backup));
1760	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1761	btrfs_set_super_root_level(super,
1762				   btrfs_backup_tree_root_level(root_backup));
1763	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1764
1765	/*
1766	 * FIXME: the total bytes and num_devices need to match, or we would
1767	 * need a fsck.
1768	 */
1769	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1770	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1771
1772	return backup_index;
1773}
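
/*
 * Worked example of the mapping above (illustrative, assuming
 * BTRFS_NUM_BACKUP_ROOTS == 4): if find_newest_super_backup() returns
 * slot 2, priority 0 reads slot 2, priority 1 reads (2 + 4 - 1) % 4 == 1,
 * priority 2 reads slot 0 and priority 3 reads slot 3, i.e. we walk the
 * ring backwards from the newest backup to the oldest.
 */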
1774
1775/* Helper to clean up workers. */
1776static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1777{
1778	btrfs_destroy_workqueue(fs_info->fixup_workers);
1779	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1780	btrfs_destroy_workqueue(fs_info->workers);
1781	if (fs_info->endio_workers)
1782		destroy_workqueue(fs_info->endio_workers);
1783	if (fs_info->rmw_workers)
1784		destroy_workqueue(fs_info->rmw_workers);
1785	if (fs_info->compressed_write_workers)
1786		destroy_workqueue(fs_info->compressed_write_workers);
1787	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1788	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1789	btrfs_destroy_workqueue(fs_info->delayed_workers);
1790	btrfs_destroy_workqueue(fs_info->caching_workers);
1791	btrfs_destroy_workqueue(fs_info->flush_workers);
1792	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1793	if (fs_info->discard_ctl.discard_workers)
1794		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1795	/*
1796	 * Now that all other work queues are destroyed, we can safely destroy
1797	 * the queues used for metadata I/O, since tasks from those other work
1798	 * queues can do metadata I/O operations.
1799	 */
1800	if (fs_info->endio_meta_workers)
1801		destroy_workqueue(fs_info->endio_meta_workers);
1802}
1803
1804static void free_root_extent_buffers(struct btrfs_root *root)
1805{
1806	if (root) {
1807		free_extent_buffer(root->node);
1808		free_extent_buffer(root->commit_root);
1809		root->node = NULL;
1810		root->commit_root = NULL;
1811	}
1812}
1813
1814static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1815{
1816	struct btrfs_root *root, *tmp;
1817
1818	rbtree_postorder_for_each_entry_safe(root, tmp,
1819					     &fs_info->global_root_tree,
1820					     rb_node)
1821		free_root_extent_buffers(root);
1822}
1823
1824/* Helper to clean up tree roots. */
1825static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1826{
1827	free_root_extent_buffers(info->tree_root);
1828
1829	free_global_root_pointers(info);
1830	free_root_extent_buffers(info->dev_root);
1831	free_root_extent_buffers(info->quota_root);
1832	free_root_extent_buffers(info->uuid_root);
1833	free_root_extent_buffers(info->fs_root);
1834	free_root_extent_buffers(info->data_reloc_root);
1835	free_root_extent_buffers(info->block_group_root);
1836	free_root_extent_buffers(info->stripe_root);
1837	if (free_chunk_root)
1838		free_root_extent_buffers(info->chunk_root);
1839}
1840
1841void btrfs_put_root(struct btrfs_root *root)
1842{
1843	if (!root)
1844		return;
1845
1846	if (refcount_dec_and_test(&root->refs)) {
1847		if (WARN_ON(!xa_empty(&root->inodes)))
1848			xa_destroy(&root->inodes);
1849		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1850		if (root->anon_dev)
1851			free_anon_bdev(root->anon_dev);
1852		free_root_extent_buffers(root);
1853#ifdef CONFIG_BTRFS_DEBUG
1854		spin_lock(&root->fs_info->fs_roots_radix_lock);
1855		list_del_init(&root->leak_list);
1856		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1857#endif
1858		kfree(root);
1859	}
1860}
1861
1862void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1863{
1864	int ret;
1865	struct btrfs_root *gang[8];
1866	int i;
1867
1868	while (!list_empty(&fs_info->dead_roots)) {
1869		gang[0] = list_entry(fs_info->dead_roots.next,
1870				     struct btrfs_root, root_list);
1871		list_del(&gang[0]->root_list);
1872
1873		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1874			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1875		btrfs_put_root(gang[0]);
1876	}
1877
1878	while (1) {
1879		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1880					     (void **)gang, 0,
1881					     ARRAY_SIZE(gang));
1882		if (!ret)
1883			break;
1884		for (i = 0; i < ret; i++)
1885			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1886	}
1887}
1888
1889static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1890{
1891	mutex_init(&fs_info->scrub_lock);
1892	atomic_set(&fs_info->scrubs_running, 0);
1893	atomic_set(&fs_info->scrub_pause_req, 0);
1894	atomic_set(&fs_info->scrubs_paused, 0);
1895	atomic_set(&fs_info->scrub_cancel_req, 0);
1896	init_waitqueue_head(&fs_info->scrub_pause_wait);
1897	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1898}
1899
1900static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1901{
1902	spin_lock_init(&fs_info->balance_lock);
1903	mutex_init(&fs_info->balance_mutex);
1904	atomic_set(&fs_info->balance_pause_req, 0);
1905	atomic_set(&fs_info->balance_cancel_req, 0);
1906	fs_info->balance_ctl = NULL;
1907	init_waitqueue_head(&fs_info->balance_wait_q);
1908	atomic_set(&fs_info->reloc_cancel_req, 0);
1909}
1910
1911static int btrfs_init_btree_inode(struct super_block *sb)
1912{
1913	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1914	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1915					      fs_info->tree_root);
1916	struct inode *inode;
1917
1918	inode = new_inode(sb);
1919	if (!inode)
1920		return -ENOMEM;
1921
1922	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
1923	set_nlink(inode, 1);
1924	/*
1925	 * We set the i_size on the btree inode to the max possible offset.
1926	 * The real end of the address space is determined by all of
1927	 * the devices in the system.
1928	 */
1929	inode->i_size = OFFSET_MAX;
1930	inode->i_mapping->a_ops = &btree_aops;
1931	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1932
1933	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1934			    IO_TREE_BTREE_INODE_IO);
1935	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1936
1937	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1938	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1939	__insert_inode_hash(inode, hash);
1940	fs_info->btree_inode = inode;
1941
1942	return 0;
1943}
1944
1945static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1946{
1947	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1948	init_rwsem(&fs_info->dev_replace.rwsem);
1949	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1950}
1951
1952static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1953{
1954	spin_lock_init(&fs_info->qgroup_lock);
1955	mutex_init(&fs_info->qgroup_ioctl_lock);
1956	fs_info->qgroup_tree = RB_ROOT;
1957	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1958	fs_info->qgroup_seq = 1;
1959	fs_info->qgroup_ulist = NULL;
1960	fs_info->qgroup_rescan_running = false;
1961	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1962	mutex_init(&fs_info->qgroup_rescan_lock);
1963}
1964
1965static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1966{
1967	u32 max_active = fs_info->thread_pool_size;
1968	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1969	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1970
1971	fs_info->workers =
1972		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1973
1974	fs_info->delalloc_workers =
1975		btrfs_alloc_workqueue(fs_info, "delalloc",
1976				      flags, max_active, 2);
1977
1978	fs_info->flush_workers =
1979		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1980				      flags, max_active, 0);
1981
1982	fs_info->caching_workers =
1983		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1984
1985	fs_info->fixup_workers =
1986		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1987
1988	fs_info->endio_workers =
1989		alloc_workqueue("btrfs-endio", flags, max_active);
1990	fs_info->endio_meta_workers =
1991		alloc_workqueue("btrfs-endio-meta", flags, max_active);
1992	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
1993	fs_info->endio_write_workers =
1994		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
1995				      max_active, 2);
1996	fs_info->compressed_write_workers =
1997		alloc_workqueue("btrfs-compressed-write", flags, max_active);
1998	fs_info->endio_freespace_worker =
1999		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2000				      max_active, 0);
2001	fs_info->delayed_workers =
2002		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2003				      max_active, 0);
2004	fs_info->qgroup_rescan_workers =
2005		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
2006					      ordered_flags);
2007	fs_info->discard_ctl.discard_workers =
2008		alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
2009
2010	if (!(fs_info->workers &&
2011	      fs_info->delalloc_workers && fs_info->flush_workers &&
2012	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2013	      fs_info->compressed_write_workers &&
2014	      fs_info->endio_write_workers &&
2015	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2016	      fs_info->caching_workers && fs_info->fixup_workers &&
2017	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
2018	      fs_info->discard_ctl.discard_workers)) {
2019		return -ENOMEM;
2020	}
2021
2022	return 0;
2023}
2024
2025static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2026{
2027	struct crypto_shash *csum_shash;
2028	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2029
2030	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2031
2032	if (IS_ERR(csum_shash)) {
2033		btrfs_err(fs_info, "error allocating %s hash for checksum",
2034			  csum_driver);
2035		return PTR_ERR(csum_shash);
2036	}
2037
2038	fs_info->csum_shash = csum_shash;
2039
2040	/*
2041	 * Check if the checksum implementation is a fast accelerated one.
2042	 * As-is this is a bit of a hack and should be replaced once the csum
2043	 * implementations provide that information themselves.
2044	 */
2045	switch (csum_type) {
2046	case BTRFS_CSUM_TYPE_CRC32:
2047		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2048			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2049		break;
2050	case BTRFS_CSUM_TYPE_XXHASH:
2051		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2052		break;
2053	default:
2054		break;
2055	}
2056
2057	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2058			btrfs_super_csum_name(csum_type),
2059			crypto_shash_driver_name(csum_shash));
2060	return 0;
2061}
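
/*
 * Illustrative note on the "fast csum" detection above: for
 * BTRFS_CSUM_TYPE_CRC32 the name reported by crypto_shash_driver_name()
 * is typically "crc32c-generic" for the software fallback or an
 * accelerated driver such as "crc32c-intel"; only the latter, which does
 * not contain "generic", sets BTRFS_FS_CSUM_IMPL_FAST.
 */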
2062
2063static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2064			    struct btrfs_fs_devices *fs_devices)
2065{
2066	int ret;
2067	struct btrfs_tree_parent_check check = { 0 };
2068	struct btrfs_root *log_tree_root;
2069	struct btrfs_super_block *disk_super = fs_info->super_copy;
2070	u64 bytenr = btrfs_super_log_root(disk_super);
2071	int level = btrfs_super_log_root_level(disk_super);
2072
2073	if (fs_devices->rw_devices == 0) {
2074		btrfs_warn(fs_info, "log replay required on RO media");
2075		return -EIO;
2076	}
2077
2078	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2079					 GFP_KERNEL);
2080	if (!log_tree_root)
2081		return -ENOMEM;
2082
2083	check.level = level;
2084	check.transid = fs_info->generation + 1;
2085	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
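	/*
	 * The log root is written by the transaction that follows the last
	 * committed one, hence the expected transid of generation + 1 set
	 * above.
	 */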
2086	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2087	if (IS_ERR(log_tree_root->node)) {
2088		btrfs_warn(fs_info, "failed to read log tree");
2089		ret = PTR_ERR(log_tree_root->node);
2090		log_tree_root->node = NULL;
2091		btrfs_put_root(log_tree_root);
2092		return ret;
2093	}
2094	if (!extent_buffer_uptodate(log_tree_root->node)) {
2095		btrfs_err(fs_info, "failed to read log tree");
2096		btrfs_put_root(log_tree_root);
2097		return -EIO;
2098	}
2099
2100	/* returns with log_tree_root freed on success */
2101	ret = btrfs_recover_log_trees(log_tree_root);
2102	if (ret) {
2103		btrfs_handle_fs_error(fs_info, ret,
2104				      "Failed to recover log tree");
2105		btrfs_put_root(log_tree_root);
2106		return ret;
2107	}
2108
2109	if (sb_rdonly(fs_info->sb)) {
2110		ret = btrfs_commit_super(fs_info);
2111		if (ret)
2112			return ret;
2113	}
2114
2115	return 0;
2116}
2117
2118static int load_global_roots_objectid(struct btrfs_root *tree_root,
2119				      struct btrfs_path *path, u64 objectid,
2120				      const char *name)
2121{
2122	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2123	struct btrfs_root *root;
2124	u64 max_global_id = 0;
2125	int ret;
2126	struct btrfs_key key = {
2127		.objectid = objectid,
2128		.type = BTRFS_ROOT_ITEM_KEY,
2129		.offset = 0,
2130	};
2131	bool found = false;
2132
2133	/* If we have IGNOREDATACSUMS, skip loading these roots. */
2134	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2135	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2136		set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2137		return 0;
2138	}
2139
2140	while (1) {
2141		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2142		if (ret < 0)
2143			break;
2144
2145		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2146			ret = btrfs_next_leaf(tree_root, path);
2147			if (ret) {
2148				if (ret > 0)
2149					ret = 0;
2150				break;
2151			}
2152		}
2153		ret = 0;
2154
2155		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2156		if (key.objectid != objectid)
2157			break;
2158		btrfs_release_path(path);
2159
2160		/*
2161		 * Just worry about this for the extent tree, it'll be the
2162		 * same for everybody.
2163		 */
2164		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2165			max_global_id = max(max_global_id, key.offset);
2166
2167		found = true;
2168		root = read_tree_root_path(tree_root, path, &key);
2169		if (IS_ERR(root)) {
2170			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2171				ret = PTR_ERR(root);
2172			break;
2173		}
2174		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2175		ret = btrfs_global_root_insert(root);
2176		if (ret) {
2177			btrfs_put_root(root);
2178			break;
2179		}
2180		key.offset++;
2181	}
2182	btrfs_release_path(path);
2183
2184	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2185		fs_info->nr_global_roots = max_global_id + 1;
2186
2187	if (!found || ret) {
2188		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2189			set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2190
2191		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2192			ret = ret ? ret : -ENOENT;
2193		else
2194			ret = 0;
2195		btrfs_err(fs_info, "failed to load root %s", name);
2196	}
2197	return ret;
2198}
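
/*
 * Illustrative key layout for the global roots loaded above: each root
 * item lives in the tree root with key (objectid, BTRFS_ROOT_ITEM_KEY,
 * global_id), e.g. (BTRFS_EXTENT_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 0)
 * for the first extent root. Without extent-tree-v2 there is a single
 * global root per objectid, at offset 0.
 */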
2199
2200static int load_global_roots(struct btrfs_root *tree_root)
2201{
2202	struct btrfs_path *path;
2203	int ret = 0;
2204
2205	path = btrfs_alloc_path();
2206	if (!path)
2207		return -ENOMEM;
2208
2209	ret = load_global_roots_objectid(tree_root, path,
2210					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2211	if (ret)
2212		goto out;
2213	ret = load_global_roots_objectid(tree_root, path,
2214					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2215	if (ret)
2216		goto out;
2217	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2218		goto out;
2219	ret = load_global_roots_objectid(tree_root, path,
2220					 BTRFS_FREE_SPACE_TREE_OBJECTID,
2221					 "free space");
2222out:
2223	btrfs_free_path(path);
2224	return ret;
2225}
2226
2227static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2228{
2229	struct btrfs_root *tree_root = fs_info->tree_root;
2230	struct btrfs_root *root;
2231	struct btrfs_key location;
2232	int ret;
2233
2234	ASSERT(fs_info->tree_root);
2235
2236	ret = load_global_roots(tree_root);
2237	if (ret)
2238		return ret;
2239
2240	location.type = BTRFS_ROOT_ITEM_KEY;
2241	location.offset = 0;
2242
2243	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2244		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2245		root = btrfs_read_tree_root(tree_root, &location);
2246		if (IS_ERR(root)) {
2247			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2248				ret = PTR_ERR(root);
2249				goto out;
2250			}
2251		} else {
2252			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2253			fs_info->block_group_root = root;
2254		}
2255	}
2256
2257	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2258	root = btrfs_read_tree_root(tree_root, &location);
2259	if (IS_ERR(root)) {
2260		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2261			ret = PTR_ERR(root);
2262			goto out;
2263		}
2264	} else {
2265		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2266		fs_info->dev_root = root;
2267	}
2268	/* Initialize fs_info for all devices in any case */
2269	ret = btrfs_init_devices_late(fs_info);
2270	if (ret)
2271		goto out;
2272
2273	/*
2274	 * This tree can share blocks with some other fs tree during relocation,
2275	 * so we need a proper setup via btrfs_get_fs_root().
2276	 */
2277	root = btrfs_get_fs_root(tree_root->fs_info,
2278				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2279	if (IS_ERR(root)) {
2280		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2281			ret = PTR_ERR(root);
2282			goto out;
2283		}
2284	} else {
2285		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2286		fs_info->data_reloc_root = root;
2287	}
2288
2289	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2290	root = btrfs_read_tree_root(tree_root, &location);
2291	if (!IS_ERR(root)) {
2292		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2293		fs_info->quota_root = root;
2294	}
2295
2296	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2297	root = btrfs_read_tree_root(tree_root, &location);
2298	if (IS_ERR(root)) {
2299		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2300			ret = PTR_ERR(root);
2301			if (ret != -ENOENT)
2302				goto out;
2303		}
2304	} else {
2305		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2306		fs_info->uuid_root = root;
2307	}
2308
2309	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2310		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2311		root = btrfs_read_tree_root(tree_root, &location);
2312		if (IS_ERR(root)) {
2313			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2314				ret = PTR_ERR(root);
2315				goto out;
2316			}
2317		} else {
2318			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2319			fs_info->stripe_root = root;
2320		}
2321	}
2322
2323	return 0;
2324out:
2325	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2326		   location.objectid, ret);
2327	return ret;
2328}
2329
2330/*
2331 * Real super block validation
2332 * NOTE: super csum type and incompat features will not be checked here.
2333 *
2334 * @sb:		super block to check
2335 * @mirror_num:	which copy of the super block to check the bytenr of:
2336 * 		0	the primary (1st) sb
2337 * 		1, 2	2nd and 3rd backup copy
2338 * 	       -1	skip bytenr check
2339 */
2340int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
2341			 const struct btrfs_super_block *sb, int mirror_num)
2342{
2343	u64 nodesize = btrfs_super_nodesize(sb);
2344	u64 sectorsize = btrfs_super_sectorsize(sb);
2345	int ret = 0;
2346	const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
2347
2348	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2349		btrfs_err(fs_info, "no valid FS found");
2350		ret = -EINVAL;
2351	}
2352	if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
2353		if (!ignore_flags) {
2354			btrfs_err(fs_info,
2355			"unrecognized or unsupported super flag 0x%llx",
2356				  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2357			ret = -EINVAL;
2358		} else {
2359			btrfs_info(fs_info,
2360			"unrecognized or unsupported super flags: 0x%llx, ignored",
2361				   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2362		}
2363	}
2364	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2365		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2366				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2367		ret = -EINVAL;
2368	}
2369	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2370		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2371				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2372		ret = -EINVAL;
2373	}
2374	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2375		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2376				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2377		ret = -EINVAL;
2378	}
2379
2380	/*
2381	 * Check sectorsize and nodesize first, other checks will need them.
2382	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2383	 */
2384	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2385	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2386		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2387		ret = -EINVAL;
2388	}
2389
2390	/*
2391	 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2392	 *
2393	 * We can support 16K sectorsize with 64K page size without problem,
2394	 * but such sectorsize/pagesize combination doesn't make much sense.
2395	 * 4K will be our future standard, PAGE_SIZE is supported from the very
2396	 * beginning.
2397	 */
2398	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2399		btrfs_err(fs_info,
2400			"sectorsize %llu not yet supported for page size %lu",
2401			sectorsize, PAGE_SIZE);
2402		ret = -EINVAL;
2403	}
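	/*
	 * E.g. with 4K pages only a 4K sectorsize passes the check above,
	 * while with 64K pages both 4K and 64K sectorsizes are accepted.
	 */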
2404
2405	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2406	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2407		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2408		ret = -EINVAL;
2409	}
2410	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2411		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2412			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2413		ret = -EINVAL;
2414	}
2415
2416	/* Root alignment check */
2417	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2418		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2419			   btrfs_super_root(sb));
2420		ret = -EINVAL;
2421	}
2422	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2423		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2424			   btrfs_super_chunk_root(sb));
2425		ret = -EINVAL;
2426	}
2427	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2428		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2429			   btrfs_super_log_root(sb));
2430		ret = -EINVAL;
2431	}
2432
2433	if (!fs_info->fs_devices->temp_fsid &&
2434	    memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2435		btrfs_err(fs_info,
2436		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2437			  sb->fsid, fs_info->fs_devices->fsid);
2438		ret = -EINVAL;
2439	}
2440
2441	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2442		   BTRFS_FSID_SIZE) != 0) {
2443		btrfs_err(fs_info,
2444"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2445			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2446		ret = -EINVAL;
2447	}
2448
2449	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2450		   BTRFS_FSID_SIZE) != 0) {
2451		btrfs_err(fs_info,
2452			"dev_item UUID does not match metadata fsid: %pU != %pU",
2453			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2454		ret = -EINVAL;
2455	}
2456
2457	/*
2458	 * Artificial requirement for block-group-tree to force newer features
2459	 * (free-space-tree, no-holes) so the test matrix is smaller.
2460	 */
2461	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2462	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2463	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2464		btrfs_err(fs_info,
2465		"block-group-tree feature requires free-space-tree and no-holes");
2466		ret = -EINVAL;
2467	}
2468
2469	/*
2470	 * Hint to catch really bogus numbers, bitflips and the like; more
2471	 * exact checks are done later.
2472	 */
2473	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2474		btrfs_err(fs_info, "bytes_used is too small %llu",
2475			  btrfs_super_bytes_used(sb));
2476		ret = -EINVAL;
2477	}
2478	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2479		btrfs_err(fs_info, "invalid stripesize %u",
2480			  btrfs_super_stripesize(sb));
2481		ret = -EINVAL;
2482	}
2483	if (btrfs_super_num_devices(sb) > (1UL << 31))
2484		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2485			   btrfs_super_num_devices(sb));
2486	if (btrfs_super_num_devices(sb) == 0) {
2487		btrfs_err(fs_info, "number of devices is 0");
2488		ret = -EINVAL;
2489	}
2490
2491	if (mirror_num >= 0 &&
2492	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2493		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2494			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2495		ret = -EINVAL;
2496	}
2497
2498	/*
2499	 * Obvious sys_chunk_array corruptions: it must hold at least one key
2500	 * and one chunk.
2501	 */
2502	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2503		btrfs_err(fs_info, "system chunk array too big %u > %u",
2504			  btrfs_super_sys_array_size(sb),
2505			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2506		ret = -EINVAL;
2507	}
2508	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2509			+ sizeof(struct btrfs_chunk)) {
2510		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2511			  btrfs_super_sys_array_size(sb),
2512			  sizeof(struct btrfs_disk_key)
2513			  + sizeof(struct btrfs_chunk));
2514		ret = -EINVAL;
2515	}
2516
2517	/*
2518	 * The generation is a global counter; we'll trust it more than the
2519	 * others, but it's still possible that it's the one that's wrong.
2520	 */
2521	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2522		btrfs_warn(fs_info,
2523			"suspicious: generation < chunk_root_generation: %llu < %llu",
2524			btrfs_super_generation(sb),
2525			btrfs_super_chunk_root_generation(sb));
2526	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) &&
2527	    btrfs_super_cache_generation(sb) != (u64)-1)
2528		btrfs_warn(fs_info,
2529			"suspicious: generation < cache_generation: %llu < %llu",
2530			btrfs_super_generation(sb),
2531			btrfs_super_cache_generation(sb));
2532
2533	return ret;
2534}
2535
2536/*
2537 * Validation of super block at mount time.
2538 * Some checks already done early at mount time, like csum type and
2539 * incompat flags, will be skipped.
2540 */
2541static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2542{
2543	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2544}
2545
2546/*
2547 * Validation of super block at write time.
2548 * Some checks like bytenr check will be skipped as their values will be
2549 * overwritten soon.
2550 * Extra checks like csum type and incompat flags will be done here.
2551 */
2552static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2553				      struct btrfs_super_block *sb)
2554{
2555	int ret;
2556
2557	ret = btrfs_validate_super(fs_info, sb, -1);
2558	if (ret < 0)
2559		goto out;
2560	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2561		ret = -EUCLEAN;
2562		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2563			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2564		goto out;
2565	}
2566	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2567		ret = -EUCLEAN;
2568		btrfs_err(fs_info,
2569		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2570			  btrfs_super_incompat_flags(sb),
2571			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2572		goto out;
2573	}
2574out:
2575	if (ret < 0)
2576		btrfs_err(fs_info,
2577		"super block corruption detected before writing it to disk");
2578	return ret;
2579}
2580
2581static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2582{
2583	struct btrfs_tree_parent_check check = {
2584		.level = level,
2585		.transid = gen,
2586		.owner_root = btrfs_root_id(root)
2587	};
2588	int ret = 0;
2589
2590	root->node = read_tree_block(root->fs_info, bytenr, &check);
2591	if (IS_ERR(root->node)) {
2592		ret = PTR_ERR(root->node);
2593		root->node = NULL;
2594		return ret;
2595	}
2596	if (!extent_buffer_uptodate(root->node)) {
2597		free_extent_buffer(root->node);
2598		root->node = NULL;
2599		return -EIO;
2600	}
2601
2602	btrfs_set_root_node(&root->root_item, root->node);
2603	root->commit_root = btrfs_root_node(root);
2604	btrfs_set_root_refs(&root->root_item, 1);
2605	return ret;
2606}
2607
2608static int load_important_roots(struct btrfs_fs_info *fs_info)
2609{
2610	struct btrfs_super_block *sb = fs_info->super_copy;
2611	u64 gen, bytenr;
2612	int level, ret;
2613
2614	bytenr = btrfs_super_root(sb);
2615	gen = btrfs_super_generation(sb);
2616	level = btrfs_super_root_level(sb);
2617	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2618	if (ret) {
2619		btrfs_warn(fs_info, "couldn't read tree root");
2620		return ret;
2621	}
2622	return 0;
2623}
2624
2625static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2626{
2627	int backup_index = find_newest_super_backup(fs_info);
2628	struct btrfs_super_block *sb = fs_info->super_copy;
2629	struct btrfs_root *tree_root = fs_info->tree_root;
2630	bool handle_error = false;
2631	int ret = 0;
2632	int i;
2633
2634	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2635		if (handle_error) {
2636			if (!IS_ERR(tree_root->node))
2637				free_extent_buffer(tree_root->node);
2638			tree_root->node = NULL;
2639
2640			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2641				break;
2642
2643			free_root_pointers(fs_info, false);
2644
2645			/*
2646			 * Don't use the log in recovery mode, it won't be
2647			 * valid
2648			 */
2649			btrfs_set_super_log_root(sb, 0);
2650
2651			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2652			ret = read_backup_root(fs_info, i);
2653			backup_index = ret;
2654			if (ret < 0)
2655				return ret;
2656		}
2657
2658		ret = load_important_roots(fs_info);
2659		if (ret) {
2660			handle_error = true;
2661			continue;
2662		}
2663
2664		/*
2665		 * No need to hold btrfs_root::objectid_mutex since the fs
2666		 * hasn't been fully initialised and we are the only user
2667		 */
2668		ret = btrfs_init_root_free_objectid(tree_root);
2669		if (ret < 0) {
2670			handle_error = true;
2671			continue;
2672		}
2673
2674		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2675
2676		ret = btrfs_read_roots(fs_info);
2677		if (ret < 0) {
2678			handle_error = true;
2679			continue;
2680		}
2681
2682		/* All successful */
2683		fs_info->generation = btrfs_header_generation(tree_root->node);
2684		btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2685		fs_info->last_reloc_trans = 0;
2686
2687		/* Always begin writing backup roots after the one being used */
2688		if (backup_index < 0) {
2689			fs_info->backup_root_index = 0;
2690		} else {
2691			fs_info->backup_root_index = backup_index + 1;
2692			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2693		}
2694		break;
2695	}
2696
2697	return ret;
2698}
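
/*
 * Example of the backup slot rotation above (illustrative, assuming
 * BTRFS_NUM_BACKUP_ROOTS == 4): if we mounted from backup slot 3, then
 * backup_root_index becomes (3 + 1) % 4 == 0, so the next commit starts
 * overwriting the oldest slot instead of the one we just recovered from.
 */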
2699
2700void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2701{
2702	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2703	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2704	INIT_LIST_HEAD(&fs_info->trans_list);
2705	INIT_LIST_HEAD(&fs_info->dead_roots);
2706	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2707	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2708	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2709	spin_lock_init(&fs_info->delalloc_root_lock);
2710	spin_lock_init(&fs_info->trans_lock);
2711	spin_lock_init(&fs_info->fs_roots_radix_lock);
2712	spin_lock_init(&fs_info->delayed_iput_lock);
2713	spin_lock_init(&fs_info->defrag_inodes_lock);
2714	spin_lock_init(&fs_info->super_lock);
2715	spin_lock_init(&fs_info->buffer_lock);
2716	spin_lock_init(&fs_info->unused_bgs_lock);
2717	spin_lock_init(&fs_info->treelog_bg_lock);
2718	spin_lock_init(&fs_info->zone_active_bgs_lock);
2719	spin_lock_init(&fs_info->relocation_bg_lock);
2720	rwlock_init(&fs_info->tree_mod_log_lock);
2721	rwlock_init(&fs_info->global_root_lock);
2722	mutex_init(&fs_info->unused_bg_unpin_mutex);
2723	mutex_init(&fs_info->reclaim_bgs_lock);
2724	mutex_init(&fs_info->reloc_mutex);
2725	mutex_init(&fs_info->delalloc_root_mutex);
2726	mutex_init(&fs_info->zoned_meta_io_lock);
2727	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2728	seqlock_init(&fs_info->profiles_lock);
2729
2730	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2731	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2732	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2733	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2734	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2735				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2736	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2737				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2738	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2739				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2740	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2741				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2742
2743	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2744	INIT_LIST_HEAD(&fs_info->space_info);
2745	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2746	INIT_LIST_HEAD(&fs_info->unused_bgs);
2747	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2748	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2749#ifdef CONFIG_BTRFS_DEBUG
2750	INIT_LIST_HEAD(&fs_info->allocated_roots);
2751	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2752	spin_lock_init(&fs_info->eb_leak_lock);
2753#endif
2754	fs_info->mapping_tree = RB_ROOT_CACHED;
2755	rwlock_init(&fs_info->mapping_tree_lock);
2756	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2757			     BTRFS_BLOCK_RSV_GLOBAL);
2758	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2759	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2760	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2761	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2762			     BTRFS_BLOCK_RSV_DELOPS);
2763	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2764			     BTRFS_BLOCK_RSV_DELREFS);
2765
2766	atomic_set(&fs_info->async_delalloc_pages, 0);
2767	atomic_set(&fs_info->defrag_running, 0);
2768	atomic_set(&fs_info->nr_delayed_iputs, 0);
2769	atomic64_set(&fs_info->tree_mod_seq, 0);
2770	fs_info->global_root_tree = RB_ROOT;
2771	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2772	fs_info->metadata_ratio = 0;
2773	fs_info->defrag_inodes = RB_ROOT;
2774	atomic64_set(&fs_info->free_chunk_space, 0);
2775	fs_info->tree_mod_log = RB_ROOT;
2776	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2777	btrfs_init_ref_verify(fs_info);
2778
2779	fs_info->thread_pool_size = min_t(unsigned long,
2780					  num_online_cpus() + 2, 8);
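	/*
	 * E.g. on a 4-CPU machine this yields min(4 + 2, 8) == 6 worker
	 * threads; anything with 6 or more CPUs is capped at 8.
	 */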
2781
2782	INIT_LIST_HEAD(&fs_info->ordered_roots);
2783	spin_lock_init(&fs_info->ordered_root_lock);
2784
2785	btrfs_init_scrub(fs_info);
2786	btrfs_init_balance(fs_info);
2787	btrfs_init_async_reclaim_work(fs_info);
2788	btrfs_init_extent_map_shrinker_work(fs_info);
2789
2790	rwlock_init(&fs_info->block_group_cache_lock);
2791	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2792
2793	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2794			    IO_TREE_FS_EXCLUDED_EXTENTS);
2795
2796	mutex_init(&fs_info->ordered_operations_mutex);
2797	mutex_init(&fs_info->tree_log_mutex);
2798	mutex_init(&fs_info->chunk_mutex);
2799	mutex_init(&fs_info->transaction_kthread_mutex);
2800	mutex_init(&fs_info->cleaner_mutex);
2801	mutex_init(&fs_info->ro_block_group_mutex);
2802	init_rwsem(&fs_info->commit_root_sem);
2803	init_rwsem(&fs_info->cleanup_work_sem);
2804	init_rwsem(&fs_info->subvol_sem);
2805	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2806
2807	btrfs_init_dev_replace_locks(fs_info);
2808	btrfs_init_qgroup(fs_info);
2809	btrfs_discard_init(fs_info);
2810
2811	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2812	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2813
2814	init_waitqueue_head(&fs_info->transaction_throttle);
2815	init_waitqueue_head(&fs_info->transaction_wait);
2816	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2817	init_waitqueue_head(&fs_info->async_submit_wait);
2818	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2819
2820	/* Usable values until the real ones are cached from the superblock */
2821	fs_info->nodesize = 4096;
2822	fs_info->sectorsize = 4096;
2823	fs_info->sectorsize_bits = ilog2(4096);
2824	fs_info->stripesize = 4096;
2825
2826	/* Default compress algorithm when user does -o compress */
2827	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2828
2829	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2830
2831	spin_lock_init(&fs_info->swapfile_pins_lock);
2832	fs_info->swapfile_pins = RB_ROOT;
2833
2834	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2835	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2836}
2837
2838static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2839{
2840	int ret;
2841
2842	fs_info->sb = sb;
2843	/* Temporary fixed values for block size until we read the superblock. */
2844	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2845	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2846
2847	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2848	if (ret)
2849		return ret;
2850
2851	ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2852	if (ret)
2853		return ret;
2854
2855	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2856	if (ret)
2857		return ret;
2858
2859	fs_info->dirty_metadata_batch = PAGE_SIZE *
2860					(1 + ilog2(nr_cpu_ids));
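	/*
	 * E.g. with 4K pages and nr_cpu_ids == 16 this batch is
	 * 4096 * (1 + ilog2(16)) == 20480 bytes.
	 */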
2861
2862	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2863	if (ret)
2864		return ret;
2865
2866	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2867			GFP_KERNEL);
2868	if (ret)
2869		return ret;
2870
2871	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2872					GFP_KERNEL);
2873	if (!fs_info->delayed_root)
2874		return -ENOMEM;
2875	btrfs_init_delayed_root(fs_info->delayed_root);
2876
2877	if (sb_rdonly(sb))
2878		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2879	if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
2880		set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
2881
2882	return btrfs_alloc_stripe_hash_table(fs_info);
2883}
2884
2885static int btrfs_uuid_rescan_kthread(void *data)
2886{
2887	struct btrfs_fs_info *fs_info = data;
2888	int ret;
2889
2890	/*
2891	 * 1st step is to iterate through the existing UUID tree and
2892	 * to delete all entries that contain outdated data.
2893	 * 2nd step is to add all missing entries to the UUID tree.
2894	 */
2895	ret = btrfs_uuid_tree_iterate(fs_info);
2896	if (ret < 0) {
2897		if (ret != -EINTR)
2898			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2899				   ret);
2900		up(&fs_info->uuid_tree_rescan_sem);
2901		return ret;
2902	}
2903	return btrfs_uuid_scan_kthread(data);
2904}
2905
2906static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2907{
2908	struct task_struct *task;
2909
2910	down(&fs_info->uuid_tree_rescan_sem);
2911	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2912	if (IS_ERR(task)) {
2913		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2914		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2915		up(&fs_info->uuid_tree_rescan_sem);
2916		return PTR_ERR(task);
2917	}
2918
2919	return 0;
2920}
2921
2922static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2923{
2924	u64 root_objectid = 0;
2925	struct btrfs_root *gang[8];
2926	int ret = 0;
2927
2928	while (1) {
2929		unsigned int found;
2930
2931		spin_lock(&fs_info->fs_roots_radix_lock);
2932		found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2933					     (void **)gang, root_objectid,
2934					     ARRAY_SIZE(gang));
2935		if (!found) {
2936			spin_unlock(&fs_info->fs_roots_radix_lock);
2937			break;
2938		}
2939		root_objectid = btrfs_root_id(gang[found - 1]) + 1;
2940
2941		for (int i = 0; i < found; i++) {
2942			/* Avoid grabbing roots in dead_roots. */
2943			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2944				gang[i] = NULL;
2945				continue;
2946			}
2947			/* Grab all the search results for later use. */
2948			gang[i] = btrfs_grab_root(gang[i]);
2949		}
2950		spin_unlock(&fs_info->fs_roots_radix_lock);
2951
2952		for (int i = 0; i < found; i++) {
2953			if (!gang[i])
2954				continue;
2955			root_objectid = btrfs_root_id(gang[i]);
2956			/*
2957			 * Continue to release the remaining roots after the first
2958			 * error, without further cleanup, and preserve the first
2959			 * error for the return value.
2960			 */
2961			if (!ret)
2962				ret = btrfs_orphan_cleanup(gang[i]);
2963			btrfs_put_root(gang[i]);
2964		}
2965		if (ret)
2966			break;
2967
2968		root_objectid++;
2969	}
2970	return ret;
2971}
2972
2973/*
2974 * Mounting logic specific to read-write file systems. Shared by open_ctree
2975 * and btrfs_remount when remounting from read-only to read-write.
2976 */
2977int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2978{
2979	int ret;
2980	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2981	bool rebuild_free_space_tree = false;
2982
2983	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2984	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2985		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2986			btrfs_warn(fs_info,
2987				   "'clear_cache' option is ignored with extent tree v2");
2988		else
2989			rebuild_free_space_tree = true;
2990	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2991		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2992		btrfs_warn(fs_info, "free space tree is invalid");
2993		rebuild_free_space_tree = true;
2994	}
2995
2996	if (rebuild_free_space_tree) {
2997		btrfs_info(fs_info, "rebuilding free space tree");
2998		ret = btrfs_rebuild_free_space_tree(fs_info);
2999		if (ret) {
3000			btrfs_warn(fs_info,
3001				   "failed to rebuild free space tree: %d", ret);
3002			goto out;
3003		}
3004	}
3005
3006	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3007	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3008		btrfs_info(fs_info, "disabling free space tree");
3009		ret = btrfs_delete_free_space_tree(fs_info);
3010		if (ret) {
3011			btrfs_warn(fs_info,
3012				   "failed to disable free space tree: %d", ret);
3013			goto out;
3014		}
3015	}
3016
3017	/*
3018	 * btrfs_find_orphan_roots() is responsible for finding all the dead
3019	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3020	 * them into the fs_info->fs_roots_radix tree. This must be done before
3021	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3022	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3023	 * item before the root's tree is deleted - this means that if we unmount
3024	 * or crash before the deletion completes, on the next mount we will not
3025	 * delete what remains of the tree because the orphan item no longer
3026	 * exists, which is what tells us we have a pending deletion.
3027	 */
3028	ret = btrfs_find_orphan_roots(fs_info);
3029	if (ret)
3030		goto out;
3031
3032	ret = btrfs_cleanup_fs_roots(fs_info);
3033	if (ret)
3034		goto out;
3035
3036	down_read(&fs_info->cleanup_work_sem);
3037	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3038	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3039		up_read(&fs_info->cleanup_work_sem);
3040		goto out;
3041	}
3042	up_read(&fs_info->cleanup_work_sem);
3043
3044	mutex_lock(&fs_info->cleaner_mutex);
3045	ret = btrfs_recover_relocation(fs_info);
3046	mutex_unlock(&fs_info->cleaner_mutex);
3047	if (ret < 0) {
3048		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3049		goto out;
3050	}
3051
3052	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3053	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3054		btrfs_info(fs_info, "creating free space tree");
3055		ret = btrfs_create_free_space_tree(fs_info);
3056		if (ret) {
3057			btrfs_warn(fs_info,
3058				"failed to create free space tree: %d", ret);
3059			goto out;
3060		}
3061	}
3062
3063	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3064		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3065		if (ret)
3066			goto out;
3067	}
3068
3069	ret = btrfs_resume_balance_async(fs_info);
3070	if (ret)
3071		goto out;
3072
3073	ret = btrfs_resume_dev_replace_async(fs_info);
3074	if (ret) {
3075		btrfs_warn(fs_info, "failed to resume dev_replace");
3076		goto out;
3077	}
3078
3079	btrfs_qgroup_rescan_resume(fs_info);
3080
3081	if (!fs_info->uuid_root) {
3082		btrfs_info(fs_info, "creating UUID tree");
3083		ret = btrfs_create_uuid_tree(fs_info);
3084		if (ret) {
3085			btrfs_warn(fs_info,
3086				   "failed to create the UUID tree %d", ret);
3087			goto out;
3088		}
3089	}
3090
3091out:
3092	return ret;
3093}
3094
3095/*
3096 * Do various sanity and dependency checks of different features.
3097 *
3098 * @is_rw_mount:	If the mount is read-write.
3099 *
3100 * This is the place for less strict checks (like for subpage or artificial
3101 * feature dependencies).
3102 *
3103 * For strict checks or possible corruption detection, see
3104 * btrfs_validate_super().
3105 *
3106 * This should be called after btrfs_parse_options(), as some mount options
3107 * (space cache related) can modify the on-disk format, like the free
3108 * space tree, and screw up certain feature dependencies.
3109 */
3110int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3111{
3112	struct btrfs_super_block *disk_super = fs_info->super_copy;
3113	u64 incompat = btrfs_super_incompat_flags(disk_super);
3114	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3115	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3116
3117	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3118		btrfs_err(fs_info,
3119		"cannot mount because of unknown incompat features (0x%llx)",
3120		    incompat);
3121		return -EINVAL;
3122	}
3123
3124	/* Runtime limitation for mixed block groups. */
3125	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3126	    (fs_info->sectorsize != fs_info->nodesize)) {
3127		btrfs_err(fs_info,
3128"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3129			fs_info->nodesize, fs_info->sectorsize);
3130		return -EINVAL;
3131	}
3132
3133	/* Mixed backref is an always-enabled feature. */
3134	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3135
3136	/* Set compression related flags just in case. */
3137	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3138		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3139	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3140		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3141
3142	/*
3143	 * An ancient flag, which should really be marked deprecated.
3144	 * Such a runtime limitation doesn't really need an incompat flag.
3145	 */
3146	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3147		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3148
3149	if (compat_ro_unsupp && is_rw_mount) {
3150		btrfs_err(fs_info,
3151	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3152		       compat_ro);
3153		return -EINVAL;
3154	}
3155
3156	/*
3157	 * We have unsupported RO compat features; although RO mounted, we
3158	 * must not cause any metadata writes, including log replay, or we
3159	 * could screw up whatever the new feature requires.
3160	 */
3161	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3162	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3163		btrfs_err(fs_info,
3164"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3165			  compat_ro);
3166		return -EINVAL;
3167	}
3168
3169	/*
3170	 * Artificial limitations for block group tree, to force
3171	 * block-group-tree to rely on no-holes and free-space-tree.
3172	 */
3173	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3174	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3175	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3176		btrfs_err(fs_info,
3177"block-group-tree feature requires no-holes and free-space-tree features");
3178		return -EINVAL;
3179	}
3180
3181	/*
3182	 * Subpage runtime limitation on v1 cache.
3183	 *
3184	 * V1 space cache still has some hard-coded PAGE_SIZE usage, while
3185	 * we're already defaulting to v2 cache; no need to bother with v1 as
3186	 * it's going to be deprecated anyway.
3187	 */
3188	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3189		btrfs_warn(fs_info,
3190	"v1 space cache is not supported for page size %lu with sectorsize %u",
3191			   PAGE_SIZE, fs_info->sectorsize);
3192		return -EINVAL;
3193	}
3194
3195	/* This can be called by remount, we need to protect the super block. */
3196	spin_lock(&fs_info->super_lock);
3197	btrfs_set_super_incompat_flags(disk_super, incompat);
3198	spin_unlock(&fs_info->super_lock);
3199
3200	return 0;
3201}
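
/*
 * Illustrative consequence of the compat_ro handling above: a filesystem
 * with an unknown compat_ro bit set can still be mounted read-only (as
 * long as no dirty log needs to be replayed), but a read-write mount is
 * refused with -EINVAL.
 */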
3202
3203int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
3204{
3205	u32 sectorsize;
3206	u32 nodesize;
3207	u32 stripesize;
3208	u64 generation;
3209	u16 csum_type;
3210	struct btrfs_super_block *disk_super;
3211	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3212	struct btrfs_root *tree_root;
3213	struct btrfs_root *chunk_root;
3214	int ret;
3215	int level;
3216
3217	ret = init_mount_fs_info(fs_info, sb);
3218	if (ret)
3219		goto fail;
3220
3221	/* These need to be init'ed before we start creating inodes and such. */
3222	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3223				     GFP_KERNEL);
3224	fs_info->tree_root = tree_root;
3225	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3226				      GFP_KERNEL);
3227	fs_info->chunk_root = chunk_root;
3228	if (!tree_root || !chunk_root) {
3229		ret = -ENOMEM;
3230		goto fail;
3231	}
3232
3233	ret = btrfs_init_btree_inode(sb);
3234	if (ret)
3235		goto fail;
3236
3237	invalidate_bdev(fs_devices->latest_dev->bdev);
3238
3239	/*
3240	 * Read super block and check the signature bytes only
3241	 */
3242	disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3243	if (IS_ERR(disk_super)) {
3244		ret = PTR_ERR(disk_super);
3245		goto fail_alloc;
3246	}
3247
3248	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3249	/*
3250	 * Verify the type first, if that or the checksum value are
3251	 * corrupted, we'll find out
3252	 */
3253	csum_type = btrfs_super_csum_type(disk_super);
3254	if (!btrfs_supported_super_csum(csum_type)) {
3255		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3256			  csum_type);
3257		ret = -EINVAL;
3258		btrfs_release_disk_super(disk_super);
3259		goto fail_alloc;
3260	}
3261
3262	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3263
3264	ret = btrfs_init_csum_hash(fs_info, csum_type);
3265	if (ret) {
3266		btrfs_release_disk_super(disk_super);
3267		goto fail_alloc;
3268	}
3269
3270	/*
3271	 * We want to check the superblock checksum; the type is stored inside.
3272	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3273	 */
3274	if (btrfs_check_super_csum(fs_info, disk_super)) {
3275		btrfs_err(fs_info, "superblock checksum mismatch");
3276		ret = -EINVAL;
3277		btrfs_release_disk_super(disk_super);
3278		goto fail_alloc;
3279	}
3280
3281	/*
3282	 * super_copy is zeroed at allocation time and we never touch the
3283	 * following bytes up to INFO_SIZE; the checksum is calculated from
3284	 * the whole block of INFO_SIZE.
3285	 */
3286	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3287	btrfs_release_disk_super(disk_super);
3288
3289	disk_super = fs_info->super_copy;
3290
3291	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3292	       sizeof(*fs_info->super_for_commit));
3293
3294	ret = btrfs_validate_mount_super(fs_info);
3295	if (ret) {
3296		btrfs_err(fs_info, "superblock contains fatal errors");
3297		ret = -EINVAL;
3298		goto fail_alloc;
3299	}
3300
3301	if (!btrfs_super_root(disk_super)) {
3302		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3303		ret = -EINVAL;
3304		goto fail_alloc;
3305	}
3306
3307	/* Check FS state, i.e. whether the FS is broken. */
3308	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3309		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3310
3311	/* Set up fs_info before parsing mount options */
3312	nodesize = btrfs_super_nodesize(disk_super);
3313	sectorsize = btrfs_super_sectorsize(disk_super);
3314	stripesize = sectorsize;
3315	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3316	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
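	/*
	 * E.g. with a 4K sectorsize and nr_cpu_ids == 16 the delalloc batch
	 * is 4096 * 512 * (1 + ilog2(16)) == 10 MiB.
	 */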
3317
3318	fs_info->nodesize = nodesize;
3319	fs_info->sectorsize = sectorsize;
3320	fs_info->sectorsize_bits = ilog2(sectorsize);
3321	fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
3322	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3323	fs_info->stripesize = stripesize;
3324
3325	/*
3326	 * Handle the space caching options appropriately now that we have the
3327	 * super block loaded and validated.
3328	 */
3329	btrfs_set_free_space_cache_settings(fs_info);
3330
3331	if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3332		ret = -EINVAL;
3333		goto fail_alloc;
3334	}
3335
3336	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3337	if (ret < 0)
3338		goto fail_alloc;
3339
3340	/*
3341	 * At this point our mount options are validated, if we set ->max_inline
3342	 * to something non-standard make sure we truncate it to sectorsize.
3343	 */
3344	fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3345
3346	if (sectorsize < PAGE_SIZE)
3347		btrfs_warn(fs_info,
3348		"read-write for sector size %u with page size %lu is experimental",
3349			   sectorsize, PAGE_SIZE);
3350
3351	ret = btrfs_init_workqueues(fs_info);
3352	if (ret)
3353		goto fail_sb_buffer;
3354
3355	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3356	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
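	/*
	 * Readahead is scaled by the number of devices and then clamped to
	 * at least SZ_4M / PAGE_SIZE pages, e.g. 1024 pages (4 MiB) with
	 * 4K pages.
	 */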
3357
3358	/* Update the values for the current filesystem. */
3359	sb->s_blocksize = sectorsize;
3360	sb->s_blocksize_bits = blksize_bits(sectorsize);
3361	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3362
3363	mutex_lock(&fs_info->chunk_mutex);
3364	ret = btrfs_read_sys_array(fs_info);
3365	mutex_unlock(&fs_info->chunk_mutex);
3366	if (ret) {
3367		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3368		goto fail_sb_buffer;
3369	}
3370
3371	generation = btrfs_super_chunk_root_generation(disk_super);
3372	level = btrfs_super_chunk_root_level(disk_super);
3373	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3374			      generation, level);
3375	if (ret) {
3376		btrfs_err(fs_info, "failed to read chunk root");
3377		goto fail_tree_roots;
3378	}
3379
3380	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3381			   offsetof(struct btrfs_header, chunk_tree_uuid),
3382			   BTRFS_UUID_SIZE);
3383
3384	ret = btrfs_read_chunk_tree(fs_info);
3385	if (ret) {
3386		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3387		goto fail_tree_roots;
3388	}
3389
3390	/*
3391	 * At this point we know all the devices that make this filesystem,
3392	 * including the seed devices but we don't know yet if the replace
3393	 * target is required. So free devices that are not part of this
3394	 * filesystem but skip the replace target device which is checked
3395	 * below in btrfs_init_dev_replace().
3396	 */
3397	btrfs_free_extra_devids(fs_devices);
3398	if (!fs_devices->latest_dev->bdev) {
3399		btrfs_err(fs_info, "failed to read devices");
3400		ret = -EIO;
3401		goto fail_tree_roots;
3402	}
3403
3404	ret = init_tree_roots(fs_info);
3405	if (ret)
3406		goto fail_tree_roots;
3407
3408	/*
3409	 * Get zone type information of zoned block devices. This will also
3410	 * handle emulation of a zoned filesystem if a regular device has the
3411	 * zoned incompat feature flag set.
3412	 */
3413	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3414	if (ret) {
3415		btrfs_err(fs_info,
3416			  "zoned: failed to read device zone info: %d", ret);
3417		goto fail_block_groups;
3418	}
3419
3420	/*
3421	 * If we have a uuid root and we're not being told to rescan we need to
3422	 * check the generation here so we can set the
3423	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3424	 * transaction during a balance or the log replay without updating the
3425	 * uuid generation, and then if we crash we would rescan the uuid tree,
3426	 * even though it was perfectly fine.
3427	 */
3428	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3429	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3430		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3431
3432	ret = btrfs_verify_dev_extents(fs_info);
3433	if (ret) {
3434		btrfs_err(fs_info,
3435			  "failed to verify dev extents against chunks: %d",
3436			  ret);
3437		goto fail_block_groups;
3438	}
3439	ret = btrfs_recover_balance(fs_info);
3440	if (ret) {
3441		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3442		goto fail_block_groups;
3443	}
3444
3445	ret = btrfs_init_dev_stats(fs_info);
3446	if (ret) {
3447		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3448		goto fail_block_groups;
3449	}
3450
3451	ret = btrfs_init_dev_replace(fs_info);
3452	if (ret) {
3453		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3454		goto fail_block_groups;
3455	}
3456
3457	ret = btrfs_check_zoned_mode(fs_info);
3458	if (ret) {
3459		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3460			  ret);
3461		goto fail_block_groups;
3462	}
3463
3464	ret = btrfs_sysfs_add_fsid(fs_devices);
3465	if (ret) {
3466		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3467				ret);
3468		goto fail_block_groups;
3469	}
3470
3471	ret = btrfs_sysfs_add_mounted(fs_info);
3472	if (ret) {
3473		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3474		goto fail_fsdev_sysfs;
3475	}
3476
3477	ret = btrfs_init_space_info(fs_info);
3478	if (ret) {
3479		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3480		goto fail_sysfs;
3481	}
3482
3483	ret = btrfs_read_block_groups(fs_info);
3484	if (ret) {
3485		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3486		goto fail_sysfs;
3487	}
3488
3489	btrfs_free_zone_cache(fs_info);
3490
3491	btrfs_check_active_zone_reservation(fs_info);
3492
3493	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3494	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3495		btrfs_warn(fs_info,
3496		"writable mount is not allowed due to too many missing devices");
3497		ret = -EINVAL;
3498		goto fail_sysfs;
3499	}
3500
3501	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3502					       "btrfs-cleaner");
3503	if (IS_ERR(fs_info->cleaner_kthread)) {
3504		ret = PTR_ERR(fs_info->cleaner_kthread);
3505		goto fail_sysfs;
3506	}
3507
3508	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3509						   tree_root,
3510						   "btrfs-transaction");
3511	if (IS_ERR(fs_info->transaction_kthread)) {
3512		ret = PTR_ERR(fs_info->transaction_kthread);
3513		goto fail_cleaner;
3514	}
3515
3516	ret = btrfs_read_qgroup_config(fs_info);
3517	if (ret)
3518		goto fail_trans_kthread;
3519
3520	if (btrfs_build_ref_tree(fs_info))
3521		btrfs_err(fs_info, "couldn't build ref tree");
3522
3523	/* Do not make disk changes in a broken FS or when nologreplay is given. */
3524	if (btrfs_super_log_root(disk_super) != 0 &&
3525	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3526		btrfs_info(fs_info, "start tree-log replay");
3527		ret = btrfs_replay_log(fs_info, fs_devices);
3528		if (ret)
3529			goto fail_qgroup;
3530	}
3531
3532	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3533	if (IS_ERR(fs_info->fs_root)) {
3534		ret = PTR_ERR(fs_info->fs_root);
3535		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3536		fs_info->fs_root = NULL;
3537		goto fail_qgroup;
3538	}
3539
3540	if (sb_rdonly(sb))
3541		return 0;
3542
3543	ret = btrfs_start_pre_rw_mount(fs_info);
3544	if (ret) {
3545		close_ctree(fs_info);
3546		return ret;
3547	}
3548	btrfs_discard_resume(fs_info);
3549
3550	if (fs_info->uuid_root &&
3551	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3552	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3553		btrfs_info(fs_info, "checking UUID tree");
3554		ret = btrfs_check_uuid_tree(fs_info);
3555		if (ret) {
3556			btrfs_warn(fs_info,
3557				"failed to check the UUID tree: %d", ret);
3558			close_ctree(fs_info);
3559			return ret;
3560		}
3561	}
3562
3563	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3564
3565	/* Kick the cleaner thread so it'll start deleting snapshots. */
3566	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3567		wake_up_process(fs_info->cleaner_kthread);
3568
3569	return 0;
3570
3571fail_qgroup:
3572	btrfs_free_qgroup_config(fs_info);
3573fail_trans_kthread:
3574	kthread_stop(fs_info->transaction_kthread);
3575	btrfs_cleanup_transaction(fs_info);
3576	btrfs_free_fs_roots(fs_info);
3577fail_cleaner:
3578	kthread_stop(fs_info->cleaner_kthread);
3579
3580	/*
3581	 * Make sure we're done with the btree inode before we stop our
3582	 * kthreads.
3583	 */
3584	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3585
3586fail_sysfs:
3587	btrfs_sysfs_remove_mounted(fs_info);
3588
3589fail_fsdev_sysfs:
3590	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3591
3592fail_block_groups:
3593	btrfs_put_block_group_cache(fs_info);
3594
3595fail_tree_roots:
3596	if (fs_info->data_reloc_root)
3597		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3598	free_root_pointers(fs_info, true);
3599	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3600
3601fail_sb_buffer:
3602	btrfs_stop_all_workers(fs_info);
3603	btrfs_free_block_groups(fs_info);
3604fail_alloc:
3605	btrfs_mapping_tree_free(fs_info);
3606
3607	iput(fs_info->btree_inode);
3608fail:
3609	btrfs_close_devices(fs_info->fs_devices);
3610	ASSERT(ret < 0);
3611	return ret;
3612}
3613ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3614
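/*
 * Endio handler for the super block writes submitted by write_dev_supers().
 *
 * On I/O error, account the failure in the per-device sb_write_errors
 * counter (weighted so that a failed REQ_FUA primary write alone is enough
 * to fail the update), then unlock and release the folios used for writing.
 */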
3615static void btrfs_end_super_write(struct bio *bio)
3616{
3617	struct btrfs_device *device = bio->bi_private;
3618	struct folio_iter fi;
3619
3620	bio_for_each_folio_all(fi, bio) {
3621		if (bio->bi_status) {
3622			btrfs_warn_rl_in_rcu(device->fs_info,
3623				"lost super block write due to IO error on %s (%d)",
3624				btrfs_dev_name(device),
3625				blk_status_to_errno(bio->bi_status));
3626			btrfs_dev_stat_inc_and_print(device,
3627						     BTRFS_DEV_STAT_WRITE_ERRS);
3628			/* Ensure failure if the primary sb fails. */
3629			if (bio->bi_opf & REQ_FUA)
3630				atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3631					   &device->sb_write_errors);
3632			else
3633				atomic_inc(&device->sb_write_errors);
3634		}
3635		folio_unlock(fi.folio);
3636		folio_put(fi.folio);
3637	}
3638
3639	bio_put(bio);
3640}
3641
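/*
 * Read one super block copy from @bdev through the page cache and do basic
 * sanity checks (magic and bytenr). @drop_cache invalidates the cached page
 * first so the copy is re-read from the device; it is only valid for the
 * primary copy. Returns the super block or an ERR_PTR() on failure.
 */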
3642struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3643						   int copy_num, bool drop_cache)
3644{
3645	struct btrfs_super_block *super;
3646	struct page *page;
3647	u64 bytenr, bytenr_orig;
3648	struct address_space *mapping = bdev->bd_mapping;
3649	int ret;
3650
3651	bytenr_orig = btrfs_sb_offset(copy_num);
3652	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3653	if (ret == -ENOENT)
3654		return ERR_PTR(-EINVAL);
3655	else if (ret)
3656		return ERR_PTR(ret);
3657
3658	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3659		return ERR_PTR(-EINVAL);
3660
3661	if (drop_cache) {
3662		/* This should only be called with the primary sb. */
3663		ASSERT(copy_num == 0);
3664
3665		/*
3666		 * Drop the page of the primary superblock, so a later read will
3667		 * always read from the device.
3668		 */
3669		invalidate_inode_pages2_range(mapping,
3670				bytenr >> PAGE_SHIFT,
3671				(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3672	}
3673
3674	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3675	if (IS_ERR(page))
3676		return ERR_CAST(page);
3677
3678	super = page_address(page);
3679	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3680		btrfs_release_disk_super(super);
3681		return ERR_PTR(-ENODATA);
3682	}
3683
3684	if (btrfs_super_bytenr(super) != bytenr_orig) {
3685		btrfs_release_disk_super(super);
3686		return ERR_PTR(-EINVAL);
3687	}
3688
3689	return super;
3690}
3691
3692
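/*
 * Read the primary super block (copy 0) of @bdev. Returns an ERR_PTR() when
 * the copy cannot be read or does not pass the basic sanity checks.
 */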
3693struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3694{
3695	struct btrfs_super_block *super, *latest = NULL;
3696	int i;
3697	u64 transid = 0;
3698
3699	/*
3700	 * We would like to check all the supers, but that would make a btrfs
3701	 * mount succeed after a mkfs from a different FS. So we need a special
3702	 * mount option to scan for later supers, using BTRFS_SUPER_MIRROR_MAX.
3703	 */
3704	for (i = 0; i < 1; i++) {
3705		super = btrfs_read_dev_one_super(bdev, i, false);
3706		if (IS_ERR(super))
3707			continue;
3708
3709		if (!latest || btrfs_super_generation(super) > transid) {
3710			if (latest)
3711				btrfs_release_disk_super(super);
3712
3713			latest = super;
3714			transid = btrfs_super_generation(super);
3715		}
3716	}
3717
3718	return super;
3719}
3720
3721	/*
3722	 * Write superblock @sb to the @device. Do not wait for completion, all the
3723	 * folios we use for writing are locked.
3724	 *
3725	 * Write @max_mirrors copies of the superblock, where 0 means the default of
3726	 * all copies that fit the expected device size at commit time. Note that
3727	 * max_mirrors must be the same for the write and wait phases.
3728	 *
3729	 * Return 0 on success, or -1 when a folio is not found or submission fails.
3730	 */
3731static int write_dev_supers(struct btrfs_device *device,
3732			    struct btrfs_super_block *sb, int max_mirrors)
3733{
3734	struct btrfs_fs_info *fs_info = device->fs_info;
3735	struct address_space *mapping = device->bdev->bd_mapping;
3736	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3737	int i;
3738	int ret;
3739	u64 bytenr, bytenr_orig;
3740
3741	atomic_set(&device->sb_write_errors, 0);
3742
3743	if (max_mirrors == 0)
3744		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3745
3746	shash->tfm = fs_info->csum_shash;
3747
3748	for (i = 0; i < max_mirrors; i++) {
3749		struct folio *folio;
3750		struct bio *bio;
3751		struct btrfs_super_block *disk_super;
3752		size_t offset;
3753
3754		bytenr_orig = btrfs_sb_offset(i);
3755		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3756		if (ret == -ENOENT) {
3757			continue;
3758		} else if (ret < 0) {
3759			btrfs_err(device->fs_info,
3760				"couldn't get super block location for mirror %d",
3761				i);
3762			atomic_inc(&device->sb_write_errors);
3763			continue;
3764		}
3765		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3766		    device->commit_total_bytes)
3767			break;
3768
3769		btrfs_set_super_bytenr(sb, bytenr_orig);
3770
3771		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3772				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3773				    sb->csum);
3774
3775		folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3776					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3777					    GFP_NOFS);
3778		if (IS_ERR(folio)) {
3779			btrfs_err(device->fs_info,
3780			    "couldn't get super block page for bytenr %llu",
3781			    bytenr);
3782			atomic_inc(&device->sb_write_errors);
3783			continue;
3784		}
3785		ASSERT(folio_order(folio) == 0);
3786
3787		offset = offset_in_folio(folio, bytenr);
3788		disk_super = folio_address(folio) + offset;
3789		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3790
3791		/*
3792		 * Directly use bios here instead of relying on the page cache
3793		 * to do I/O, so we don't lose the ability to do integrity
3794		 * checking.
3795		 */
3796		bio = bio_alloc(device->bdev, 1,
3797				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3798				GFP_NOFS);
3799		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3800		bio->bi_private = device;
3801		bio->bi_end_io = btrfs_end_super_write;
3802		bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3803
3804		/*
3805		 * We FUA only the first super block.  The others we allow to
3806		 * go down lazily and there's a short window where the on-disk
3807		 * copies might still contain the older version.
3808		 */
3809		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3810			bio->bi_opf |= REQ_FUA;
3811		submit_bio(bio);
3812
3813		if (btrfs_advance_sb_log(device, i))
3814			atomic_inc(&device->sb_write_errors);
3815	}
3816	return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
3817}
3818
3819/*
3820	 * Wait for write completion of the superblocks submitted by write_dev_supers,
3821	 * with @max_mirrors the same as in the write phase.
3822	 *
3823	 * Return -1 if the primary super block write failed or when no super block
3824	 * copies were written. Otherwise return 0.
3825 */
3826static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3827{
3828	int i;
3829	int errors = 0;
3830	bool primary_failed = false;
3831	int ret;
3832	u64 bytenr;
3833
3834	if (max_mirrors == 0)
3835		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3836
3837	for (i = 0; i < max_mirrors; i++) {
3838		struct folio *folio;
3839
3840		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3841		if (ret == -ENOENT) {
3842			break;
3843		} else if (ret < 0) {
3844			errors++;
3845			if (i == 0)
3846				primary_failed = true;
3847			continue;
3848		}
3849		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3850		    device->commit_total_bytes)
3851			break;
3852
3853		folio = filemap_get_folio(device->bdev->bd_mapping,
3854					  bytenr >> PAGE_SHIFT);
3855		/* If the folio has been removed, then we know it completed. */
3856		if (IS_ERR(folio))
3857			continue;
3858		ASSERT(folio_order(folio) == 0);
3859
3860		/* Folio will be unlocked once the write completes. */
3861		folio_wait_locked(folio);
3862		folio_put(folio);
3863	}
3864
3865	errors += atomic_read(&device->sb_write_errors);
3866	if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3867		primary_failed = true;
3868	if (primary_failed) {
3869		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3870			  device->devid);
3871		return -1;
3872	}
3873
3874	return errors < i ? 0 : -1;
3875}
3876
3877/*
3878	 * Endio for the write_dev_flush, this will wake anyone waiting
3879	 * for the barrier when it is done.
3880 */
3881static void btrfs_end_empty_barrier(struct bio *bio)
3882{
3883	bio_uninit(bio);
3884	complete(bio->bi_private);
3885}
3886
3887/*
3888 * Submit a flush request to the device if it supports it. Error handling is
3889 * done in the waiting counterpart.
3890 */
3891static void write_dev_flush(struct btrfs_device *device)
3892{
3893	struct bio *bio = &device->flush_bio;
3894
3895	device->last_flush_error = BLK_STS_OK;
3896
3897	bio_init(bio, device->bdev, NULL, 0,
3898		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3899	bio->bi_end_io = btrfs_end_empty_barrier;
3900	init_completion(&device->flush_wait);
3901	bio->bi_private = &device->flush_wait;
3902	submit_bio(bio);
3903	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3904}
3905
3906/*
3907 * If the flush bio has been submitted by write_dev_flush, wait for it.
3908 * Return true for any error, and false otherwise.
3909 */
3910static bool wait_dev_flush(struct btrfs_device *device)
3911{
3912	struct bio *bio = &device->flush_bio;
3913
3914	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3915		return false;
3916
3917	wait_for_completion_io(&device->flush_wait);
3918
3919	if (bio->bi_status) {
3920		device->last_flush_error = bio->bi_status;
3921		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3922		return true;
3923	}
3924
3925	return false;
3926}
3927
3928/*
3929	 * Send an empty flush down to each device in parallel,
3930	 * then wait for them.
3931 */
3932static int barrier_all_devices(struct btrfs_fs_info *info)
3933{
3934	struct list_head *head;
3935	struct btrfs_device *dev;
3936	int errors_wait = 0;
3937
3938	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3939	/* send down all the barriers */
3940	head = &info->fs_devices->devices;
3941	list_for_each_entry(dev, head, dev_list) {
3942		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3943			continue;
3944		if (!dev->bdev)
3945			continue;
3946		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3947		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3948			continue;
3949
3950		write_dev_flush(dev);
3951	}
3952
3953	/* wait for all the barriers */
3954	list_for_each_entry(dev, head, dev_list) {
3955		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3956			continue;
3957		if (!dev->bdev) {
3958			errors_wait++;
3959			continue;
3960		}
3961		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3962		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3963			continue;
3964
3965		if (wait_dev_flush(dev))
3966			errors_wait++;
3967	}
3968
3969	/*
3970	 * Checks last_flush_error of disks in order to determine the device
3971	 * state.
3972	 */
3973	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3974		return -EIO;
3975
3976	return 0;
3977}
3978
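/*
 * Return how many disk barrier (flush) failures can be tolerated for the
 * block group profiles in @flags: the minimum of the tolerated failures of
 * every profile that is set.
 */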
3979int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3980{
3981	int raid_type;
3982	int min_tolerated = INT_MAX;
3983
3984	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3985	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3986		min_tolerated = min_t(int, min_tolerated,
3987				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3988				    tolerated_failures);
3989
3990	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3991		if (raid_type == BTRFS_RAID_SINGLE)
3992			continue;
3993		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3994			continue;
3995		min_tolerated = min_t(int, min_tolerated,
3996				    btrfs_raid_array[raid_type].
3997				    tolerated_failures);
3998	}
3999
4000	if (min_tolerated == INT_MAX) {
4001		pr_warn("BTRFS: unknown raid flag: %llu", flags);
4002		min_tolerated = 0;
4003	}
4004
4005	return min_tolerated;
4006}
4007
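/*
 * Write the super block to all writeable devices: optionally send flush
 * barriers first, update the per-device item in the commit copy, then submit
 * the individual copies and wait for them. Returns -EIO when more than
 * max_errors (number of devices - 1) devices failed.
 */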
4008int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4009{
4010	struct list_head *head;
4011	struct btrfs_device *dev;
4012	struct btrfs_super_block *sb;
4013	struct btrfs_dev_item *dev_item;
4014	int ret;
4015	int do_barriers;
4016	int max_errors;
4017	int total_errors = 0;
4018	u64 flags;
4019
4020	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4021
4022	/*
4023	 * max_mirrors == 0 indicates we're from commit_transaction,
4024	 * not from fsync, where the tree roots in fs_info may not yet
4025	 * be consistent on disk.
4026	 */
4027	if (max_mirrors == 0)
4028		backup_super_roots(fs_info);
4029
4030	sb = fs_info->super_for_commit;
4031	dev_item = &sb->dev_item;
4032
4033	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4034	head = &fs_info->fs_devices->devices;
4035	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4036
4037	if (do_barriers) {
4038		ret = barrier_all_devices(fs_info);
4039		if (ret) {
4040			mutex_unlock(
4041				&fs_info->fs_devices->device_list_mutex);
4042			btrfs_handle_fs_error(fs_info, ret,
4043					      "errors while submitting device barriers.");
4044			return ret;
4045		}
4046	}
4047
4048	list_for_each_entry(dev, head, dev_list) {
4049		if (!dev->bdev) {
4050			total_errors++;
4051			continue;
4052		}
4053		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4054		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4055			continue;
4056
4057		btrfs_set_stack_device_generation(dev_item, 0);
4058		btrfs_set_stack_device_type(dev_item, dev->type);
4059		btrfs_set_stack_device_id(dev_item, dev->devid);
4060		btrfs_set_stack_device_total_bytes(dev_item,
4061						   dev->commit_total_bytes);
4062		btrfs_set_stack_device_bytes_used(dev_item,
4063						  dev->commit_bytes_used);
4064		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4065		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4066		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4067		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4068		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4069		       BTRFS_FSID_SIZE);
4070
4071		flags = btrfs_super_flags(sb);
4072		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4073
4074		ret = btrfs_validate_write_super(fs_info, sb);
4075		if (ret < 0) {
4076			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4077			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4078				"unexpected superblock corruption detected");
4079			return -EUCLEAN;
4080		}
4081
4082		ret = write_dev_supers(dev, sb, max_mirrors);
4083		if (ret)
4084			total_errors++;
4085	}
4086	if (total_errors > max_errors) {
4087		btrfs_err(fs_info, "%d errors while writing supers",
4088			  total_errors);
4089		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4090
4091		/* FUA is masked off if unsupported and can't be the reason */
4092		btrfs_handle_fs_error(fs_info, -EIO,
4093				      "%d errors while writing supers",
4094				      total_errors);
4095		return -EIO;
4096	}
4097
4098	total_errors = 0;
4099	list_for_each_entry(dev, head, dev_list) {
4100		if (!dev->bdev)
4101			continue;
4102		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4103		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4104			continue;
4105
4106		ret = wait_dev_supers(dev, max_mirrors);
4107		if (ret)
4108			total_errors++;
4109	}
4110	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4111	if (total_errors > max_errors) {
4112		btrfs_handle_fs_error(fs_info, -EIO,
4113				      "%d errors while writing supers",
4114				      total_errors);
4115		return -EIO;
4116	}
4117	return 0;
4118}
4119
4120/* Drop a fs root from the radix tree and free it. */
4121void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4122				  struct btrfs_root *root)
4123{
4124	bool drop_ref = false;
4125
4126	spin_lock(&fs_info->fs_roots_radix_lock);
4127	radix_tree_delete(&fs_info->fs_roots_radix,
4128			  (unsigned long)btrfs_root_id(root));
4129	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4130		drop_ref = true;
4131	spin_unlock(&fs_info->fs_roots_radix_lock);
4132
4133	if (BTRFS_FS_ERROR(fs_info)) {
4134		ASSERT(root->log_root == NULL);
4135		if (root->reloc_root) {
4136			btrfs_put_root(root->reloc_root);
4137			root->reloc_root = NULL;
4138		}
4139	}
4140
4141	if (drop_ref)
4142		btrfs_put_root(root);
4143}
4144
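/*
 * Run the pending delayed iputs and cleanup work, then commit the current
 * transaction to bring the on-disk state up to date.
 */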
4145int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4146{
4147	mutex_lock(&fs_info->cleaner_mutex);
4148	btrfs_run_delayed_iputs(fs_info);
4149	mutex_unlock(&fs_info->cleaner_mutex);
4150	wake_up_process(fs_info->cleaner_kthread);
4151
4152	/* Wait until the ongoing cleanup work is done. */
4153	down_write(&fs_info->cleanup_work_sem);
4154	up_write(&fs_info->cleanup_work_sem);
4155
4156	return btrfs_commit_current_transaction(fs_info->tree_root);
4157}
4158
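/*
 * Warn about and clean up any transactions still on the trans_list at the
 * very end of close_ctree(). On a clean shutdown the list is expected to be
 * empty.
 */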
4159static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4160{
4161	struct btrfs_transaction *trans;
4162	struct btrfs_transaction *tmp;
4163	bool found = false;
4164
4165	/*
4166	 * This function is only called at the very end of close_ctree(),
4167	 * thus no other running transaction, no need to take trans_lock.
4168	 */
4169	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4170	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4171		struct extent_state *cached = NULL;
4172		u64 dirty_bytes = 0;
4173		u64 cur = 0;
4174		u64 found_start;
4175		u64 found_end;
4176
4177		found = true;
4178		while (find_first_extent_bit(&trans->dirty_pages, cur,
4179			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
4180			dirty_bytes += found_end + 1 - found_start;
4181			cur = found_end + 1;
4182		}
4183		btrfs_warn(fs_info,
4184	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4185			   trans->transid, dirty_bytes);
4186		btrfs_cleanup_one_transaction(trans);
4187
4188		if (trans == fs_info->running_transaction)
4189			fs_info->running_transaction = NULL;
4190		list_del_init(&trans->list);
4191
4192		btrfs_put_transaction(trans);
4193		trace_btrfs_transaction_commit(fs_info);
4194	}
4195	ASSERT(!found);
4196}
4197
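/*
 * Tear down the filesystem at unmount time: stop the background threads and
 * workers, commit or clean up the remaining transactions, drop all roots and
 * block groups, and finally close the devices.
 */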
4198void __cold close_ctree(struct btrfs_fs_info *fs_info)
4199{
4200	int ret;
4201
4202	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4203
4204	/*
4205	 * If we had UNFINISHED_DROPS we could still be processing them, so
4206	 * clear that bit and wake up relocation so it can stop.
4207	 * We must do this before stopping the block group reclaim task, because
4208	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4209	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4210	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4211	 * return 1.
4212	 */
4213	btrfs_wake_unfinished_drop(fs_info);
4214
4215	/*
4216	 * We may have the reclaim task running and relocating a data block group,
4217	 * in which case it may create delayed iputs. So stop it before we park
4218	 * the cleaner kthread, otherwise we can get new delayed iputs after
4219	 * parking the cleaner, and that can make the async reclaim task hang
4220	 * if it's waiting for delayed iputs to complete, since the cleaner is
4221	 * parked and can not run delayed iputs - this will make us hang when
4222	 * trying to stop the async reclaim task.
4223	 */
4224	cancel_work_sync(&fs_info->reclaim_bgs_work);
4225	/*
4226	 * We don't want the cleaner to start new transactions, add more delayed
4227	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4228	 * because that frees the task_struct, and the transaction kthread might
4229	 * still try to wake up the cleaner.
4230	 */
4231	kthread_park(fs_info->cleaner_kthread);
4232
4233	/* wait for the qgroup rescan worker to stop */
4234	btrfs_qgroup_wait_for_completion(fs_info, false);
4235
4236	/* wait for the uuid_scan task to finish */
4237	down(&fs_info->uuid_tree_rescan_sem);
4238	/* avoid complains from lockdep et al., set sem back to initial state */
4239	up(&fs_info->uuid_tree_rescan_sem);
4240
4241	/* pause restriper - we want to resume on mount */
4242	btrfs_pause_balance(fs_info);
4243
4244	btrfs_dev_replace_suspend_for_unmount(fs_info);
4245
4246	btrfs_scrub_cancel(fs_info);
4247
4248	/* wait for any defraggers to finish */
4249	wait_event(fs_info->transaction_wait,
4250		   (atomic_read(&fs_info->defrag_running) == 0));
4251
4252	/* clear out the rbtree of defraggable inodes */
4253	btrfs_cleanup_defrag_inodes(fs_info);
4254
4255	/*
4256	 * Wait for any fixup workers to complete.
4257	 * If we don't wait for them here and they are still running by the time
4258	 * we call kthread_stop() against the cleaner kthread further below, we
4259	 * get a use-after-free on the cleaner because the fixup worker adds an
4260	 * inode to the list of delayed iputs and then attempts to wake up the
4261	 * cleaner kthread, which was already stopped and destroyed. We already
4262	 * parked the cleaner, but below we run all pending delayed iputs.
4263	 */
4264	btrfs_flush_workqueue(fs_info->fixup_workers);
4265	/*
4266	 * Similar case here, we have to wait for delalloc workers before we
4267	 * proceed below and stop the cleaner kthread, otherwise we trigger a
4268	 * use-after-free on the cleaner kthread task_struct when a delalloc
4269	 * worker running submit_compressed_extents() adds a delayed iput, which
4270	 * does a wake up on the cleaner kthread, which was already freed below
4271	 * when we call kthread_stop().
4272	 */
4273	btrfs_flush_workqueue(fs_info->delalloc_workers);
4274
4275	/*
4276	 * After we parked the cleaner kthread, ordered extents may have
4277	 * completed and created new delayed iputs. If one of the async reclaim
4278	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4279	 * can hang forever trying to stop it, because if a delayed iput is
4280	 * added after it ran btrfs_run_delayed_iputs() and before it called
4281	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4282	 * no one else to run iputs.
4283	 *
4284	 * So wait for all ongoing ordered extents to complete and then run
4285	 * delayed iputs. This works because once we reach this point no one
4286	 * can either create new ordered extents nor create delayed iputs
4287	 * through some other means.
4288	 *
4289	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4290	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4291	 * but the delayed iput for the respective inode is made only when doing
4292	 * the final btrfs_put_ordered_extent() (which must happen at
4293	 * btrfs_finish_ordered_io() when we are unmounting).
4294	 */
4295	btrfs_flush_workqueue(fs_info->endio_write_workers);
4296	/* Ordered extents for free space inodes. */
4297	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4298	btrfs_run_delayed_iputs(fs_info);
4299
4300	cancel_work_sync(&fs_info->async_reclaim_work);
4301	cancel_work_sync(&fs_info->async_data_reclaim_work);
4302	cancel_work_sync(&fs_info->preempt_reclaim_work);
4303	cancel_work_sync(&fs_info->em_shrinker_work);
4304
4305	/* Cancel or finish ongoing discard work */
4306	btrfs_discard_cleanup(fs_info);
4307
4308	if (!sb_rdonly(fs_info->sb)) {
4309		/*
4310		 * The cleaner kthread is stopped, so do one final pass over
4311		 * unused block groups.
4312		 */
4313		btrfs_delete_unused_bgs(fs_info);
4314
4315		/*
4316		 * There might be existing delayed inode workers still running
4317		 * and holding an empty delayed inode item. We must wait for
4318		 * them to complete first because they can create a transaction.
4319		 * This happens when someone calls btrfs_balance_delayed_items()
4320		 * and then a transaction commit runs the same delayed nodes
4321		 * before any delayed worker has done something with the nodes.
4322		 * We must wait for any worker here and not at transaction
4323		 * commit time since that could cause a deadlock.
4324		 * This is a very rare case.
4325		 */
4326		btrfs_flush_workqueue(fs_info->delayed_workers);
4327
4328		ret = btrfs_commit_super(fs_info);
4329		if (ret)
4330			btrfs_err(fs_info, "commit super ret %d", ret);
4331	}
4332
4333	if (BTRFS_FS_ERROR(fs_info))
4334		btrfs_error_commit_super(fs_info);
4335
4336	kthread_stop(fs_info->transaction_kthread);
4337	kthread_stop(fs_info->cleaner_kthread);
4338
4339	ASSERT(list_empty(&fs_info->delayed_iputs));
4340	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4341
4342	if (btrfs_check_quota_leak(fs_info)) {
4343		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4344		btrfs_err(fs_info, "qgroup reserved space leaked");
4345	}
4346
4347	btrfs_free_qgroup_config(fs_info);
4348	ASSERT(list_empty(&fs_info->delalloc_roots));
4349
4350	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4351		btrfs_info(fs_info, "at unmount delalloc count %lld",
4352		       percpu_counter_sum(&fs_info->delalloc_bytes));
4353	}
4354
4355	if (percpu_counter_sum(&fs_info->ordered_bytes))
4356		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4357			   percpu_counter_sum(&fs_info->ordered_bytes));
4358
4359	btrfs_sysfs_remove_mounted(fs_info);
4360	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4361
4362	btrfs_put_block_group_cache(fs_info);
4363
4364	/*
4365	 * We must make sure there is not any read request to
4366	 * submit after we stop all workers.
4367	 */
4368	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4369	btrfs_stop_all_workers(fs_info);
4370
4371	/* We shouldn't have any transaction open at this point */
4372	warn_about_uncommitted_trans(fs_info);
4373
4374	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4375	free_root_pointers(fs_info, true);
4376	btrfs_free_fs_roots(fs_info);
4377
4378	/*
4379	 * We must free the block groups after dropping the fs_roots as we could
4380	 * have had an IO error and have left over tree log blocks that aren't
4381	 * cleaned up until the fs roots are freed.  This makes the block group
4382	 * accounting appear to be wrong because there's pending reserved bytes,
4383	 * so make sure we do the block group cleanup afterwards.
4384	 */
4385	btrfs_free_block_groups(fs_info);
4386
4387	iput(fs_info->btree_inode);
4388
4389	btrfs_mapping_tree_free(fs_info);
4390	btrfs_close_devices(fs_info->fs_devices);
4391}
4392
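/*
 * Mark an extent buffer dirty under the current transaction. The buffer must
 * be write locked and its generation must match the running transaction,
 * otherwise the transaction is aborted.
 */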
4393void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4394			     struct extent_buffer *buf)
4395{
4396	struct btrfs_fs_info *fs_info = buf->fs_info;
4397	u64 transid = btrfs_header_generation(buf);
4398
4399#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4400	/*
4401	 * This is a fast path so only do this check if we have sanity tests
4402	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4403	 * outside of the sanity tests.
4404	 */
4405	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4406		return;
4407#endif
4408	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4409	ASSERT(trans->transid == fs_info->generation);
4410	btrfs_assert_tree_write_locked(buf);
4411	if (unlikely(transid != fs_info->generation)) {
4412		btrfs_abort_transaction(trans, -EUCLEAN);
4413		btrfs_crit(fs_info,
4414"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4415			   buf->start, transid, fs_info->generation);
4416	}
4417	set_extent_buffer_dirty(buf);
4418}
4419
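/*
 * Throttle the caller when too many dirty metadata bytes have accumulated,
 * optionally flushing the delayed items first.
 */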
4420static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4421					int flush_delayed)
4422{
4423	/*
4424	 * Looks as though older kernels can get into trouble with
4425	 * this code; they end up stuck in balance_dirty_pages forever.
4426	 */
4427	int ret;
4428
4429	if (current->flags & PF_MEMALLOC)
4430		return;
4431
4432	if (flush_delayed)
4433		btrfs_balance_delayed_items(fs_info);
4434
4435	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4436				     BTRFS_DIRTY_METADATA_THRESH,
4437				     fs_info->dirty_metadata_batch);
4438	if (ret > 0) {
4439		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4440	}
4441}
4442
4443void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4444{
4445	__btrfs_btree_balance_dirty(fs_info, 1);
4446}
4447
4448void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4449{
4450	__btrfs_btree_balance_dirty(fs_info, 0);
4451}
4452
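/*
 * Last resort cleanup for a filesystem in an error state: clean up the
 * remaining transactions and run the pending delayed iputs.
 */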
4453static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4454{
4455	/* cleanup FS via transaction */
4456	btrfs_cleanup_transaction(fs_info);
4457
4458	mutex_lock(&fs_info->cleaner_mutex);
4459	btrfs_run_delayed_iputs(fs_info);
4460	mutex_unlock(&fs_info->cleaner_mutex);
4461
4462	down_write(&fs_info->cleanup_work_sem);
4463	up_write(&fs_info->cleanup_work_sem);
4464}
4465
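/*
 * Free the log trees of all fs roots and then the log root tree itself,
 * without writing anything back. Used during the error cleanup path.
 */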
4466static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4467{
4468	struct btrfs_root *gang[8];
4469	u64 root_objectid = 0;
4470	int ret;
4471
4472	spin_lock(&fs_info->fs_roots_radix_lock);
4473	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4474					     (void **)gang, root_objectid,
4475					     ARRAY_SIZE(gang))) != 0) {
4476		int i;
4477
4478		for (i = 0; i < ret; i++)
4479			gang[i] = btrfs_grab_root(gang[i]);
4480		spin_unlock(&fs_info->fs_roots_radix_lock);
4481
4482		for (i = 0; i < ret; i++) {
4483			if (!gang[i])
4484				continue;
4485			root_objectid = btrfs_root_id(gang[i]);
4486			btrfs_free_log(NULL, gang[i]);
4487			btrfs_put_root(gang[i]);
4488		}
4489		root_objectid++;
4490		spin_lock(&fs_info->fs_roots_radix_lock);
4491	}
4492	spin_unlock(&fs_info->fs_roots_radix_lock);
4493	btrfs_free_log_root_tree(NULL, fs_info);
4494}
4495
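/*
 * Mark all ordered extents of the root with an I/O error so that their
 * completion handlers clean them up instead of finishing them normally.
 */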
4496static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4497{
4498	struct btrfs_ordered_extent *ordered;
4499
4500	spin_lock(&root->ordered_extent_lock);
4501	/*
4502	 * This will just short-circuit the ordered completion path, which will
4503	 * make sure the ordered extent gets properly cleaned up.
4504	 */
4505	list_for_each_entry(ordered, &root->ordered_extents,
4506			    root_extent_list)
4507		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4508	spin_unlock(&root->ordered_extent_lock);
4509}
4510
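/*
 * Apply btrfs_destroy_ordered_extents() to every root with ordered extents
 * and then wait for those ordered extents to complete and error out.
 */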
4511static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4512{
4513	struct btrfs_root *root;
4514	LIST_HEAD(splice);
4515
4516	spin_lock(&fs_info->ordered_root_lock);
4517	list_splice_init(&fs_info->ordered_roots, &splice);
4518	while (!list_empty(&splice)) {
4519		root = list_first_entry(&splice, struct btrfs_root,
4520					ordered_root);
4521		list_move_tail(&root->ordered_root,
4522			       &fs_info->ordered_roots);
4523
4524		spin_unlock(&fs_info->ordered_root_lock);
4525		btrfs_destroy_ordered_extents(root);
4526
4527		cond_resched();
4528		spin_lock(&fs_info->ordered_root_lock);
4529	}
4530	spin_unlock(&fs_info->ordered_root_lock);
4531
4532	/*
4533	 * We need this here because if we've been flipped read-only we won't
4534	 * get sync() from the umount, so we need to make sure any ordered
4535	 * extents that haven't had their dirty pages IO start writeout yet
4536	 * actually get run and error out properly.
4537	 */
4538	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
4539}
4540
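/*
 * Remove all inodes from the delalloc list of the root and invalidate their
 * pages, discarding any buffered writes that were never started.
 */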
4541static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4542{
4543	struct btrfs_inode *btrfs_inode;
4544	LIST_HEAD(splice);
4545
4546	spin_lock(&root->delalloc_lock);
4547	list_splice_init(&root->delalloc_inodes, &splice);
4548
4549	while (!list_empty(&splice)) {
4550		struct inode *inode = NULL;
4551		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4552					       delalloc_inodes);
4553		btrfs_del_delalloc_inode(btrfs_inode);
4554		spin_unlock(&root->delalloc_lock);
4555
4556		/*
4557		 * Make sure we get a live inode and that it'll not disappear
4558		 * meanwhile.
4559		 */
4560		inode = igrab(&btrfs_inode->vfs_inode);
4561		if (inode) {
4562			unsigned int nofs_flag;
4563
4564			nofs_flag = memalloc_nofs_save();
4565			invalidate_inode_pages2(inode->i_mapping);
4566			memalloc_nofs_restore(nofs_flag);
4567			iput(inode);
4568		}
4569		spin_lock(&root->delalloc_lock);
4570	}
4571	spin_unlock(&root->delalloc_lock);
4572}
4573
4574static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4575{
4576	struct btrfs_root *root;
4577	LIST_HEAD(splice);
4578
4579	spin_lock(&fs_info->delalloc_root_lock);
4580	list_splice_init(&fs_info->delalloc_roots, &splice);
4581	while (!list_empty(&splice)) {
4582		root = list_first_entry(&splice, struct btrfs_root,
4583					 delalloc_root);
4584		root = btrfs_grab_root(root);
4585		BUG_ON(!root);
4586		spin_unlock(&fs_info->delalloc_root_lock);
4587
4588		btrfs_destroy_delalloc_inodes(root);
4589		btrfs_put_root(root);
4590
4591		spin_lock(&fs_info->delalloc_root_lock);
4592	}
4593	spin_unlock(&fs_info->delalloc_root_lock);
4594}
4595
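/*
 * Clear @mark from @dirty_pages and drop the dirty state of all affected
 * extent buffers so that they are never written out.
 */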
4596static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4597					 struct extent_io_tree *dirty_pages,
4598					 int mark)
4599{
4600	struct extent_buffer *eb;
4601	u64 start = 0;
4602	u64 end;
4603
4604	while (find_first_extent_bit(dirty_pages, start, &start, &end,
4605				     mark, NULL)) {
4606		clear_extent_bits(dirty_pages, start, end, mark);
4607		while (start <= end) {
4608			eb = find_extent_buffer(fs_info, start);
4609			start += fs_info->nodesize;
4610			if (!eb)
4611				continue;
4612
4613			btrfs_tree_lock(eb);
4614			wait_on_extent_buffer_writeback(eb);
4615			btrfs_clear_buffer_dirty(NULL, eb);
4616			btrfs_tree_unlock(eb);
4617
4618			free_extent_buffer_stale(eb);
4619		}
4620	}
4621}
4622
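/*
 * Unpin all extent ranges recorded in @unpin during error cleanup, returning
 * their space to the free space accounting.
 */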
4623static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4624					struct extent_io_tree *unpin)
4625{
4626	u64 start;
4627	u64 end;
4628
4629	while (1) {
4630		struct extent_state *cached_state = NULL;
4631
4632		/*
4633		 * The btrfs_finish_extent_commit() may get the same range as
4634		 * ours between find_first_extent_bit and clear_extent_dirty.
4635		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4636		 * unpinning of the same extent range.
4637		 */
4638		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4639		if (!find_first_extent_bit(unpin, 0, &start, &end,
4640					   EXTENT_DIRTY, &cached_state)) {
4641			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4642			break;
4643		}
4644
4645		clear_extent_dirty(unpin, start, end, &cached_state);
4646		free_extent_state(cached_state);
4647		btrfs_error_unpin_extent_range(fs_info, start, end);
4648		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4649		cond_resched();
4650	}
4651}
4652
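/*
 * Release the free space cache inode of a block group during error cleanup,
 * discarding its dirty pages.
 */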
4653static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4654{
4655	struct inode *inode;
4656
4657	inode = cache->io_ctl.inode;
4658	if (inode) {
4659		unsigned int nofs_flag;
4660
4661		nofs_flag = memalloc_nofs_save();
4662		invalidate_inode_pages2(inode->i_mapping);
4663		memalloc_nofs_restore(nofs_flag);
4664
4665		BTRFS_I(inode)->generation = 0;
4666		cache->io_ctl.inode = NULL;
4667		iput(inode);
4668	}
4669	ASSERT(cache->io_ctl.pages == NULL);
4670	btrfs_put_block_group(cache);
4671}
4672
4673void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4674			     struct btrfs_fs_info *fs_info)
4675{
4676	struct btrfs_block_group *cache;
4677
4678	spin_lock(&cur_trans->dirty_bgs_lock);
4679	while (!list_empty(&cur_trans->dirty_bgs)) {
4680		cache = list_first_entry(&cur_trans->dirty_bgs,
4681					 struct btrfs_block_group,
4682					 dirty_list);
4683
4684		if (!list_empty(&cache->io_list)) {
4685			spin_unlock(&cur_trans->dirty_bgs_lock);
4686			list_del_init(&cache->io_list);
4687			btrfs_cleanup_bg_io(cache);
4688			spin_lock(&cur_trans->dirty_bgs_lock);
4689		}
4690
4691		list_del_init(&cache->dirty_list);
4692		spin_lock(&cache->lock);
4693		cache->disk_cache_state = BTRFS_DC_ERROR;
4694		spin_unlock(&cache->lock);
4695
4696		spin_unlock(&cur_trans->dirty_bgs_lock);
4697		btrfs_put_block_group(cache);
4698		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4699		spin_lock(&cur_trans->dirty_bgs_lock);
4700	}
4701	spin_unlock(&cur_trans->dirty_bgs_lock);
4702
4703	/*
4704	 * Refer to the definition of the io_bgs member for details on why it's
4705	 * safe to use it without any locking.
4706	 */
4707	while (!list_empty(&cur_trans->io_bgs)) {
4708		cache = list_first_entry(&cur_trans->io_bgs,
4709					 struct btrfs_block_group,
4710					 io_list);
4711
4712		list_del_init(&cache->io_list);
4713		spin_lock(&cache->lock);
4714		cache->disk_cache_state = BTRFS_DC_ERROR;
4715		spin_unlock(&cache->lock);
4716		btrfs_cleanup_bg_io(cache);
4717	}
4718}
4719
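/*
 * Free the per-transaction qgroup metadata reservations of all roots that
 * are still tagged as part of a transaction and clear the tag.
 */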
4720static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4721{
4722	struct btrfs_root *gang[8];
4723	int i;
4724	int ret;
4725
4726	spin_lock(&fs_info->fs_roots_radix_lock);
4727	while (1) {
4728		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4729						 (void **)gang, 0,
4730						 ARRAY_SIZE(gang),
4731						 BTRFS_ROOT_TRANS_TAG);
4732		if (ret == 0)
4733			break;
4734		for (i = 0; i < ret; i++) {
4735			struct btrfs_root *root = gang[i];
4736
4737			btrfs_qgroup_free_meta_all_pertrans(root);
4738			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4739					(unsigned long)btrfs_root_id(root),
4740					BTRFS_ROOT_TRANS_TAG);
4741		}
4742	}
4743	spin_unlock(&fs_info->fs_roots_radix_lock);
4744}
4745
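/*
 * Error cleanup of a single transaction: destroy its dirty block groups,
 * delayed refs and marked/pinned extents, and wake up all waiters while
 * walking the transaction states up to completed.
 */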
4746void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans)
4747{
4748	struct btrfs_fs_info *fs_info = cur_trans->fs_info;
4749	struct btrfs_device *dev, *tmp;
4750
4751	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4752	ASSERT(list_empty(&cur_trans->dirty_bgs));
4753	ASSERT(list_empty(&cur_trans->io_bgs));
4754
4755	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4756				 post_commit_list) {
4757		list_del_init(&dev->post_commit_list);
4758	}
4759
4760	btrfs_destroy_delayed_refs(cur_trans);
4761
4762	cur_trans->state = TRANS_STATE_COMMIT_START;
4763	wake_up(&fs_info->transaction_blocked_wait);
4764
4765	cur_trans->state = TRANS_STATE_UNBLOCKED;
4766	wake_up(&fs_info->transaction_wait);
4767
4768	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4769				     EXTENT_DIRTY);
4770	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4771
4772	cur_trans->state = TRANS_STATE_COMPLETED;
4773	wake_up(&cur_trans->commit_wait);
4774}
4775
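/*
 * Clean up all transactions on the trans_list: wait for those that are
 * already committing and destroy the rest, then tear down the remaining
 * ordered extents, delayed inodes, delalloc inodes and log trees.
 */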
4776static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4777{
4778	struct btrfs_transaction *t;
4779
4780	mutex_lock(&fs_info->transaction_kthread_mutex);
4781
4782	spin_lock(&fs_info->trans_lock);
4783	while (!list_empty(&fs_info->trans_list)) {
4784		t = list_first_entry(&fs_info->trans_list,
4785				     struct btrfs_transaction, list);
4786		if (t->state >= TRANS_STATE_COMMIT_PREP) {
4787			refcount_inc(&t->use_count);
4788			spin_unlock(&fs_info->trans_lock);
4789			btrfs_wait_for_commit(fs_info, t->transid);
4790			btrfs_put_transaction(t);
4791			spin_lock(&fs_info->trans_lock);
4792			continue;
4793		}
4794		if (t == fs_info->running_transaction) {
4795			t->state = TRANS_STATE_COMMIT_DOING;
4796			spin_unlock(&fs_info->trans_lock);
4797			/*
4798			 * We wait for 0 num_writers since we don't hold a trans
4799			 * handle open currently for this transaction.
4800			 */
4801			wait_event(t->writer_wait,
4802				   atomic_read(&t->num_writers) == 0);
4803		} else {
4804			spin_unlock(&fs_info->trans_lock);
4805		}
4806		btrfs_cleanup_one_transaction(t);
4807
4808		spin_lock(&fs_info->trans_lock);
4809		if (t == fs_info->running_transaction)
4810			fs_info->running_transaction = NULL;
4811		list_del_init(&t->list);
4812		spin_unlock(&fs_info->trans_lock);
4813
4814		btrfs_put_transaction(t);
4815		trace_btrfs_transaction_commit(fs_info);
4816		spin_lock(&fs_info->trans_lock);
4817	}
4818	spin_unlock(&fs_info->trans_lock);
4819	btrfs_destroy_all_ordered_extents(fs_info);
4820	btrfs_destroy_delayed_inodes(fs_info);
4821	btrfs_assert_delayed_root_empty(fs_info);
4822	btrfs_destroy_all_delalloc_inodes(fs_info);
4823	btrfs_drop_all_logs(fs_info);
4824	btrfs_free_all_qgroup_pertrans(fs_info);
4825	mutex_unlock(&fs_info->transaction_kthread_mutex);
4826
4827	return 0;
4828}
4829
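/*
 * Initialize root->free_objectid from the highest objectid found in the
 * tree, so that newly allocated objectids don't collide with existing items.
 */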
4830int btrfs_init_root_free_objectid(struct btrfs_root *root)
4831{
4832	struct btrfs_path *path;
4833	int ret;
4834	struct extent_buffer *l;
4835	struct btrfs_key search_key;
4836	struct btrfs_key found_key;
4837	int slot;
4838
4839	path = btrfs_alloc_path();
4840	if (!path)
4841		return -ENOMEM;
4842
4843	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4844	search_key.type = -1;
4845	search_key.offset = (u64)-1;
4846	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4847	if (ret < 0)
4848		goto error;
4849	if (ret == 0) {
4850		/*
4851		 * Key with offset -1 found, there would have to exist a root
4852		 * with such an id, but this is out of the valid range.
4853		 */
4854		ret = -EUCLEAN;
4855		goto error;
4856	}
4857	if (path->slots[0] > 0) {
4858		slot = path->slots[0] - 1;
4859		l = path->nodes[0];
4860		btrfs_item_key_to_cpu(l, &found_key, slot);
4861		root->free_objectid = max_t(u64, found_key.objectid + 1,
4862					    BTRFS_FIRST_FREE_OBJECTID);
4863	} else {
4864		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4865	}
4866	ret = 0;
4867error:
4868	btrfs_free_path(path);
4869	return ret;
4870}
4871
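/*
 * Hand out the next free objectid of the root, or -ENOSPC once the valid
 * range is exhausted.
 */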
4872int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4873{
4874	int ret;
4875	mutex_lock(&root->objectid_mutex);
4876
4877	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4878		btrfs_warn(root->fs_info,
4879			   "the objectid of root %llu reaches its highest value",
4880			   btrfs_root_id(root));
4881		ret = -ENOSPC;
4882		goto out;
4883	}
4884
4885	*objectid = root->free_objectid++;
4886	ret = 0;
4887out:
4888	mutex_unlock(&root->objectid_mutex);
4889	return ret;
4890}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/blkdev.h>
   8#include <linux/radix-tree.h>
   9#include <linux/writeback.h>
  10#include <linux/workqueue.h>
  11#include <linux/kthread.h>
  12#include <linux/slab.h>
  13#include <linux/migrate.h>
  14#include <linux/ratelimit.h>
  15#include <linux/uuid.h>
  16#include <linux/semaphore.h>
  17#include <linux/error-injection.h>
  18#include <linux/crc32c.h>
  19#include <linux/sched/mm.h>
  20#include <asm/unaligned.h>
  21#include <crypto/hash.h>
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "bio.h"
  27#include "print-tree.h"
  28#include "locking.h"
  29#include "tree-log.h"
  30#include "free-space-cache.h"
  31#include "free-space-tree.h"
  32#include "rcu-string.h"
  33#include "dev-replace.h"
  34#include "raid56.h"
  35#include "sysfs.h"
  36#include "qgroup.h"
  37#include "compression.h"
  38#include "tree-checker.h"
  39#include "ref-verify.h"
  40#include "block-group.h"
  41#include "discard.h"
  42#include "space-info.h"
  43#include "zoned.h"
  44#include "subpage.h"
  45#include "fs.h"
  46#include "accessors.h"
  47#include "extent-tree.h"
  48#include "root-tree.h"
  49#include "defrag.h"
  50#include "uuid-tree.h"
  51#include "relocation.h"
  52#include "scrub.h"
  53#include "super.h"
  54
  55#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
  56				 BTRFS_HEADER_FLAG_RELOC |\
  57				 BTRFS_SUPER_FLAG_ERROR |\
  58				 BTRFS_SUPER_FLAG_SEEDING |\
  59				 BTRFS_SUPER_FLAG_METADUMP |\
  60				 BTRFS_SUPER_FLAG_METADUMP_V2)
  61
  62static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
  63static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
  64
  65static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
  66{
  67	if (fs_info->csum_shash)
  68		crypto_free_shash(fs_info->csum_shash);
  69}
  70
  71/*
  72 * Compute the csum of a btree block and store the result to provided buffer.
  73 */
  74static void csum_tree_block(struct extent_buffer *buf, u8 *result)
  75{
  76	struct btrfs_fs_info *fs_info = buf->fs_info;
  77	int num_pages;
  78	u32 first_page_part;
  79	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
  80	char *kaddr;
  81	int i;
  82
  83	shash->tfm = fs_info->csum_shash;
  84	crypto_shash_init(shash);
  85
  86	if (buf->addr) {
  87		/* Pages are contiguous, handle them as a big one. */
  88		kaddr = buf->addr;
  89		first_page_part = fs_info->nodesize;
  90		num_pages = 1;
  91	} else {
  92		kaddr = folio_address(buf->folios[0]);
  93		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
  94		num_pages = num_extent_pages(buf);
  95	}
  96
  97	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
  98			    first_page_part - BTRFS_CSUM_SIZE);
  99
 100	/*
 101	 * Multiple single-page folios case would reach here.
 102	 *
 103	 * nodesize <= PAGE_SIZE and large folio all handled by above
 104	 * crypto_shash_update() already.
 105	 */
 106	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
 107		kaddr = folio_address(buf->folios[i]);
 108		crypto_shash_update(shash, kaddr, PAGE_SIZE);
 109	}
 110	memset(result, 0, BTRFS_CSUM_SIZE);
 111	crypto_shash_final(shash, result);
 112}
 113
 114/*
 115 * we can't consider a given block up to date unless the transid of the
 116 * block matches the transid in the parent node's pointer.  This is how we
 117 * detect blocks that either didn't get written at all or got written
 118 * in the wrong place.
 119 */
 120int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
 121{
 122	if (!extent_buffer_uptodate(eb))
 123		return 0;
 124
 125	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 126		return 1;
 127
 128	if (atomic)
 129		return -EAGAIN;
 130
 131	if (!extent_buffer_uptodate(eb) ||
 132	    btrfs_header_generation(eb) != parent_transid) {
 133		btrfs_err_rl(eb->fs_info,
 134"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
 135			eb->start, eb->read_mirror,
 136			parent_transid, btrfs_header_generation(eb));
 137		clear_extent_buffer_uptodate(eb);
 138		return 0;
 139	}
 140	return 1;
 141}
 142
 143static bool btrfs_supported_super_csum(u16 csum_type)
 144{
 145	switch (csum_type) {
 146	case BTRFS_CSUM_TYPE_CRC32:
 147	case BTRFS_CSUM_TYPE_XXHASH:
 148	case BTRFS_CSUM_TYPE_SHA256:
 149	case BTRFS_CSUM_TYPE_BLAKE2:
 150		return true;
 151	default:
 152		return false;
 153	}
 154}
 155
 156/*
 157 * Return 0 if the superblock checksum type matches the checksum value of that
 158 * algorithm. Pass the raw disk superblock data.
 159 */
 160int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
 161			   const struct btrfs_super_block *disk_sb)
 162{
 163	char result[BTRFS_CSUM_SIZE];
 164	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 165
 166	shash->tfm = fs_info->csum_shash;
 167
 168	/*
 169	 * The super_block structure does not span the whole
 170	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
 171	 * filled with zeros and is included in the checksum.
 172	 */
 173	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
 174			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
 175
 176	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
 177		return 1;
 178
 179	return 0;
 180}
 181
 182static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
 183				      int mirror_num)
 184{
 185	struct btrfs_fs_info *fs_info = eb->fs_info;
 186	int num_folios = num_extent_folios(eb);
 187	int ret = 0;
 188
 189	if (sb_rdonly(fs_info->sb))
 190		return -EROFS;
 191
 192	for (int i = 0; i < num_folios; i++) {
 193		struct folio *folio = eb->folios[i];
 194		u64 start = max_t(u64, eb->start, folio_pos(folio));
 195		u64 end = min_t(u64, eb->start + eb->len,
 196				folio_pos(folio) + folio_size(folio));
 197		u32 len = end - start;
 198
 199		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
 200					      start, folio, offset_in_folio(folio, start),
 201					      mirror_num);
 202		if (ret)
 203			break;
 204	}
 205
 206	return ret;
 207}
 208
 209/*
 210 * helper to read a given tree block, doing retries as required when
 211 * the checksums don't match and we have alternate mirrors to try.
 212 *
 213 * @check:		expected tree parentness check, see the comments of the
 214 *			structure for details.
 215 */
 216int btrfs_read_extent_buffer(struct extent_buffer *eb,
 217			     struct btrfs_tree_parent_check *check)
 218{
 219	struct btrfs_fs_info *fs_info = eb->fs_info;
 220	int failed = 0;
 221	int ret;
 222	int num_copies = 0;
 223	int mirror_num = 0;
 224	int failed_mirror = 0;
 225
 226	ASSERT(check);
 227
 228	while (1) {
 229		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 230		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
 231		if (!ret)
 232			break;
 233
 234		num_copies = btrfs_num_copies(fs_info,
 235					      eb->start, eb->len);
 236		if (num_copies == 1)
 237			break;
 238
 239		if (!failed_mirror) {
 240			failed = 1;
 241			failed_mirror = eb->read_mirror;
 242		}
 243
 244		mirror_num++;
 245		if (mirror_num == failed_mirror)
 246			mirror_num++;
 247
 248		if (mirror_num > num_copies)
 249			break;
 250	}
 251
 252	if (failed && !ret && failed_mirror)
 253		btrfs_repair_eb_io_failure(eb, failed_mirror);
 254
 255	return ret;
 256}
 257
 258/*
 259 * Checksum a dirty tree block before IO.
 260 */
 261blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
 262{
 263	struct extent_buffer *eb = bbio->private;
 264	struct btrfs_fs_info *fs_info = eb->fs_info;
 265	u64 found_start = btrfs_header_bytenr(eb);
 266	u64 last_trans;
 267	u8 result[BTRFS_CSUM_SIZE];
 268	int ret;
 269
 270	/* Btree blocks are always contiguous on disk. */
 271	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
 272		return BLK_STS_IOERR;
 273	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
 274		return BLK_STS_IOERR;
 275
 276	/*
 277	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
 278	 * checksum it but zero-out its content. This is done to preserve
 279	 * ordering of I/O without unnecessarily writing out data.
 280	 */
 281	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
 282		memzero_extent_buffer(eb, 0, eb->len);
 283		return BLK_STS_OK;
 284	}
 285
 286	if (WARN_ON_ONCE(found_start != eb->start))
 287		return BLK_STS_IOERR;
 288	if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
 289					       eb->start, eb->len)))
 290		return BLK_STS_IOERR;
 291
 292	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
 293				    offsetof(struct btrfs_header, fsid),
 294				    BTRFS_FSID_SIZE) == 0);
 295	csum_tree_block(eb, result);
 296
 297	if (btrfs_header_level(eb))
 298		ret = btrfs_check_node(eb);
 299	else
 300		ret = btrfs_check_leaf(eb);
 301
 302	if (ret < 0)
 303		goto error;
 304
 305	/*
 306	 * Also check the generation, the eb reached here must be newer than
 307	 * last committed. Or something seriously wrong happened.
 308	 */
 309	last_trans = btrfs_get_last_trans_committed(fs_info);
 310	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
 311		ret = -EUCLEAN;
 312		btrfs_err(fs_info,
 313			"block=%llu bad generation, have %llu expect > %llu",
 314			  eb->start, btrfs_header_generation(eb), last_trans);
 315		goto error;
 316	}
 317	write_extent_buffer(eb, result, 0, fs_info->csum_size);
 318	return BLK_STS_OK;
 319
 320error:
 321	btrfs_print_tree(eb, 0);
 322	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
 323		  eb->start);
 324	/*
 325	 * Be noisy if this is an extent buffer from a log tree. We don't abort
 326	 * a transaction in case there's a bad log tree extent buffer, we just
 327	 * fallback to a transaction commit. Still we want to know when there is
 328	 * a bad log tree extent buffer, as that may signal a bug somewhere.
 329	 */
 330	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
 331		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
 332	return errno_to_blk_status(ret);
 333}
 334
 335static bool check_tree_block_fsid(struct extent_buffer *eb)
 336{
 337	struct btrfs_fs_info *fs_info = eb->fs_info;
 338	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
 339	u8 fsid[BTRFS_FSID_SIZE];
 340
 341	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
 342			   BTRFS_FSID_SIZE);
 343
 344	/*
 345	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
 346	 * device_list_add() then overwrites it with the metadata_uuid if one is
 347	 * present. The same is true for a seed device as well. So the use of
 348	 * fs_devices::metadata_uuid is appropriate here.
 349	 */
 350	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
 351		return false;
 352
 353	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
 354		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
 355			return false;
 356
 357	return true;
 358}
 359
 360/* Do basic extent buffer checks at read time */
 361int btrfs_validate_extent_buffer(struct extent_buffer *eb,
 362				 struct btrfs_tree_parent_check *check)
 363{
 364	struct btrfs_fs_info *fs_info = eb->fs_info;
 365	u64 found_start;
 366	const u32 csum_size = fs_info->csum_size;
 367	u8 found_level;
 368	u8 result[BTRFS_CSUM_SIZE];
 369	const u8 *header_csum;
 370	int ret = 0;
 371
 372	ASSERT(check);
 373
 374	found_start = btrfs_header_bytenr(eb);
 375	if (found_start != eb->start) {
 376		btrfs_err_rl(fs_info,
 377			"bad tree block start, mirror %u want %llu have %llu",
 378			     eb->read_mirror, eb->start, found_start);
 379		ret = -EIO;
 380		goto out;
 381	}
 382	if (check_tree_block_fsid(eb)) {
 383		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
 384			     eb->start, eb->read_mirror);
 385		ret = -EIO;
 386		goto out;
 387	}
 388	found_level = btrfs_header_level(eb);
 389	if (found_level >= BTRFS_MAX_LEVEL) {
 390		btrfs_err(fs_info,
 391			"bad tree block level, mirror %u level %d on logical %llu",
 392			eb->read_mirror, btrfs_header_level(eb), eb->start);
 393		ret = -EIO;
 394		goto out;
 395	}
 396
 397	csum_tree_block(eb, result);
 398	header_csum = folio_address(eb->folios[0]) +
 399		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
 400
 401	if (memcmp(result, header_csum, csum_size) != 0) {
 402		btrfs_warn_rl(fs_info,
 403"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
 404			      eb->start, eb->read_mirror,
 405			      CSUM_FMT_VALUE(csum_size, header_csum),
 406			      CSUM_FMT_VALUE(csum_size, result),
 407			      btrfs_header_level(eb));
 408		ret = -EUCLEAN;
 409		goto out;
 410	}
 411
 412	if (found_level != check->level) {
 413		btrfs_err(fs_info,
 414		"level verify failed on logical %llu mirror %u wanted %u found %u",
 415			  eb->start, eb->read_mirror, check->level, found_level);
 416		ret = -EIO;
 417		goto out;
 418	}
 419	if (unlikely(check->transid &&
 420		     btrfs_header_generation(eb) != check->transid)) {
 421		btrfs_err_rl(eb->fs_info,
 422"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
 423				eb->start, eb->read_mirror, check->transid,
 424				btrfs_header_generation(eb));
 425		ret = -EIO;
 426		goto out;
 427	}
 428	if (check->has_first_key) {
 429		struct btrfs_key *expect_key = &check->first_key;
 430		struct btrfs_key found_key;
 431
 432		if (found_level)
 433			btrfs_node_key_to_cpu(eb, &found_key, 0);
 434		else
 435			btrfs_item_key_to_cpu(eb, &found_key, 0);
 436		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
 437			btrfs_err(fs_info,
 438"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
 439				  eb->start, check->transid,
 440				  expect_key->objectid,
 441				  expect_key->type, expect_key->offset,
 442				  found_key.objectid, found_key.type,
 443				  found_key.offset);
 444			ret = -EUCLEAN;
 445			goto out;
 446		}
 447	}
 448	if (check->owner_root) {
 449		ret = btrfs_check_eb_owner(eb, check->owner_root);
 450		if (ret < 0)
 451			goto out;
 452	}
 453
 454	/*
 455	 * If this is a leaf block and it is corrupt, set the corrupt bit so
 456	 * that we don't try to read the other copies of this block, just
 457	 * return -EIO.
 458	 */
 459	if (found_level == 0 && btrfs_check_leaf(eb)) {
 460		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 461		ret = -EIO;
 462	}
 463
 464	if (found_level > 0 && btrfs_check_node(eb))
 465		ret = -EIO;
 466
 467	if (ret)
 468		btrfs_err(fs_info,
 469		"read time tree block corruption detected on logical %llu mirror %u",
 470			  eb->start, eb->read_mirror);
 471out:
 472	return ret;
 473}
 474
 475#ifdef CONFIG_MIGRATION
 476static int btree_migrate_folio(struct address_space *mapping,
 477		struct folio *dst, struct folio *src, enum migrate_mode mode)
 478{
 479	/*
 480	 * We can't safely write a btree page from here,
 481	 * as we haven't done the locking hook.
 482	 */
 483	if (folio_test_dirty(src))
 484		return -EAGAIN;
 485	/*
 486	 * Buffers may be managed in a filesystem-specific way.
 487	 * We must have no buffers or drop them.
 488	 */
 489	if (folio_get_private(src) &&
 490	    !filemap_release_folio(src, GFP_KERNEL))
 491		return -EAGAIN;
 492	return migrate_folio(mapping, dst, src, mode);
 493}
 494#else
 495#define btree_migrate_folio NULL
 496#endif
 497
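    /*
     * Writeback entry point for the btree inode.  For non-integrity writeback
     * (WB_SYNC_NONE) we skip kupdate-style writeback entirely and otherwise
     * only start writing once the amount of dirty metadata crosses
     * BTRFS_DIRTY_METADATA_THRESH, so that background writeback doesn't flush
     * tree blocks too eagerly.
     */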
 498static int btree_writepages(struct address_space *mapping,
 499			    struct writeback_control *wbc)
 500{
 501	struct btrfs_fs_info *fs_info;
 502	int ret;
 503
 504	if (wbc->sync_mode == WB_SYNC_NONE) {
 505
 506		if (wbc->for_kupdate)
 507			return 0;
 508
 509		fs_info = BTRFS_I(mapping->host)->root->fs_info;
 510		/* This is a bit racy, but that's ok. */
 511		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
 512					     BTRFS_DIRTY_METADATA_THRESH,
 513					     fs_info->dirty_metadata_batch);
 514		if (ret < 0)
 515			return 0;
 516	}
 517	return btree_write_cache_pages(mapping, wbc);
 518}
 519
 520static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
 521{
 522	if (folio_test_writeback(folio) || folio_test_dirty(folio))
 523		return false;
 524
 525	return try_release_extent_buffer(&folio->page);
 526}
 527
 528static void btree_invalidate_folio(struct folio *folio, size_t offset,
 529				 size_t length)
 530{
 531	struct extent_io_tree *tree;
 532	tree = &BTRFS_I(folio->mapping->host)->io_tree;
 533	extent_invalidate_folio(tree, folio, offset);
 534	btree_release_folio(folio, GFP_NOFS);
 535	if (folio_get_private(folio)) {
 536		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
 537			   "folio private not zero on folio %llu",
 538			   (unsigned long long)folio_pos(folio));
 539		folio_detach_private(folio);
 540	}
 541}
 542
 543#ifdef DEBUG
 544static bool btree_dirty_folio(struct address_space *mapping,
 545		struct folio *folio)
 546{
 547	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
 548	struct btrfs_subpage_info *spi = fs_info->subpage_info;
 549	struct btrfs_subpage *subpage;
 550	struct extent_buffer *eb;
 551	int cur_bit = 0;
 552	u64 page_start = folio_pos(folio);
 553
 554	if (fs_info->sectorsize == PAGE_SIZE) {
 555		eb = folio_get_private(folio);
 556		BUG_ON(!eb);
 557		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 558		BUG_ON(!atomic_read(&eb->refs));
 559		btrfs_assert_tree_write_locked(eb);
 560		return filemap_dirty_folio(mapping, folio);
 561	}
 562
 563	ASSERT(spi);
 564	subpage = folio_get_private(folio);
 565
 566	for (cur_bit = spi->dirty_offset;
 567	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
 568	     cur_bit++) {
 569		unsigned long flags;
 570		u64 cur;
 571
 572		spin_lock_irqsave(&subpage->lock, flags);
 573		if (!test_bit(cur_bit, subpage->bitmaps)) {
 574			spin_unlock_irqrestore(&subpage->lock, flags);
 575			continue;
 576		}
 577		spin_unlock_irqrestore(&subpage->lock, flags);
 578		cur = page_start + cur_bit * fs_info->sectorsize;
 579
 580		eb = find_extent_buffer(fs_info, cur);
 581		ASSERT(eb);
 582		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 583		ASSERT(atomic_read(&eb->refs));
 584		btrfs_assert_tree_write_locked(eb);
 585		free_extent_buffer(eb);
 586
 587		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
 588	}
 589	return filemap_dirty_folio(mapping, folio);
 590}
 591#else
 592#define btree_dirty_folio filemap_dirty_folio
 593#endif
 594
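    /* Address space operations for the btree inode's mapping. */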
 595static const struct address_space_operations btree_aops = {
 596	.writepages	= btree_writepages,
 597	.release_folio	= btree_release_folio,
 598	.invalidate_folio = btree_invalidate_folio,
 599	.migrate_folio	= btree_migrate_folio,
 600	.dirty_folio	= btree_dirty_folio,
 601};
 602
 603struct extent_buffer *btrfs_find_create_tree_block(
 604						struct btrfs_fs_info *fs_info,
 605						u64 bytenr, u64 owner_root,
 606						int level)
 607{
 608	if (btrfs_is_testing(fs_info))
 609		return alloc_test_extent_buffer(fs_info, bytenr);
 610	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
 611}
 612
 613/*
 614 * Read the tree block at logical address @bytenr and perform various
 615 * basic but critical verifications.
 616 *
 617 * @check:		expected tree parentness check, see comments of the
 618 *			structure for details.
 619 */
 620struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 621				      struct btrfs_tree_parent_check *check)
 622{
 623	struct extent_buffer *buf = NULL;
 624	int ret;
 625
 626	ASSERT(check);
 627
 628	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
 629					   check->level);
 630	if (IS_ERR(buf))
 631		return buf;
 632
 633	ret = btrfs_read_extent_buffer(buf, check);
 634	if (ret) {
 635		free_extent_buffer_stale(buf);
 636		return ERR_PTR(ret);
 637	}
 638	if (btrfs_check_eb_owner(buf, check->owner_root)) {
 639		free_extent_buffer_stale(buf);
 640		return ERR_PTR(-EUCLEAN);
 641	}
 642	return buf;
 643
 644}
 645
 646static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 647			 u64 objectid)
 648{
 649	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
 650
 651	memset(&root->root_key, 0, sizeof(root->root_key));
 652	memset(&root->root_item, 0, sizeof(root->root_item));
 653	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
 654	root->fs_info = fs_info;
 655	root->root_key.objectid = objectid;
 656	root->node = NULL;
 657	root->commit_root = NULL;
 658	root->state = 0;
 659	RB_CLEAR_NODE(&root->rb_node);
 660
 661	root->last_trans = 0;
 662	root->free_objectid = 0;
 663	root->nr_delalloc_inodes = 0;
 664	root->nr_ordered_extents = 0;
 665	root->inode_tree = RB_ROOT;
 666	/* GFP flags are compatible with XA_FLAGS_*. */
 667	xa_init_flags(&root->delayed_nodes, GFP_ATOMIC);
 668
 669	btrfs_init_root_block_rsv(root);
 670
 671	INIT_LIST_HEAD(&root->dirty_list);
 672	INIT_LIST_HEAD(&root->root_list);
 673	INIT_LIST_HEAD(&root->delalloc_inodes);
 674	INIT_LIST_HEAD(&root->delalloc_root);
 675	INIT_LIST_HEAD(&root->ordered_extents);
 676	INIT_LIST_HEAD(&root->ordered_root);
 677	INIT_LIST_HEAD(&root->reloc_dirty_list);
 678	spin_lock_init(&root->inode_lock);
 679	spin_lock_init(&root->delalloc_lock);
 680	spin_lock_init(&root->ordered_extent_lock);
 681	spin_lock_init(&root->accounting_lock);
 682	spin_lock_init(&root->qgroup_meta_rsv_lock);
 683	mutex_init(&root->objectid_mutex);
 684	mutex_init(&root->log_mutex);
 685	mutex_init(&root->ordered_extent_mutex);
 686	mutex_init(&root->delalloc_mutex);
 687	init_waitqueue_head(&root->qgroup_flush_wait);
 688	init_waitqueue_head(&root->log_writer_wait);
 689	init_waitqueue_head(&root->log_commit_wait[0]);
 690	init_waitqueue_head(&root->log_commit_wait[1]);
 691	INIT_LIST_HEAD(&root->log_ctxs[0]);
 692	INIT_LIST_HEAD(&root->log_ctxs[1]);
 693	atomic_set(&root->log_commit[0], 0);
 694	atomic_set(&root->log_commit[1], 0);
 695	atomic_set(&root->log_writers, 0);
 696	atomic_set(&root->log_batch, 0);
 697	refcount_set(&root->refs, 1);
 698	atomic_set(&root->snapshot_force_cow, 0);
 699	atomic_set(&root->nr_swapfiles, 0);
 700	btrfs_set_root_log_transid(root, 0);
 701	root->log_transid_committed = -1;
 702	btrfs_set_root_last_log_commit(root, 0);
 703	root->anon_dev = 0;
 704	if (!dummy) {
 705		extent_io_tree_init(fs_info, &root->dirty_log_pages,
 706				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
 707		extent_io_tree_init(fs_info, &root->log_csum_range,
 708				    IO_TREE_LOG_CSUM_RANGE);
 709	}
 710
 711	spin_lock_init(&root->root_item_lock);
 712	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 713#ifdef CONFIG_BTRFS_DEBUG
 714	INIT_LIST_HEAD(&root->leak_list);
 715	spin_lock(&fs_info->fs_roots_radix_lock);
 716	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
 717	spin_unlock(&fs_info->fs_roots_radix_lock);
 718#endif
 719}
 720
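    /*
     * Allocate a root structure and run the common __setup_root()
     * initialization on it.  Returns NULL if the allocation fails.
     */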
 721static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 722					   u64 objectid, gfp_t flags)
 723{
 724	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
 725	if (root)
 726		__setup_root(root, fs_info, objectid);
 727	return root;
 728}
 729
 730#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 731/* Should only be used by the testing infrastructure */
 732struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
 733{
 734	struct btrfs_root *root;
 735
 736	if (!fs_info)
 737		return ERR_PTR(-EINVAL);
 738
 739	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
 740	if (!root)
 741		return ERR_PTR(-ENOMEM);
 742
 743	/* We don't use the stripesize in selftests; just reset the allocation cursor. */
 744	root->alloc_bytenr = 0;
 745
 746	return root;
 747}
 748#endif
 749
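    /*
     * Global roots (the extent, csum and free space trees) are kept in an
     * rbtree keyed by (objectid, BTRFS_ROOT_ITEM_KEY, global root id); the
     * two comparators below feed rb_find_add() and rb_find().  An
     * illustrative lookup of the first csum tree (global root id 0):
     *
     *	struct btrfs_key key = {
     *		.objectid = BTRFS_CSUM_TREE_OBJECTID,
     *		.type = BTRFS_ROOT_ITEM_KEY,
     *		.offset = 0,
     *	};
     *	struct btrfs_root *root = btrfs_global_root(fs_info, &key);
     */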
 750static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
 751{
 752	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
 753	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
 754
 755	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
 756}
 757
 758static int global_root_key_cmp(const void *k, const struct rb_node *node)
 759{
 760	const struct btrfs_key *key = k;
 761	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
 762
 763	return btrfs_comp_cpu_keys(key, &root->root_key);
 764}
 765
 766int btrfs_global_root_insert(struct btrfs_root *root)
 767{
 768	struct btrfs_fs_info *fs_info = root->fs_info;
 769	struct rb_node *tmp;
 770	int ret = 0;
 771
 772	write_lock(&fs_info->global_root_lock);
 773	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
 774	write_unlock(&fs_info->global_root_lock);
 775
 776	if (tmp) {
 777		ret = -EEXIST;
 778		btrfs_warn(fs_info, "global root %llu %llu already exists",
 779				root->root_key.objectid, root->root_key.offset);
 780	}
 781	return ret;
 782}
 783
 784void btrfs_global_root_delete(struct btrfs_root *root)
 785{
 786	struct btrfs_fs_info *fs_info = root->fs_info;
 787
 788	write_lock(&fs_info->global_root_lock);
 789	rb_erase(&root->rb_node, &fs_info->global_root_tree);
 790	write_unlock(&fs_info->global_root_lock);
 791}
 792
 793struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
 794				     struct btrfs_key *key)
 795{
 796	struct rb_node *node;
 797	struct btrfs_root *root = NULL;
 798
 799	read_lock(&fs_info->global_root_lock);
 800	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
 801	if (node)
 802		root = container_of(node, struct btrfs_root, rb_node);
 803	read_unlock(&fs_info->global_root_lock);
 804
 805	return root;
 806}
 807
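    /*
     * Map a logical address to the id of the global root covering it.
     * Without the EXTENT_TREE_V2 incompat bit there is only one set of global
     * roots, so id 0 is returned; otherwise the id comes from the block group
     * that contains @bytenr (or from the first block group when @bytenr is 0).
     */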
 808static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
 809{
 810	struct btrfs_block_group *block_group;
 811	u64 ret;
 812
 813	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
 814		return 0;
 815
 816	if (bytenr)
 817		block_group = btrfs_lookup_block_group(fs_info, bytenr);
 818	else
 819		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
 820	ASSERT(block_group);
 821	if (!block_group)
 822		return 0;
 823	ret = block_group->global_root_id;
 824	btrfs_put_block_group(block_group);
 825
 826	return ret;
 827}
 828
 829struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 830{
 831	struct btrfs_key key = {
 832		.objectid = BTRFS_CSUM_TREE_OBJECTID,
 833		.type = BTRFS_ROOT_ITEM_KEY,
 834		.offset = btrfs_global_root_id(fs_info, bytenr),
 835	};
 836
 837	return btrfs_global_root(fs_info, &key);
 838}
 839
 840struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 841{
 842	struct btrfs_key key = {
 843		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
 844		.type = BTRFS_ROOT_ITEM_KEY,
 845		.offset = btrfs_global_root_id(fs_info, bytenr),
 846	};
 847
 848	return btrfs_global_root(fs_info, &key);
 849}
 850
 851struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
 852{
 853	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
 854		return fs_info->block_group_root;
 855	return btrfs_extent_root(fs_info, 0);
 856}
 857
 858struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 859				     u64 objectid)
 860{
 861	struct btrfs_fs_info *fs_info = trans->fs_info;
 862	struct extent_buffer *leaf;
 863	struct btrfs_root *tree_root = fs_info->tree_root;
 864	struct btrfs_root *root;
 865	struct btrfs_key key;
 866	unsigned int nofs_flag;
 867	int ret = 0;
 868
 869	/*
 870	 * We're holding a transaction handle, so use a NOFS memory allocation
 871	 * context to avoid deadlock if reclaim happens.
 872	 */
 873	nofs_flag = memalloc_nofs_save();
 874	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
 875	memalloc_nofs_restore(nofs_flag);
 876	if (!root)
 877		return ERR_PTR(-ENOMEM);
 878
 879	root->root_key.objectid = objectid;
 880	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 881	root->root_key.offset = 0;
 882
 883	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
 884				      0, BTRFS_NESTING_NORMAL);
 885	if (IS_ERR(leaf)) {
 886		ret = PTR_ERR(leaf);
 887		leaf = NULL;
 888		goto fail;
 889	}
 890
 891	root->node = leaf;
 892	btrfs_mark_buffer_dirty(trans, leaf);
 893
 894	root->commit_root = btrfs_root_node(root);
 895	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
 896
 897	btrfs_set_root_flags(&root->root_item, 0);
 898	btrfs_set_root_limit(&root->root_item, 0);
 899	btrfs_set_root_bytenr(&root->root_item, leaf->start);
 900	btrfs_set_root_generation(&root->root_item, trans->transid);
 901	btrfs_set_root_level(&root->root_item, 0);
 902	btrfs_set_root_refs(&root->root_item, 1);
 903	btrfs_set_root_used(&root->root_item, leaf->len);
 904	btrfs_set_root_last_snapshot(&root->root_item, 0);
 905	btrfs_set_root_dirid(&root->root_item, 0);
 906	if (is_fstree(objectid))
 907		generate_random_guid(root->root_item.uuid);
 908	else
 909		export_guid(root->root_item.uuid, &guid_null);
 910	btrfs_set_root_drop_level(&root->root_item, 0);
 911
 912	btrfs_tree_unlock(leaf);
 913
 914	key.objectid = objectid;
 915	key.type = BTRFS_ROOT_ITEM_KEY;
 916	key.offset = 0;
 917	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
 918	if (ret)
 919		goto fail;
 920
 921	return root;
 922
 923fail:
 924	btrfs_put_root(root);
 925
 926	return ERR_PTR(ret);
 927}
 928
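    /*
     * Allocate an in-memory root for a log tree.  Both the objectid and the
     * offset of the root key are set to BTRFS_TREE_LOG_OBJECTID here; callers
     * that attach the log to a subvolume overwrite the offset with the
     * subvolume's id.
     */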
 929static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 930					 struct btrfs_fs_info *fs_info)
 931{
 932	struct btrfs_root *root;
 933
 934	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
 935	if (!root)
 936		return ERR_PTR(-ENOMEM);
 937
 938	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
 939	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 940	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
 941
 942	return root;
 943}
 944
 945int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
 946			      struct btrfs_root *root)
 947{
 948	struct extent_buffer *leaf;
 949
 950	/*
 951	 * DON'T set SHAREABLE bit for log trees.
 952	 *
 953	 * Log trees are not exposed to user space thus can't be snapshotted,
 954	 * and they go away before a real commit is actually done.
 955	 *
 956	 * They do store pointers to file data extents, and those reference
 957	 * counts still get updated (along with back refs to the log tree).
 958	 */
 959
 960	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
 961			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
 962	if (IS_ERR(leaf))
 963		return PTR_ERR(leaf);
 964
 965	root->node = leaf;
 966
 967	btrfs_mark_buffer_dirty(trans, root->node);
 968	btrfs_tree_unlock(root->node);
 969
 970	return 0;
 971}
 972
 973int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 974			     struct btrfs_fs_info *fs_info)
 975{
 976	struct btrfs_root *log_root;
 977
 978	log_root = alloc_log_tree(trans, fs_info);
 979	if (IS_ERR(log_root))
 980		return PTR_ERR(log_root);
 981
 982	if (!btrfs_is_zoned(fs_info)) {
 983		int ret = btrfs_alloc_log_tree_node(trans, log_root);
 984
 985		if (ret) {
 986			btrfs_put_root(log_root);
 987			return ret;
 988		}
 989	}
 990
 991	WARN_ON(fs_info->log_root_tree);
 992	fs_info->log_root_tree = log_root;
 993	return 0;
 994}
 995
 996int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 997		       struct btrfs_root *root)
 998{
 999	struct btrfs_fs_info *fs_info = root->fs_info;
1000	struct btrfs_root *log_root;
1001	struct btrfs_inode_item *inode_item;
1002	int ret;
1003
1004	log_root = alloc_log_tree(trans, fs_info);
1005	if (IS_ERR(log_root))
1006		return PTR_ERR(log_root);
1007
1008	ret = btrfs_alloc_log_tree_node(trans, log_root);
1009	if (ret) {
1010		btrfs_put_root(log_root);
1011		return ret;
1012	}
1013
1014	log_root->last_trans = trans->transid;
1015	log_root->root_key.offset = root->root_key.objectid;
1016
1017	inode_item = &log_root->root_item.inode;
1018	btrfs_set_stack_inode_generation(inode_item, 1);
1019	btrfs_set_stack_inode_size(inode_item, 3);
1020	btrfs_set_stack_inode_nlink(inode_item, 1);
1021	btrfs_set_stack_inode_nbytes(inode_item,
1022				     fs_info->nodesize);
1023	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1024
1025	btrfs_set_root_node(&log_root->root_item, log_root->node);
1026
1027	WARN_ON(root->log_root);
1028	root->log_root = log_root;
1029	btrfs_set_root_log_transid(root, 0);
1030	root->log_transid_committed = -1;
1031	btrfs_set_root_last_log_commit(root, 0);
1032	return 0;
1033}
1034
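    /*
     * Read a tree root from disk: look up its root item in @tree_root, then
     * read and verify the root node it points to.  The returned root holds a
     * single reference which the caller drops with btrfs_put_root().
     */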
1035static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
1036					      struct btrfs_path *path,
1037					      struct btrfs_key *key)
1038{
1039	struct btrfs_root *root;
1040	struct btrfs_tree_parent_check check = { 0 };
1041	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1042	u64 generation;
1043	int ret;
1044	int level;
1045
1046	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1047	if (!root)
1048		return ERR_PTR(-ENOMEM);
1049
1050	ret = btrfs_find_root(tree_root, key, path,
1051			      &root->root_item, &root->root_key);
1052	if (ret) {
1053		if (ret > 0)
1054			ret = -ENOENT;
1055		goto fail;
1056	}
1057
1058	generation = btrfs_root_generation(&root->root_item);
1059	level = btrfs_root_level(&root->root_item);
1060	check.level = level;
1061	check.transid = generation;
1062	check.owner_root = key->objectid;
1063	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
1064				     &check);
1065	if (IS_ERR(root->node)) {
1066		ret = PTR_ERR(root->node);
1067		root->node = NULL;
1068		goto fail;
1069	}
1070	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1071		ret = -EIO;
1072		goto fail;
1073	}
1074
1075	/*
1076	 * For a real fs, and for trees other than log/reloc trees, the root
1077	 * owner must match the owner of its root node.
1078	 */
1079	if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) &&
1080	    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
1081	    root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1082	    root->root_key.objectid != btrfs_header_owner(root->node)) {
1083		btrfs_crit(fs_info,
1084"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
1085			   root->root_key.objectid, root->node->start,
1086			   btrfs_header_owner(root->node),
1087			   root->root_key.objectid);
1088		ret = -EUCLEAN;
1089		goto fail;
1090	}
1091	root->commit_root = btrfs_root_node(root);
1092	return root;
1093fail:
1094	btrfs_put_root(root);
1095	return ERR_PTR(ret);
1096}
1097
1098struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1099					struct btrfs_key *key)
1100{
1101	struct btrfs_root *root;
1102	struct btrfs_path *path;
1103
1104	path = btrfs_alloc_path();
1105	if (!path)
1106		return ERR_PTR(-ENOMEM);
1107	root = read_tree_root_path(tree_root, path, key);
1108	btrfs_free_path(path);
1109
1110	return root;
1111}
1112
1113/*
1114 * Initialize subvolume root in-memory structure
1115 *
1116 * @anon_dev:	anonymous device to attach to the root; if zero, allocate a new one
1117 */
1118static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1119{
1120	int ret;
1121
1122	btrfs_drew_lock_init(&root->snapshot_lock);
1123
1124	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
1125	    !btrfs_is_data_reloc_root(root) &&
1126	    is_fstree(root->root_key.objectid)) {
1127		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1128		btrfs_check_and_init_root_item(&root->root_item);
1129	}
1130
1131	/*
1132	 * Don't assign an anonymous block device to roots that are not exposed
1133	 * to user space, as the id pool is limited to 1M entries.
1134	 */
1135	if (is_fstree(root->root_key.objectid) &&
1136	    btrfs_root_refs(&root->root_item) > 0) {
1137		if (!anon_dev) {
1138			ret = get_anon_bdev(&root->anon_dev);
1139			if (ret)
1140				goto fail;
1141		} else {
1142			root->anon_dev = anon_dev;
1143		}
1144	}
1145
1146	mutex_lock(&root->objectid_mutex);
1147	ret = btrfs_init_root_free_objectid(root);
1148	if (ret) {
1149		mutex_unlock(&root->objectid_mutex);
1150		goto fail;
1151	}
1152
1153	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
1154
1155	mutex_unlock(&root->objectid_mutex);
1156
1157	return 0;
1158fail:
1159	/* The caller is responsible for calling btrfs_free_fs_root(). */
1160	return ret;
1161}
1162
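    /*
     * Look up an already cached subvolume root in the fs_roots radix tree and
     * grab a reference for the caller, or return NULL if it is not cached.
     */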
1163static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1164					       u64 root_id)
1165{
1166	struct btrfs_root *root;
1167
1168	spin_lock(&fs_info->fs_roots_radix_lock);
1169	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1170				 (unsigned long)root_id);
1171	root = btrfs_grab_root(root);
1172	spin_unlock(&fs_info->fs_roots_radix_lock);
1173	return root;
1174}
1175
1176static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
1177						u64 objectid)
1178{
1179	struct btrfs_key key = {
1180		.objectid = objectid,
1181		.type = BTRFS_ROOT_ITEM_KEY,
1182		.offset = 0,
1183	};
1184
1185	switch (objectid) {
1186	case BTRFS_ROOT_TREE_OBJECTID:
1187		return btrfs_grab_root(fs_info->tree_root);
1188	case BTRFS_EXTENT_TREE_OBJECTID:
1189		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1190	case BTRFS_CHUNK_TREE_OBJECTID:
1191		return btrfs_grab_root(fs_info->chunk_root);
1192	case BTRFS_DEV_TREE_OBJECTID:
1193		return btrfs_grab_root(fs_info->dev_root);
1194	case BTRFS_CSUM_TREE_OBJECTID:
1195		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1196	case BTRFS_QUOTA_TREE_OBJECTID:
1197		return btrfs_grab_root(fs_info->quota_root);
1198	case BTRFS_UUID_TREE_OBJECTID:
1199		return btrfs_grab_root(fs_info->uuid_root);
1200	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
1201		return btrfs_grab_root(fs_info->block_group_root);
1202	case BTRFS_FREE_SPACE_TREE_OBJECTID:
1203		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1204	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
1205		return btrfs_grab_root(fs_info->stripe_root);
1206	default:
1207		return NULL;
1208	}
1209}
1210
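    /*
     * Insert a root into the fs_roots radix tree, keyed by its objectid.  On
     * success a reference is taken for the tree and BTRFS_ROOT_IN_RADIX is
     * set; -EEXIST is returned if someone else inserted the root first.
     */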
1211int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1212			 struct btrfs_root *root)
1213{
1214	int ret;
1215
1216	ret = radix_tree_preload(GFP_NOFS);
1217	if (ret)
1218		return ret;
1219
1220	spin_lock(&fs_info->fs_roots_radix_lock);
1221	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1222				(unsigned long)root->root_key.objectid,
1223				root);
1224	if (ret == 0) {
1225		btrfs_grab_root(root);
1226		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1227	}
1228	spin_unlock(&fs_info->fs_roots_radix_lock);
1229	radix_tree_preload_end();
1230
1231	return ret;
1232}
1233
1234void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
1235{
1236#ifdef CONFIG_BTRFS_DEBUG
1237	struct btrfs_root *root;
1238
1239	while (!list_empty(&fs_info->allocated_roots)) {
1240		char buf[BTRFS_ROOT_NAME_BUF_LEN];
1241
1242		root = list_first_entry(&fs_info->allocated_roots,
1243					struct btrfs_root, leak_list);
1244		btrfs_err(fs_info, "leaked root %s refcount %d",
1245			  btrfs_root_name(&root->root_key, buf),
1246			  refcount_read(&root->refs));
1247		while (refcount_read(&root->refs) > 1)
1248			btrfs_put_root(root);
1249		btrfs_put_root(root);
1250	}
1251#endif
1252}
1253
1254static void free_global_roots(struct btrfs_fs_info *fs_info)
1255{
1256	struct btrfs_root *root;
1257	struct rb_node *node;
1258
1259	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
1260		root = rb_entry(node, struct btrfs_root, rb_node);
1261		rb_erase(&root->rb_node, &fs_info->global_root_tree);
1262		btrfs_put_root(root);
1263	}
1264}
1265
1266void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1267{
1268	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1269	percpu_counter_destroy(&fs_info->delalloc_bytes);
1270	percpu_counter_destroy(&fs_info->ordered_bytes);
1271	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1272	btrfs_free_csum_hash(fs_info);
1273	btrfs_free_stripe_hash_table(fs_info);
1274	btrfs_free_ref_cache(fs_info);
1275	kfree(fs_info->balance_ctl);
1276	kfree(fs_info->delayed_root);
1277	free_global_roots(fs_info);
1278	btrfs_put_root(fs_info->tree_root);
1279	btrfs_put_root(fs_info->chunk_root);
1280	btrfs_put_root(fs_info->dev_root);
1281	btrfs_put_root(fs_info->quota_root);
1282	btrfs_put_root(fs_info->uuid_root);
1283	btrfs_put_root(fs_info->fs_root);
1284	btrfs_put_root(fs_info->data_reloc_root);
1285	btrfs_put_root(fs_info->block_group_root);
1286	btrfs_put_root(fs_info->stripe_root);
1287	btrfs_check_leaked_roots(fs_info);
1288	btrfs_extent_buffer_leak_debug_check(fs_info);
1289	kfree(fs_info->super_copy);
1290	kfree(fs_info->super_for_commit);
1291	kfree(fs_info->subpage_info);
1292	kvfree(fs_info);
1293}
1294
1295
1296/*
1297 * Get an in-memory reference of a root structure.
1298 *
1299 * For essential trees like root/extent tree, we grab it from fs_info directly.
1300 * For subvolume trees, we check the cached filesystem roots first. If not
1301 * found, then read it from disk and add it to cached fs roots.
1302 *
1303 * Caller should release the root by calling btrfs_put_root() after the usage.
1304 *
1305 * NOTE: Reloc and log trees can't be read by this function as they share the
1306 *	 same root objectid.
1307 *
1308 * @objectid:	root id
1309 * @anon_dev:	preallocated anonymous block device number for new roots,
1310 *		pass NULL for a new allocation
1311 * @check_ref:	whether to check root item references; if true, return -ENOENT
1312 *		for orphan roots
1313 */
1314static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1315					     u64 objectid, dev_t *anon_dev,
1316					     bool check_ref)
1317{
1318	struct btrfs_root *root;
1319	struct btrfs_path *path;
1320	struct btrfs_key key;
1321	int ret;
1322
1323	root = btrfs_get_global_root(fs_info, objectid);
1324	if (root)
1325		return root;
1326
1327	/*
1328	 * If we're called for non-subvolume trees, and the above function didn't
1329	 * find one, do not try to read it from disk.
1330	 *
1331	 * This is namely for free-space-tree and quota tree, which can change
1332	 * at runtime and should only be grabbed from fs_info.
1333	 */
1334	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1335		return ERR_PTR(-ENOENT);
1336again:
1337	root = btrfs_lookup_fs_root(fs_info, objectid);
1338	if (root) {
1339		/*
1340		 * Some other caller may have read out the newly inserted
1341		 * subvolume already (for things like backref walk etc).  Not
1342		 * that common but still possible.  In that case, we just need
1343		 * to free the anon_dev.
1344		 */
1345		if (unlikely(anon_dev && *anon_dev)) {
1346			free_anon_bdev(*anon_dev);
1347			*anon_dev = 0;
1348		}
1349
1350		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1351			btrfs_put_root(root);
1352			return ERR_PTR(-ENOENT);
1353		}
1354		return root;
1355	}
1356
1357	key.objectid = objectid;
1358	key.type = BTRFS_ROOT_ITEM_KEY;
1359	key.offset = (u64)-1;
1360	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1361	if (IS_ERR(root))
1362		return root;
1363
1364	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1365		ret = -ENOENT;
1366		goto fail;
1367	}
1368
1369	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
1370	if (ret)
1371		goto fail;
1372
1373	path = btrfs_alloc_path();
1374	if (!path) {
1375		ret = -ENOMEM;
1376		goto fail;
1377	}
1378	key.objectid = BTRFS_ORPHAN_OBJECTID;
1379	key.type = BTRFS_ORPHAN_ITEM_KEY;
1380	key.offset = objectid;
1381
1382	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1383	btrfs_free_path(path);
1384	if (ret < 0)
1385		goto fail;
1386	if (ret == 0)
1387		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1388
1389	ret = btrfs_insert_fs_root(fs_info, root);
1390	if (ret) {
1391		if (ret == -EEXIST) {
1392			btrfs_put_root(root);
1393			goto again;
1394		}
1395		goto fail;
1396	}
1397	return root;
1398fail:
1399	/*
1400	 * If our caller provided us an anonymous device, then it's the caller's
1401	 * responsibility to free it in case we fail. So we have to set our
1402	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
1403	 * and once again by our caller.
1404	 */
1405	if (anon_dev && *anon_dev)
1406		root->anon_dev = 0;
1407	btrfs_put_root(root);
1408	return ERR_PTR(ret);
1409}
1410
1411/*
1412 * Get in-memory reference of a root structure
1413 *
1414 * @objectid:	tree objectid
1415 * @check_ref:	if set, verify that the tree exists and the item has at least
1416 *		one reference
1417 */
1418struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1419				     u64 objectid, bool check_ref)
1420{
1421	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
1422}
1423
1424/*
1425 * Get in-memory reference of a root structure, created as new, optionally pass
1426 * the anonymous block device id
1427 *
1428 * @objectid:	tree objectid
1429 * @anon_dev:	if NULL, allocate a new anonymous block device, otherwise
1430 *		use the device number it points to
1431 */
1432struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1433					 u64 objectid, dev_t *anon_dev)
1434{
1435	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1436}
1437
1438/*
1439 * Return a root for the given objectid.
1440 *
1441 * @fs_info:	the fs_info
1442 * @objectid:	the objectid we need to lookup
1443 *
1444 * This is exclusively used for backref walking, and exists specifically because
1445 * of how the qgroups code does lookups.  Qgroups will do a backref lookup at delayed ref
1446 * creation time, which means we may have to read the tree_root in order to look
1447 * up a fs root that is not in memory.  If the root is not in memory we will
1448 * read the tree root commit root and look up the fs root from there.  This is a
1449 * temporary root, it will not be inserted into the radix tree as it doesn't
1450 * have the most uptodate information, it'll simply be discarded once the
1451 * backref code is finished using the root.
1452 */
1453struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
1454						 struct btrfs_path *path,
1455						 u64 objectid)
1456{
1457	struct btrfs_root *root;
1458	struct btrfs_key key;
1459
1460	ASSERT(path->search_commit_root && path->skip_locking);
1461
1462	/*
1463	 * This can return -ENOENT if we ask for a root that doesn't exist, but
1464	 * since this is called via the backref walking code we won't be looking
1465	 * up a root that doesn't exist, unless there's corruption.  So if root
1466	 * != NULL just return it.
1467	 */
1468	root = btrfs_get_global_root(fs_info, objectid);
1469	if (root)
1470		return root;
1471
1472	root = btrfs_lookup_fs_root(fs_info, objectid);
1473	if (root)
1474		return root;
1475
1476	key.objectid = objectid;
1477	key.type = BTRFS_ROOT_ITEM_KEY;
1478	key.offset = (u64)-1;
1479	root = read_tree_root_path(fs_info->tree_root, path, &key);
1480	btrfs_release_path(path);
1481
1482	return root;
1483}
1484
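    /*
     * Background cleaner thread.  Once the filesystem is fully open it runs
     * delayed iputs, cleans one deleted snapshot per iteration under the
     * cleaner_mutex, kicks off inode defrag and deletes/reclaims unused block
     * groups.  It backs off whenever btrfs_need_cleaner_sleep() says so and
     * is woken up by the transaction kthread.
     */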
1485static int cleaner_kthread(void *arg)
1486{
1487	struct btrfs_fs_info *fs_info = arg;
1488	int again;
1489
1490	while (1) {
1491		again = 0;
1492
1493		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1494
1495		/* Make the cleaner go to sleep early. */
1496		if (btrfs_need_cleaner_sleep(fs_info))
1497			goto sleep;
1498
1499		/*
1500		 * Do not do anything if we might cause open_ctree() to block
1501		 * before we have finished mounting the filesystem.
1502		 */
1503		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1504			goto sleep;
1505
1506		if (!mutex_trylock(&fs_info->cleaner_mutex))
1507			goto sleep;
1508
1509		/*
1510		 * Recheck in case the status of the fs changed between the
1511		 * check above and the trylock.
1512		 */
1513		if (btrfs_need_cleaner_sleep(fs_info)) {
1514			mutex_unlock(&fs_info->cleaner_mutex);
1515			goto sleep;
1516		}
1517
1518		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
1519			btrfs_sysfs_feature_update(fs_info);
1520
1521		btrfs_run_delayed_iputs(fs_info);
1522
1523		again = btrfs_clean_one_deleted_snapshot(fs_info);
1524		mutex_unlock(&fs_info->cleaner_mutex);
1525
1526		/*
1527		 * The defragger has dealt with the R/O remount and umount,
1528		 * so we needn't do anything special here.
1529		 */
1530		btrfs_run_defrag_inodes(fs_info);
1531
1532		/*
1533		 * This acquires fs_info->reclaim_bgs_lock to avoid racing
1534		 * with relocation (btrfs_relocate_chunk), and relocation
1535		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1536		 * after acquiring fs_info->reclaim_bgs_lock. So we can't hold,
1537		 * nor do we need to hold, fs_info->cleaner_mutex when deleting
1538		 * unused block groups.
1539		 */
1540		btrfs_delete_unused_bgs(fs_info);
1541
1542		/*
1543		 * Reclaim block groups in the reclaim_bgs list after we deleted
1544		 * all unused block_groups. This possibly gives us some more free
1545		 * space.
1546		 */
1547		btrfs_reclaim_bgs(fs_info);
1548sleep:
1549		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1550		if (kthread_should_park())
1551			kthread_parkme();
1552		if (kthread_should_stop())
1553			return 0;
1554		if (!again) {
1555			set_current_state(TASK_INTERRUPTIBLE);
1556			schedule();
1557			__set_current_state(TASK_RUNNING);
1558		}
1559	}
1560}
1561
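    /*
     * Background transaction commit thread.  It wakes up periodically and
     * commits the running transaction once it is older than
     * fs_info->commit_interval or when a commit was explicitly requested via
     * the BTRFS_FS_COMMIT_TRANS flag; a still young transaction is merely
     * ended and the thread goes back to sleep.
     */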
1562static int transaction_kthread(void *arg)
1563{
1564	struct btrfs_root *root = arg;
1565	struct btrfs_fs_info *fs_info = root->fs_info;
1566	struct btrfs_trans_handle *trans;
1567	struct btrfs_transaction *cur;
1568	u64 transid;
1569	time64_t delta;
1570	unsigned long delay;
1571	bool cannot_commit;
1572
1573	do {
1574		cannot_commit = false;
1575		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
1576		mutex_lock(&fs_info->transaction_kthread_mutex);
1577
1578		spin_lock(&fs_info->trans_lock);
1579		cur = fs_info->running_transaction;
1580		if (!cur) {
1581			spin_unlock(&fs_info->trans_lock);
1582			goto sleep;
1583		}
1584
1585		delta = ktime_get_seconds() - cur->start_time;
1586		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1587		    cur->state < TRANS_STATE_COMMIT_PREP &&
1588		    delta < fs_info->commit_interval) {
1589			spin_unlock(&fs_info->trans_lock);
1590			delay -= msecs_to_jiffies((delta - 1) * 1000);
1591			delay = min(delay,
1592				    msecs_to_jiffies(fs_info->commit_interval * 1000));
1593			goto sleep;
1594		}
1595		transid = cur->transid;
1596		spin_unlock(&fs_info->trans_lock);
1597
1598		/* If the file system is aborted, this will always fail. */
1599		trans = btrfs_attach_transaction(root);
1600		if (IS_ERR(trans)) {
1601			if (PTR_ERR(trans) != -ENOENT)
1602				cannot_commit = true;
1603			goto sleep;
1604		}
1605		if (transid == trans->transid) {
1606			btrfs_commit_transaction(trans);
1607		} else {
1608			btrfs_end_transaction(trans);
1609		}
1610sleep:
1611		wake_up_process(fs_info->cleaner_kthread);
1612		mutex_unlock(&fs_info->transaction_kthread_mutex);
1613
1614		if (BTRFS_FS_ERROR(fs_info))
1615			btrfs_cleanup_transaction(fs_info);
1616		if (!kthread_should_stop() &&
1617				(!btrfs_transaction_blocked(fs_info) ||
1618				 cannot_commit))
1619			schedule_timeout_interruptible(delay);
1620	} while (!kthread_should_stop());
1621	return 0;
1622}
1623
1624/*
1625 * This will find the highest generation in the array of root backups.
1626 * The index of the newest backup slot is returned, or -EINVAL if we
1627 * can't find anything.
1628 *
1629 * We check that the array is valid by comparing the generation of the
1630 * latest root in the array with the generation in the super block.  If
1631 * they don't match we pitch it.
1632 */
1633static int find_newest_super_backup(struct btrfs_fs_info *info)
1634{
1635	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1636	u64 cur;
1637	struct btrfs_root_backup *root_backup;
1638	int i;
1639
1640	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1641		root_backup = info->super_copy->super_roots + i;
1642		cur = btrfs_backup_tree_root_gen(root_backup);
1643		if (cur == newest_gen)
1644			return i;
1645	}
1646
1647	return -EINVAL;
1648}
1649
1650/*
1651 * Copy all the root pointers into the super backup array.
1652 * This will bump the backup pointer by one when it is
1653 * done.
1654 */
1655static void backup_super_roots(struct btrfs_fs_info *info)
1656{
1657	const int next_backup = info->backup_root_index;
1658	struct btrfs_root_backup *root_backup;
1659
1660	root_backup = info->super_for_commit->super_roots + next_backup;
1661
1662	/*
1663	 * Make sure all of our padding and empty slots get zero-filled
1664	 * regardless of which ones we use today.
1665	 */
1666	memset(root_backup, 0, sizeof(*root_backup));
1667
1668	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1669
1670	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1671	btrfs_set_backup_tree_root_gen(root_backup,
1672			       btrfs_header_generation(info->tree_root->node));
1673
1674	btrfs_set_backup_tree_root_level(root_backup,
1675			       btrfs_header_level(info->tree_root->node));
1676
1677	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1678	btrfs_set_backup_chunk_root_gen(root_backup,
1679			       btrfs_header_generation(info->chunk_root->node));
1680	btrfs_set_backup_chunk_root_level(root_backup,
1681			       btrfs_header_level(info->chunk_root->node));
1682
1683	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
1684		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1685		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1686
1687		btrfs_set_backup_extent_root(root_backup,
1688					     extent_root->node->start);
1689		btrfs_set_backup_extent_root_gen(root_backup,
1690				btrfs_header_generation(extent_root->node));
1691		btrfs_set_backup_extent_root_level(root_backup,
1692					btrfs_header_level(extent_root->node));
1693
1694		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1695		btrfs_set_backup_csum_root_gen(root_backup,
1696					       btrfs_header_generation(csum_root->node));
1697		btrfs_set_backup_csum_root_level(root_backup,
1698						 btrfs_header_level(csum_root->node));
1699	}
1700
1701	/*
1702	 * We might commit during log recovery, which happens before we set
1703	 * the fs_root.  Make sure it is valid before we fill it in.
1704	 */
1705	if (info->fs_root && info->fs_root->node) {
1706		btrfs_set_backup_fs_root(root_backup,
1707					 info->fs_root->node->start);
1708		btrfs_set_backup_fs_root_gen(root_backup,
1709			       btrfs_header_generation(info->fs_root->node));
1710		btrfs_set_backup_fs_root_level(root_backup,
1711			       btrfs_header_level(info->fs_root->node));
1712	}
1713
1714	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1715	btrfs_set_backup_dev_root_gen(root_backup,
1716			       btrfs_header_generation(info->dev_root->node));
1717	btrfs_set_backup_dev_root_level(root_backup,
1718				       btrfs_header_level(info->dev_root->node));
1719
1720	btrfs_set_backup_total_bytes(root_backup,
1721			     btrfs_super_total_bytes(info->super_copy));
1722	btrfs_set_backup_bytes_used(root_backup,
1723			     btrfs_super_bytes_used(info->super_copy));
1724	btrfs_set_backup_num_devices(root_backup,
1725			     btrfs_super_num_devices(info->super_copy));
1726
1727	/*
1728	 * If we don't copy this out to the super_copy, it won't get remembered
1729	 * for the next commit.
1730	 */
1731	memcpy(&info->super_copy->super_roots,
1732	       &info->super_for_commit->super_roots,
1733	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1734}
1735
1736/*
1737 * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
1738 * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1739 *
1740 * @fs_info:  filesystem whose backup roots need to be read
1741 * @priority: priority of backup root required
1742 *
1743 * Returns backup root index on success and -EINVAL otherwise.
1744 */
1745static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1746{
1747	int backup_index = find_newest_super_backup(fs_info);
1748	struct btrfs_super_block *super = fs_info->super_copy;
1749	struct btrfs_root_backup *root_backup;
1750
1751	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1752		if (priority == 0)
1753			return backup_index;
1754
1755		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1756		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1757	} else {
1758		return -EINVAL;
1759	}
1760
1761	root_backup = super->super_roots + backup_index;
1762
1763	btrfs_set_super_generation(super,
1764				   btrfs_backup_tree_root_gen(root_backup));
1765	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1766	btrfs_set_super_root_level(super,
1767				   btrfs_backup_tree_root_level(root_backup));
1768	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1769
1770	/*
1771	 * FIXME: the total bytes and num_devices need to match, otherwise we
1772	 * should trigger a fsck.
1773	 */
1774	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1775	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1776
1777	return backup_index;
1778}
1779
1780/* Helper to clean up all the worker queues. */
1781static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1782{
1783	btrfs_destroy_workqueue(fs_info->fixup_workers);
1784	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1785	btrfs_destroy_workqueue(fs_info->workers);
1786	if (fs_info->endio_workers)
1787		destroy_workqueue(fs_info->endio_workers);
1788	if (fs_info->rmw_workers)
1789		destroy_workqueue(fs_info->rmw_workers);
1790	if (fs_info->compressed_write_workers)
1791		destroy_workqueue(fs_info->compressed_write_workers);
1792	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1793	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1794	btrfs_destroy_workqueue(fs_info->delayed_workers);
1795	btrfs_destroy_workqueue(fs_info->caching_workers);
1796	btrfs_destroy_workqueue(fs_info->flush_workers);
1797	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1798	if (fs_info->discard_ctl.discard_workers)
1799		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1800	/*
1801	 * Now that all other work queues are destroyed, we can safely destroy
1802	 * the queues used for metadata I/O, since tasks from those other work
1803	 * queues can do metadata I/O operations.
1804	 */
1805	if (fs_info->endio_meta_workers)
1806		destroy_workqueue(fs_info->endio_meta_workers);
1807}
1808
1809static void free_root_extent_buffers(struct btrfs_root *root)
1810{
1811	if (root) {
1812		free_extent_buffer(root->node);
1813		free_extent_buffer(root->commit_root);
1814		root->node = NULL;
1815		root->commit_root = NULL;
1816	}
1817}
1818
1819static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1820{
1821	struct btrfs_root *root, *tmp;
1822
1823	rbtree_postorder_for_each_entry_safe(root, tmp,
1824					     &fs_info->global_root_tree,
1825					     rb_node)
1826		free_root_extent_buffers(root);
1827}
1828
1829/* Helper to clean up tree roots. */
1830static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1831{
1832	free_root_extent_buffers(info->tree_root);
1833
1834	free_global_root_pointers(info);
1835	free_root_extent_buffers(info->dev_root);
1836	free_root_extent_buffers(info->quota_root);
1837	free_root_extent_buffers(info->uuid_root);
1838	free_root_extent_buffers(info->fs_root);
1839	free_root_extent_buffers(info->data_reloc_root);
1840	free_root_extent_buffers(info->block_group_root);
1841	free_root_extent_buffers(info->stripe_root);
1842	if (free_chunk_root)
1843		free_root_extent_buffers(info->chunk_root);
1844}
1845
1846void btrfs_put_root(struct btrfs_root *root)
1847{
1848	if (!root)
1849		return;
1850
1851	if (refcount_dec_and_test(&root->refs)) {
1852		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
1853		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1854		if (root->anon_dev)
1855			free_anon_bdev(root->anon_dev);
1856		free_root_extent_buffers(root);
1857#ifdef CONFIG_BTRFS_DEBUG
1858		spin_lock(&root->fs_info->fs_roots_radix_lock);
1859		list_del_init(&root->leak_list);
1860		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1861#endif
1862		kfree(root);
1863	}
1864}
1865
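    /*
     * Drop all roots still tracked at unmount time: first the dead roots
     * queued for deletion, then whatever is left in the fs_roots radix tree,
     * in gangs of up to eight entries.
     */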
1866void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1867{
1868	int ret;
1869	struct btrfs_root *gang[8];
1870	int i;
1871
1872	while (!list_empty(&fs_info->dead_roots)) {
1873		gang[0] = list_entry(fs_info->dead_roots.next,
1874				     struct btrfs_root, root_list);
1875		list_del(&gang[0]->root_list);
1876
1877		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1878			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1879		btrfs_put_root(gang[0]);
1880	}
1881
1882	while (1) {
1883		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1884					     (void **)gang, 0,
1885					     ARRAY_SIZE(gang));
1886		if (!ret)
1887			break;
1888		for (i = 0; i < ret; i++)
1889			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1890	}
1891}
1892
1893static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1894{
1895	mutex_init(&fs_info->scrub_lock);
1896	atomic_set(&fs_info->scrubs_running, 0);
1897	atomic_set(&fs_info->scrub_pause_req, 0);
1898	atomic_set(&fs_info->scrubs_paused, 0);
1899	atomic_set(&fs_info->scrub_cancel_req, 0);
1900	init_waitqueue_head(&fs_info->scrub_pause_wait);
1901	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1902}
1903
1904static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1905{
1906	spin_lock_init(&fs_info->balance_lock);
1907	mutex_init(&fs_info->balance_mutex);
1908	atomic_set(&fs_info->balance_pause_req, 0);
1909	atomic_set(&fs_info->balance_cancel_req, 0);
1910	fs_info->balance_ctl = NULL;
1911	init_waitqueue_head(&fs_info->balance_wait_q);
1912	atomic_set(&fs_info->reloc_cancel_req, 0);
1913}
1914
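    /*
     * Set up the in-memory inode backing all metadata pages.  It is hashed
     * with the fixed objectid BTRFS_BTREE_INODE_OBJECTID, given the maximum
     * possible i_size and flagged with BTRFS_INODE_DUMMY.
     */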
1915static int btrfs_init_btree_inode(struct super_block *sb)
1916{
1917	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1918	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1919					      fs_info->tree_root);
1920	struct inode *inode;
1921
1922	inode = new_inode(sb);
1923	if (!inode)
1924		return -ENOMEM;
1925
1926	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1927	set_nlink(inode, 1);
1928	/*
1929	 * We set the i_size on the btree inode to the max possible value.
1930	 * The real end of the address space is determined by all of
1931	 * the devices in the system.
1932	 */
1933	inode->i_size = OFFSET_MAX;
1934	inode->i_mapping->a_ops = &btree_aops;
1935	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1936
1937	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
1938	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1939			    IO_TREE_BTREE_INODE_IO);
1940	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1941
1942	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1943	BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
1944	BTRFS_I(inode)->location.type = 0;
1945	BTRFS_I(inode)->location.offset = 0;
1946	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1947	__insert_inode_hash(inode, hash);
1948	fs_info->btree_inode = inode;
1949
1950	return 0;
1951}
1952
1953static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1954{
1955	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1956	init_rwsem(&fs_info->dev_replace.rwsem);
1957	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1958}
1959
1960static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1961{
1962	spin_lock_init(&fs_info->qgroup_lock);
1963	mutex_init(&fs_info->qgroup_ioctl_lock);
1964	fs_info->qgroup_tree = RB_ROOT;
1965	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1966	fs_info->qgroup_seq = 1;
1967	fs_info->qgroup_ulist = NULL;
1968	fs_info->qgroup_rescan_running = false;
1969	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
1970	mutex_init(&fs_info->qgroup_rescan_lock);
1971}
1972
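    /*
     * Allocate all the per-fs workqueues.  Most of them are capped at
     * thread_pool_size active workers, while fixup, qgroup-rescan and discard
     * are strictly ordered queues.  On failure -ENOMEM is returned and the
     * caller is expected to tear down any queues that were created (see
     * btrfs_stop_all_workers()).
     */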
1973static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1974{
1975	u32 max_active = fs_info->thread_pool_size;
1976	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1977	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1978
1979	fs_info->workers =
1980		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1981
1982	fs_info->delalloc_workers =
1983		btrfs_alloc_workqueue(fs_info, "delalloc",
1984				      flags, max_active, 2);
1985
1986	fs_info->flush_workers =
1987		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1988				      flags, max_active, 0);
1989
1990	fs_info->caching_workers =
1991		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1992
1993	fs_info->fixup_workers =
1994		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1995
1996	fs_info->endio_workers =
1997		alloc_workqueue("btrfs-endio", flags, max_active);
1998	fs_info->endio_meta_workers =
1999		alloc_workqueue("btrfs-endio-meta", flags, max_active);
2000	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
2001	fs_info->endio_write_workers =
2002		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2003				      max_active, 2);
2004	fs_info->compressed_write_workers =
2005		alloc_workqueue("btrfs-compressed-write", flags, max_active);
2006	fs_info->endio_freespace_worker =
2007		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2008				      max_active, 0);
2009	fs_info->delayed_workers =
2010		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2011				      max_active, 0);
2012	fs_info->qgroup_rescan_workers =
2013		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
2014					      ordered_flags);
2015	fs_info->discard_ctl.discard_workers =
2016		alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
2017
2018	if (!(fs_info->workers &&
2019	      fs_info->delalloc_workers && fs_info->flush_workers &&
2020	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2021	      fs_info->compressed_write_workers &&
2022	      fs_info->endio_write_workers &&
2023	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2024	      fs_info->caching_workers && fs_info->fixup_workers &&
2025	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
2026	      fs_info->discard_ctl.discard_workers)) {
2027		return -ENOMEM;
2028	}
2029
2030	return 0;
2031}
2032
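    /*
     * Allocate the shash transform used for checksumming, based on the csum
     * type recorded in the super block, and note whether the implementation
     * is a fast (accelerated) one by setting BTRFS_FS_CSUM_IMPL_FAST.
     */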
2033static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2034{
2035	struct crypto_shash *csum_shash;
2036	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2037
2038	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2039
2040	if (IS_ERR(csum_shash)) {
2041		btrfs_err(fs_info, "error allocating %s hash for checksum",
2042			  csum_driver);
2043		return PTR_ERR(csum_shash);
2044	}
2045
2046	fs_info->csum_shash = csum_shash;
2047
2048	/*
2049	 * Check if the checksum implementation is a fast accelerated one.
2050	 * As-is this is a bit of a hack and should be replaced once the csum
2051	 * implementations provide that information themselves.
2052	 */
2053	switch (csum_type) {
2054	case BTRFS_CSUM_TYPE_CRC32:
2055		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2056			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2057		break;
2058	case BTRFS_CSUM_TYPE_XXHASH:
2059		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2060		break;
2061	default:
2062		break;
2063	}
2064
2065	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2066			btrfs_super_csum_name(csum_type),
2067			crypto_shash_driver_name(csum_shash));
2068	return 0;
2069}
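/*
 * Editorial sketch, not in the original file: once fs_info->csum_shash is
 * set up by btrfs_init_csum_hash(), a one-shot digest over an arbitrary
 * buffer follows the standard shash pattern (the superblock write path
 * below does the same via crypto_shash_digest()):
 */
static int __maybe_unused example_csum_buffer(struct btrfs_fs_info *fs_info,
					      const u8 *data, unsigned int len,
					      u8 *result)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;
	/* init + update + final in a single call */
	return crypto_shash_digest(shash, data, len, result);
}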
2070
2071static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2072			    struct btrfs_fs_devices *fs_devices)
2073{
2074	int ret;
2075	struct btrfs_tree_parent_check check = { 0 };
2076	struct btrfs_root *log_tree_root;
2077	struct btrfs_super_block *disk_super = fs_info->super_copy;
2078	u64 bytenr = btrfs_super_log_root(disk_super);
2079	int level = btrfs_super_log_root_level(disk_super);
2080
2081	if (fs_devices->rw_devices == 0) {
2082		btrfs_warn(fs_info, "log replay required on RO media");
2083		return -EIO;
2084	}
2085
2086	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2087					 GFP_KERNEL);
2088	if (!log_tree_root)
2089		return -ENOMEM;
2090
2091	check.level = level;
2092	check.transid = fs_info->generation + 1;
2093	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2094	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2095	if (IS_ERR(log_tree_root->node)) {
2096		btrfs_warn(fs_info, "failed to read log tree");
2097		ret = PTR_ERR(log_tree_root->node);
2098		log_tree_root->node = NULL;
2099		btrfs_put_root(log_tree_root);
2100		return ret;
2101	}
2102	if (!extent_buffer_uptodate(log_tree_root->node)) {
2103		btrfs_err(fs_info, "failed to read log tree");
2104		btrfs_put_root(log_tree_root);
2105		return -EIO;
2106	}
2107
2108	/* returns with log_tree_root freed on success */
2109	ret = btrfs_recover_log_trees(log_tree_root);
2110	if (ret) {
2111		btrfs_handle_fs_error(fs_info, ret,
2112				      "Failed to recover log tree");
2113		btrfs_put_root(log_tree_root);
2114		return ret;
2115	}
2116
2117	if (sb_rdonly(fs_info->sb)) {
2118		ret = btrfs_commit_super(fs_info);
2119		if (ret)
2120			return ret;
2121	}
2122
2123	return 0;
2124}
2125
2126static int load_global_roots_objectid(struct btrfs_root *tree_root,
2127				      struct btrfs_path *path, u64 objectid,
2128				      const char *name)
2129{
2130	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2131	struct btrfs_root *root;
2132	u64 max_global_id = 0;
2133	int ret;
2134	struct btrfs_key key = {
2135		.objectid = objectid,
2136		.type = BTRFS_ROOT_ITEM_KEY,
2137		.offset = 0,
2138	};
2139	bool found = false;
2140
2141	/* If we have IGNOREDATACSUMS, skip loading these roots. */
2142	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2143	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2144		set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2145		return 0;
2146	}
2147
2148	while (1) {
2149		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2150		if (ret < 0)
2151			break;
2152
2153		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2154			ret = btrfs_next_leaf(tree_root, path);
2155			if (ret) {
2156				if (ret > 0)
2157					ret = 0;
2158				break;
2159			}
2160		}
2161		ret = 0;
2162
2163		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2164		if (key.objectid != objectid)
2165			break;
2166		btrfs_release_path(path);
2167
2168		/*
2169		 * Just worry about this for the extent tree; it'll be the same
2170		 * for everybody.
2171		 */
2172		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2173			max_global_id = max(max_global_id, key.offset);
2174
2175		found = true;
2176		root = read_tree_root_path(tree_root, path, &key);
2177		if (IS_ERR(root)) {
2178			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2179				ret = PTR_ERR(root);
2180			break;
2181		}
2182		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2183		ret = btrfs_global_root_insert(root);
2184		if (ret) {
2185			btrfs_put_root(root);
2186			break;
2187		}
2188		key.offset++;
2189	}
2190	btrfs_release_path(path);
2191
2192	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2193		fs_info->nr_global_roots = max_global_id + 1;
2194
2195	if (!found || ret) {
2196		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2197			set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2198
2199		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2200			ret = ret ? ret : -ENOENT;
2201		else
2202			ret = 0;
2203		btrfs_err(fs_info, "failed to load root %s", name);
2204	}
2205	return ret;
2206}
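/*
 * Editorial note with a small sketch: global roots are keyed in the tree
 * root as (tree objectid, BTRFS_ROOT_ITEM_KEY, global_root_id), which is
 * exactly what the key.offset++ iteration above walks.  Building the key
 * for one such root would look like:
 */
static void __maybe_unused example_global_root_key(u64 global_root_id,
						   struct btrfs_key *key)
{
	/* Or BTRFS_CSUM_TREE_OBJECTID / BTRFS_FREE_SPACE_TREE_OBJECTID. */
	key->objectid = BTRFS_EXTENT_TREE_OBJECTID;
	key->type = BTRFS_ROOT_ITEM_KEY;
	key->offset = global_root_id;
}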
2207
2208static int load_global_roots(struct btrfs_root *tree_root)
2209{
2210	struct btrfs_path *path;
2211	int ret = 0;
2212
2213	path = btrfs_alloc_path();
2214	if (!path)
2215		return -ENOMEM;
2216
2217	ret = load_global_roots_objectid(tree_root, path,
2218					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2219	if (ret)
2220		goto out;
2221	ret = load_global_roots_objectid(tree_root, path,
2222					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2223	if (ret)
2224		goto out;
2225	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2226		goto out;
2227	ret = load_global_roots_objectid(tree_root, path,
2228					 BTRFS_FREE_SPACE_TREE_OBJECTID,
2229					 "free space");
2230out:
2231	btrfs_free_path(path);
2232	return ret;
2233}
2234
2235static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2236{
2237	struct btrfs_root *tree_root = fs_info->tree_root;
2238	struct btrfs_root *root;
2239	struct btrfs_key location;
2240	int ret;
2241
2242	BUG_ON(!fs_info->tree_root);
2243
2244	ret = load_global_roots(tree_root);
2245	if (ret)
2246		return ret;
2247
2248	location.type = BTRFS_ROOT_ITEM_KEY;
2249	location.offset = 0;
2250
2251	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2252		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2253		root = btrfs_read_tree_root(tree_root, &location);
2254		if (IS_ERR(root)) {
2255			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2256				ret = PTR_ERR(root);
2257				goto out;
2258			}
2259		} else {
2260			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2261			fs_info->block_group_root = root;
2262		}
2263	}
2264
2265	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2266	root = btrfs_read_tree_root(tree_root, &location);
2267	if (IS_ERR(root)) {
2268		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2269			ret = PTR_ERR(root);
2270			goto out;
2271		}
2272	} else {
2273		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2274		fs_info->dev_root = root;
2275	}
2276	/* Initialize fs_info for all devices in any case */
2277	ret = btrfs_init_devices_late(fs_info);
2278	if (ret)
2279		goto out;
2280
2281	/*
2282	 * This tree can share blocks with some other fs tree during relocation
2283	 * and we need a proper setup by btrfs_get_fs_root
2284	 */
2285	root = btrfs_get_fs_root(tree_root->fs_info,
2286				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2287	if (IS_ERR(root)) {
2288		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2289			ret = PTR_ERR(root);
2290			goto out;
2291		}
2292	} else {
2293		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2294		fs_info->data_reloc_root = root;
2295	}
2296
2297	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2298	root = btrfs_read_tree_root(tree_root, &location);
2299	if (!IS_ERR(root)) {
2300		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2301		fs_info->quota_root = root;
2302	}
2303
2304	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2305	root = btrfs_read_tree_root(tree_root, &location);
2306	if (IS_ERR(root)) {
2307		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2308			ret = PTR_ERR(root);
2309			if (ret != -ENOENT)
2310				goto out;
2311		}
2312	} else {
2313		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2314		fs_info->uuid_root = root;
2315	}
2316
2317	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2318		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2319		root = btrfs_read_tree_root(tree_root, &location);
2320		if (IS_ERR(root)) {
2321			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2322				ret = PTR_ERR(root);
2323				goto out;
2324			}
2325		} else {
2326			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2327			fs_info->stripe_root = root;
2328		}
2329	}
2330
2331	return 0;
2332out:
2333	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2334		   location.objectid, ret);
2335	return ret;
2336}
2337
2338/*
2339 * Real super block validation
2340 * NOTE: super csum type and incompat features will not be checked here.
2341 *
2342 * @sb:		super block to check
2343 * @mirror_num:	the super block copy number whose bytenr is checked:
2344 * 		0	the primary (1st) sb
2345 * 		1, 2	2nd and 3rd backup copy
2346 * 	       -1	skip bytenr check
2347 */
2348int btrfs_validate_super(struct btrfs_fs_info *fs_info,
2349			 struct btrfs_super_block *sb, int mirror_num)
2350{
2351	u64 nodesize = btrfs_super_nodesize(sb);
2352	u64 sectorsize = btrfs_super_sectorsize(sb);
2353	int ret = 0;
2354
2355	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2356		btrfs_err(fs_info, "no valid FS found");
2357		ret = -EINVAL;
2358	}
2359	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2360		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2361				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2362		ret = -EINVAL;
2363	}
2364	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2365		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2366				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2367		ret = -EINVAL;
2368	}
2369	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2370		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2371				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2372		ret = -EINVAL;
2373	}
2374	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2375		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2376				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2377		ret = -EINVAL;
2378	}
2379
2380	/*
2381	 * Check sectorsize and nodesize first, other checks will need them.
2382	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2383	 */
2384	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2385	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2386		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2387		ret = -EINVAL;
2388	}
2389
2390	/*
2391	 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2392	 *
2393	 * We can support 16K sectorsize with 64K page size without problem,
2394	 * but such a sectorsize/pagesize combination doesn't make much sense.
2395	 * 4K will be our future standard, PAGE_SIZE is supported from the very
2396	 * beginning.
2397	 */
2398	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2399		btrfs_err(fs_info,
2400			"sectorsize %llu not yet supported for page size %lu",
2401			sectorsize, PAGE_SIZE);
2402		ret = -EINVAL;
2403	}
2404
2405	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2406	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2407		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2408		ret = -EINVAL;
2409	}
2410	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2411		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2412			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2413		ret = -EINVAL;
2414	}
2415
2416	/* Root alignment check */
2417	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2418		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2419			   btrfs_super_root(sb));
2420		ret = -EINVAL;
2421	}
2422	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2423		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2424			   btrfs_super_chunk_root(sb));
2425		ret = -EINVAL;
2426	}
2427	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2428		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2429			   btrfs_super_log_root(sb));
2430		ret = -EINVAL;
2431	}
2432
2433	if (!fs_info->fs_devices->temp_fsid &&
2434	    memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2435		btrfs_err(fs_info,
2436		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2437			  sb->fsid, fs_info->fs_devices->fsid);
2438		ret = -EINVAL;
2439	}
2440
2441	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2442		   BTRFS_FSID_SIZE) != 0) {
2443		btrfs_err(fs_info,
2444"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2445			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2446		ret = -EINVAL;
2447	}
2448
2449	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2450		   BTRFS_FSID_SIZE) != 0) {
2451		btrfs_err(fs_info,
2452			"dev_item UUID does not match metadata fsid: %pU != %pU",
2453			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2454		ret = -EINVAL;
2455	}
2456
2457	/*
2458	 * Artificial requirement for block-group-tree to force newer features
2459	 * (free-space-tree, no-holes) so the test matrix is smaller.
2460	 */
2461	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2462	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2463	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2464		btrfs_err(fs_info,
2465		"block-group-tree feature requires free-space-tree and no-holes");
2466		ret = -EINVAL;
2467	}
2468
2469	/*
2470	 * Hint to catch really bogus numbers, bitflips or so; more exact
2471	 * checks are done later.
2472	 */
2473	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2474		btrfs_err(fs_info, "bytes_used is too small %llu",
2475			  btrfs_super_bytes_used(sb));
2476		ret = -EINVAL;
2477	}
2478	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2479		btrfs_err(fs_info, "invalid stripesize %u",
2480			  btrfs_super_stripesize(sb));
2481		ret = -EINVAL;
2482	}
2483	if (btrfs_super_num_devices(sb) > (1UL << 31))
2484		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2485			   btrfs_super_num_devices(sb));
2486	if (btrfs_super_num_devices(sb) == 0) {
2487		btrfs_err(fs_info, "number of devices is 0");
2488		ret = -EINVAL;
2489	}
2490
2491	if (mirror_num >= 0 &&
2492	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2493		btrfs_err(fs_info, "super offset mismatch %llu != %u",
2494			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2495		ret = -EINVAL;
2496	}
2497
2498	/*
2499	 * Obvious sys_chunk_array corruptions, it must hold at least one key
2500	 * and one chunk
2501	 */
2502	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2503		btrfs_err(fs_info, "system chunk array too big %u > %u",
2504			  btrfs_super_sys_array_size(sb),
2505			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2506		ret = -EINVAL;
2507	}
2508	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2509			+ sizeof(struct btrfs_chunk)) {
2510		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2511			  btrfs_super_sys_array_size(sb),
2512			  sizeof(struct btrfs_disk_key)
2513			  + sizeof(struct btrfs_chunk));
2514		ret = -EINVAL;
2515	}
2516
2517	/*
2518	 * The generation is a global counter; we'll trust it more than the
2519	 * others, but it's still possible that it's the one that's wrong.
2520	 */
2521	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2522		btrfs_warn(fs_info,
2523			"suspicious: generation < chunk_root_generation: %llu < %llu",
2524			btrfs_super_generation(sb),
2525			btrfs_super_chunk_root_generation(sb));
2526	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2527	    && btrfs_super_cache_generation(sb) != (u64)-1)
2528		btrfs_warn(fs_info,
2529			"suspicious: generation < cache_generation: %llu < %llu",
2530			btrfs_super_generation(sb),
2531			btrfs_super_cache_generation(sb));
2532
2533	return ret;
2534}
2535
2536/*
2537 * Validation of super block at mount time.
2538 * Some checks already done early at mount time, like csum type and incompat
2539 * flags will be skipped.
2540 */
2541static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2542{
2543	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2544}
2545
2546/*
2547 * Validation of super block at write time.
2548 * Some checks like bytenr check will be skipped as their values will be
2549 * overwritten soon.
2550 * Extra checks like csum type and incompat flags will be done here.
2551 */
2552static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2553				      struct btrfs_super_block *sb)
2554{
2555	int ret;
2556
2557	ret = btrfs_validate_super(fs_info, sb, -1);
2558	if (ret < 0)
2559		goto out;
2560	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2561		ret = -EUCLEAN;
2562		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2563			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2564		goto out;
2565	}
2566	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2567		ret = -EUCLEAN;
2568		btrfs_err(fs_info,
2569		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2570			  btrfs_super_incompat_flags(sb),
2571			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2572		goto out;
2573	}
2574out:
2575	if (ret < 0)
2576		btrfs_err(fs_info,
2577		"super block corruption detected before writing it to disk");
2578	return ret;
2579}
2580
2581static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2582{
2583	struct btrfs_tree_parent_check check = {
2584		.level = level,
2585		.transid = gen,
2586		.owner_root = root->root_key.objectid
2587	};
2588	int ret = 0;
2589
2590	root->node = read_tree_block(root->fs_info, bytenr, &check);
2591	if (IS_ERR(root->node)) {
2592		ret = PTR_ERR(root->node);
2593		root->node = NULL;
2594		return ret;
2595	}
2596	if (!extent_buffer_uptodate(root->node)) {
2597		free_extent_buffer(root->node);
2598		root->node = NULL;
2599		return -EIO;
2600	}
2601
2602	btrfs_set_root_node(&root->root_item, root->node);
2603	root->commit_root = btrfs_root_node(root);
2604	btrfs_set_root_refs(&root->root_item, 1);
2605	return ret;
2606}
2607
2608static int load_important_roots(struct btrfs_fs_info *fs_info)
2609{
2610	struct btrfs_super_block *sb = fs_info->super_copy;
2611	u64 gen, bytenr;
2612	int level, ret;
2613
2614	bytenr = btrfs_super_root(sb);
2615	gen = btrfs_super_generation(sb);
2616	level = btrfs_super_root_level(sb);
2617	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2618	if (ret) {
2619		btrfs_warn(fs_info, "couldn't read tree root");
2620		return ret;
2621	}
2622	return 0;
2623}
2624
2625static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2626{
2627	int backup_index = find_newest_super_backup(fs_info);
2628	struct btrfs_super_block *sb = fs_info->super_copy;
2629	struct btrfs_root *tree_root = fs_info->tree_root;
2630	bool handle_error = false;
2631	int ret = 0;
2632	int i;
2633
2634	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2635		if (handle_error) {
2636			if (!IS_ERR(tree_root->node))
2637				free_extent_buffer(tree_root->node);
2638			tree_root->node = NULL;
2639
2640			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2641				break;
2642
2643			free_root_pointers(fs_info, 0);
2644
2645			/*
2646			 * Don't use the log in recovery mode, it won't be
2647			 * valid
2648			 */
2649			btrfs_set_super_log_root(sb, 0);
2650
2651			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2652			ret = read_backup_root(fs_info, i);
2653			backup_index = ret;
2654			if (ret < 0)
2655				return ret;
2656		}
2657
2658		ret = load_important_roots(fs_info);
2659		if (ret) {
2660			handle_error = true;
2661			continue;
2662		}
2663
2664		/*
2665		 * No need to hold btrfs_root::objectid_mutex since the fs
2666		 * hasn't been fully initialised and we are the only user
2667		 */
2668		ret = btrfs_init_root_free_objectid(tree_root);
2669		if (ret < 0) {
2670			handle_error = true;
2671			continue;
2672		}
2673
2674		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2675
2676		ret = btrfs_read_roots(fs_info);
2677		if (ret < 0) {
2678			handle_error = true;
2679			continue;
2680		}
2681
2682		/* All successful */
2683		fs_info->generation = btrfs_header_generation(tree_root->node);
2684		btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2685		fs_info->last_reloc_trans = 0;
2686
2687		/* Always begin writing backup roots after the one being used */
2688		if (backup_index < 0) {
2689			fs_info->backup_root_index = 0;
2690		} else {
2691			fs_info->backup_root_index = backup_index + 1;
2692			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2693		}
2694		break;
2695	}
2696
2697	return ret;
2698}
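/*
 * Editorial worked example for the backup index logic above: if the newest
 * usable backup was slot 3 and BTRFS_NUM_BACKUP_ROOTS is 4, then
 * (3 + 1) % 4 == 0, so the next backup root is written to slot 0, i.e.
 * writing always resumes right after the slot used for recovery.
 */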
2699
2700void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2701{
2702	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2703	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2704	INIT_LIST_HEAD(&fs_info->trans_list);
2705	INIT_LIST_HEAD(&fs_info->dead_roots);
2706	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2707	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2708	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2709	spin_lock_init(&fs_info->delalloc_root_lock);
2710	spin_lock_init(&fs_info->trans_lock);
2711	spin_lock_init(&fs_info->fs_roots_radix_lock);
2712	spin_lock_init(&fs_info->delayed_iput_lock);
2713	spin_lock_init(&fs_info->defrag_inodes_lock);
2714	spin_lock_init(&fs_info->super_lock);
2715	spin_lock_init(&fs_info->buffer_lock);
2716	spin_lock_init(&fs_info->unused_bgs_lock);
2717	spin_lock_init(&fs_info->treelog_bg_lock);
2718	spin_lock_init(&fs_info->zone_active_bgs_lock);
2719	spin_lock_init(&fs_info->relocation_bg_lock);
2720	rwlock_init(&fs_info->tree_mod_log_lock);
2721	rwlock_init(&fs_info->global_root_lock);
2722	mutex_init(&fs_info->unused_bg_unpin_mutex);
2723	mutex_init(&fs_info->reclaim_bgs_lock);
2724	mutex_init(&fs_info->reloc_mutex);
2725	mutex_init(&fs_info->delalloc_root_mutex);
2726	mutex_init(&fs_info->zoned_meta_io_lock);
2727	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2728	seqlock_init(&fs_info->profiles_lock);
2729
2730	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2731	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2732	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2733	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2734	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2735				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2736	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2737				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2738	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2739				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2740	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2741				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2742
2743	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2744	INIT_LIST_HEAD(&fs_info->space_info);
2745	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2746	INIT_LIST_HEAD(&fs_info->unused_bgs);
2747	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2748	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2749#ifdef CONFIG_BTRFS_DEBUG
2750	INIT_LIST_HEAD(&fs_info->allocated_roots);
2751	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2752	spin_lock_init(&fs_info->eb_leak_lock);
2753#endif
2754	fs_info->mapping_tree = RB_ROOT_CACHED;
2755	rwlock_init(&fs_info->mapping_tree_lock);
2756	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2757			     BTRFS_BLOCK_RSV_GLOBAL);
2758	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2759	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2760	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2761	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2762			     BTRFS_BLOCK_RSV_DELOPS);
2763	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2764			     BTRFS_BLOCK_RSV_DELREFS);
2765
2766	atomic_set(&fs_info->async_delalloc_pages, 0);
2767	atomic_set(&fs_info->defrag_running, 0);
2768	atomic_set(&fs_info->nr_delayed_iputs, 0);
2769	atomic64_set(&fs_info->tree_mod_seq, 0);
2770	fs_info->global_root_tree = RB_ROOT;
2771	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2772	fs_info->metadata_ratio = 0;
2773	fs_info->defrag_inodes = RB_ROOT;
2774	atomic64_set(&fs_info->free_chunk_space, 0);
2775	fs_info->tree_mod_log = RB_ROOT;
2776	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2777	btrfs_init_ref_verify(fs_info);
2778
2779	fs_info->thread_pool_size = min_t(unsigned long,
2780					  num_online_cpus() + 2, 8);
2781
2782	INIT_LIST_HEAD(&fs_info->ordered_roots);
2783	spin_lock_init(&fs_info->ordered_root_lock);
2784
2785	btrfs_init_scrub(fs_info);
2786	btrfs_init_balance(fs_info);
2787	btrfs_init_async_reclaim_work(fs_info);
2788
2789	rwlock_init(&fs_info->block_group_cache_lock);
2790	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2791
2792	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2793			    IO_TREE_FS_EXCLUDED_EXTENTS);
2794
2795	mutex_init(&fs_info->ordered_operations_mutex);
2796	mutex_init(&fs_info->tree_log_mutex);
2797	mutex_init(&fs_info->chunk_mutex);
2798	mutex_init(&fs_info->transaction_kthread_mutex);
2799	mutex_init(&fs_info->cleaner_mutex);
2800	mutex_init(&fs_info->ro_block_group_mutex);
2801	init_rwsem(&fs_info->commit_root_sem);
2802	init_rwsem(&fs_info->cleanup_work_sem);
2803	init_rwsem(&fs_info->subvol_sem);
2804	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2805
2806	btrfs_init_dev_replace_locks(fs_info);
2807	btrfs_init_qgroup(fs_info);
2808	btrfs_discard_init(fs_info);
2809
2810	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2811	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2812
2813	init_waitqueue_head(&fs_info->transaction_throttle);
2814	init_waitqueue_head(&fs_info->transaction_wait);
2815	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2816	init_waitqueue_head(&fs_info->async_submit_wait);
2817	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2818
2819	/* Usable values until the real ones are cached from the superblock */
2820	fs_info->nodesize = 4096;
2821	fs_info->sectorsize = 4096;
2822	fs_info->sectorsize_bits = ilog2(4096);
2823	fs_info->stripesize = 4096;
2824
2825	/* Default compress algorithm when user does -o compress */
2826	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2827
2828	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2829
2830	spin_lock_init(&fs_info->swapfile_pins_lock);
2831	fs_info->swapfile_pins = RB_ROOT;
2832
2833	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2834	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2835}
2836
2837static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2838{
2839	int ret;
2840
2841	fs_info->sb = sb;
2842	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2843	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2844
2845	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2846	if (ret)
2847		return ret;
2848
2849	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2850	if (ret)
2851		return ret;
2852
2853	fs_info->dirty_metadata_batch = PAGE_SIZE *
2854					(1 + ilog2(nr_cpu_ids));
2855
2856	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2857	if (ret)
2858		return ret;
2859
2860	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2861			GFP_KERNEL);
2862	if (ret)
2863		return ret;
2864
2865	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2866					GFP_KERNEL);
2867	if (!fs_info->delayed_root)
2868		return -ENOMEM;
2869	btrfs_init_delayed_root(fs_info->delayed_root);
2870
2871	if (sb_rdonly(sb))
2872		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2873
2874	return btrfs_alloc_stripe_hash_table(fs_info);
2875}
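/*
 * Editorial sketch: the percpu counters initialized above trade accuracy
 * for scalability; callers pass a batch so the shared sum is only updated
 * when a CPU-local delta exceeds it.  A hypothetical update of the
 * dirty-metadata accounting would look like:
 */
static void __maybe_unused example_account_dirty(struct btrfs_fs_info *fs_info,
						 s64 bytes)
{
	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, bytes,
				 fs_info->dirty_metadata_batch);
}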
2876
2877static int btrfs_uuid_rescan_kthread(void *data)
2878{
2879	struct btrfs_fs_info *fs_info = data;
2880	int ret;
2881
2882	/*
2883	 * 1st step is to iterate through the existing UUID tree and
2884	 * to delete all entries that contain outdated data.
2885	 * 2nd step is to add all missing entries to the UUID tree.
2886	 */
2887	ret = btrfs_uuid_tree_iterate(fs_info);
2888	if (ret < 0) {
2889		if (ret != -EINTR)
2890			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2891				   ret);
2892		up(&fs_info->uuid_tree_rescan_sem);
2893		return ret;
2894	}
2895	return btrfs_uuid_scan_kthread(data);
2896}
2897
2898static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2899{
2900	struct task_struct *task;
2901
2902	down(&fs_info->uuid_tree_rescan_sem);
2903	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2904	if (IS_ERR(task)) {
2905		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2906		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2907		up(&fs_info->uuid_tree_rescan_sem);
2908		return PTR_ERR(task);
2909	}
2910
2911	return 0;
2912}
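/*
 * Editorial note: uuid_tree_rescan_sem is taken before the kthread is
 * started and released either here on startup failure or by the rescan
 * kthread when it finishes, so code that must not race with a rescan can
 * simply wait for it with a down()/up() pair on the same semaphore.
 */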
2913
2914static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2915{
2916	u64 root_objectid = 0;
2917	struct btrfs_root *gang[8];
2918	int i = 0;
2919	int err = 0;
2920	unsigned int ret = 0;
2921
2922	while (1) {
2923		spin_lock(&fs_info->fs_roots_radix_lock);
2924		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2925					     (void **)gang, root_objectid,
2926					     ARRAY_SIZE(gang));
2927		if (!ret) {
2928			spin_unlock(&fs_info->fs_roots_radix_lock);
2929			break;
2930		}
2931		root_objectid = gang[ret - 1]->root_key.objectid + 1;
2932
2933		for (i = 0; i < ret; i++) {
2934			/* Avoid grabbing roots in dead_roots. */
2935			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2936				gang[i] = NULL;
2937				continue;
2938			}
2939			/* Grab all the search results for later use. */
2940			gang[i] = btrfs_grab_root(gang[i]);
2941		}
2942		spin_unlock(&fs_info->fs_roots_radix_lock);
2943
2944		for (i = 0; i < ret; i++) {
2945			if (!gang[i])
2946				continue;
2947			root_objectid = gang[i]->root_key.objectid;
2948			err = btrfs_orphan_cleanup(gang[i]);
2949			if (err)
2950				goto out;
2951			btrfs_put_root(gang[i]);
2952		}
2953		root_objectid++;
2954	}
2955out:
2956	/* Release the uncleaned roots due to error. */
2957	for (; i < ret; i++) {
2958		if (gang[i])
2959			btrfs_put_root(gang[i]);
2960	}
2961	return err;
2962}
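/*
 * Editorial note on the loop above: radix_tree_gang_lookup() returns up to
 * ARRAY_SIZE(gang) roots starting at root_objectid, and bumping
 * root_objectid past the last key returned (gang[ret - 1] + 1) paginates
 * the walk, so fs_roots_radix_lock is only held while collecting a batch,
 * never across btrfs_orphan_cleanup().
 */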
2963
2964/*
2965 * Mounting logic specific to read-write file systems. Shared by open_ctree
2966 * and btrfs_remount when remounting from read-only to read-write.
2967 */
2968int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2969{
2970	int ret;
2971	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2972	bool rebuild_free_space_tree = false;
2973
2974	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2975	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2976		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2977			btrfs_warn(fs_info,
2978				   "'clear_cache' option is ignored with extent tree v2");
2979		else
2980			rebuild_free_space_tree = true;
2981	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2982		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2983		btrfs_warn(fs_info, "free space tree is invalid");
2984		rebuild_free_space_tree = true;
2985	}
2986
2987	if (rebuild_free_space_tree) {
2988		btrfs_info(fs_info, "rebuilding free space tree");
2989		ret = btrfs_rebuild_free_space_tree(fs_info);
2990		if (ret) {
2991			btrfs_warn(fs_info,
2992				   "failed to rebuild free space tree: %d", ret);
2993			goto out;
2994		}
2995	}
2996
2997	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2998	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
2999		btrfs_info(fs_info, "disabling free space tree");
3000		ret = btrfs_delete_free_space_tree(fs_info);
3001		if (ret) {
3002			btrfs_warn(fs_info,
3003				   "failed to disable free space tree: %d", ret);
3004			goto out;
3005		}
3006	}
3007
3008	/*
3009	 * btrfs_find_orphan_roots() is responsible for finding all the dead
3010	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3011	 * them into the fs_info->fs_roots_radix tree. This must be done before
3012	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3013	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3014	 * item before the root's tree is deleted - this means that if we unmount
3015	 * or crash before the deletion completes, on the next mount we will not
3016	 * delete what remains of the tree because the orphan item no longer
3017	 * exists, which is what tells us we have a pending deletion.
3018	 */
3019	ret = btrfs_find_orphan_roots(fs_info);
3020	if (ret)
3021		goto out;
3022
3023	ret = btrfs_cleanup_fs_roots(fs_info);
3024	if (ret)
3025		goto out;
3026
3027	down_read(&fs_info->cleanup_work_sem);
3028	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3029	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3030		up_read(&fs_info->cleanup_work_sem);
3031		goto out;
3032	}
3033	up_read(&fs_info->cleanup_work_sem);
3034
3035	mutex_lock(&fs_info->cleaner_mutex);
3036	ret = btrfs_recover_relocation(fs_info);
3037	mutex_unlock(&fs_info->cleaner_mutex);
3038	if (ret < 0) {
3039		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3040		goto out;
3041	}
3042
3043	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3044	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3045		btrfs_info(fs_info, "creating free space tree");
3046		ret = btrfs_create_free_space_tree(fs_info);
3047		if (ret) {
3048			btrfs_warn(fs_info,
3049				"failed to create free space tree: %d", ret);
3050			goto out;
3051		}
3052	}
3053
3054	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3055		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3056		if (ret)
3057			goto out;
3058	}
3059
3060	ret = btrfs_resume_balance_async(fs_info);
3061	if (ret)
3062		goto out;
3063
3064	ret = btrfs_resume_dev_replace_async(fs_info);
3065	if (ret) {
3066		btrfs_warn(fs_info, "failed to resume dev_replace");
3067		goto out;
3068	}
3069
3070	btrfs_qgroup_rescan_resume(fs_info);
3071
3072	if (!fs_info->uuid_root) {
3073		btrfs_info(fs_info, "creating UUID tree");
3074		ret = btrfs_create_uuid_tree(fs_info);
3075		if (ret) {
3076			btrfs_warn(fs_info,
3077				   "failed to create the UUID tree %d", ret);
3078			goto out;
3079		}
3080	}
3081
3082out:
3083	return ret;
3084}
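/*
 * Editorial sketch (hypothetical, simplified from the actual remount
 * path): a read-only to read-write transition reuses the helper above,
 * then resumes discard and clears the RO state bit:
 */
static int __maybe_unused example_remount_rw(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_start_pre_rw_mount(fs_info);
	if (ret)
		return ret;
	btrfs_discard_resume(fs_info);
	clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
	return 0;
}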
3085
3086/*
3087 * Do various sanity and dependency checks of different features.
3088 *
3089 * @is_rw_mount:	If the mount is read-write.
3090 *
3091 * This is the place for less strict checks (like for subpage or artificial
3092 * feature dependencies).
3093 *
3094 * For strict checks or possible corruption detection, see
3095 * btrfs_validate_super().
3096 *
3097 * This should be called after btrfs_parse_options(), as some mount options
3098 * (space cache related) can modify on-disk format like free space tree and
3099 * screw up certain feature dependencies.
3100 */
3101int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3102{
3103	struct btrfs_super_block *disk_super = fs_info->super_copy;
3104	u64 incompat = btrfs_super_incompat_flags(disk_super);
3105	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3106	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3107
3108	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3109		btrfs_err(fs_info,
3110		"cannot mount because of unknown incompat features (0x%llx)",
3111		    incompat);
3112		return -EINVAL;
3113	}
3114
3115	/* Runtime limitation for mixed block groups. */
3116	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3117	    (fs_info->sectorsize != fs_info->nodesize)) {
3118		btrfs_err(fs_info,
3119"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3120			fs_info->nodesize, fs_info->sectorsize);
3121		return -EINVAL;
3122	}
3123
3124	/* Mixed backref is an always-enabled feature. */
3125	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3126
3127	/* Set compression related flags just in case. */
3128	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3129		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3130	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3131		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3132
3133	/*
3134	 * An ancient flag, which should really be marked deprecated.
3135	 * Such a runtime limitation doesn't really need an incompat flag.
3136	 */
3137	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3138		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3139
3140	if (compat_ro_unsupp && is_rw_mount) {
3141		btrfs_err(fs_info,
3142	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3143		       compat_ro);
3144		return -EINVAL;
3145	}
3146
3147	/*
3148	 * We have unsupported RO compat features; even though we're mounted
3149	 * read-only, we should not cause any metadata writes, including log
3150	 * replay, or we could screw up whatever the new feature requires.
3151	 */
3152	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3153	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3154		btrfs_err(fs_info,
3155"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3156			  compat_ro);
3157		return -EINVAL;
3158	}
3159
3160	/*
3161	 * Artificial limitations for block group tree, to force
3162	 * block-group-tree to rely on no-holes and free-space-tree.
3163	 */
3164	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3165	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3166	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3167		btrfs_err(fs_info,
3168"block-group-tree feature requires no-holes and free-space-tree features");
3169		return -EINVAL;
3170	}
3171
3172	/*
3173	 * Subpage runtime limitation on v1 cache.
3174	 *
3175	 * V1 space cache still has some hard-coded PAGE_SIZE usage, and since
3176	 * we're already defaulting to the v2 cache, there's no need to bother
3177	 * with v1 as it's going to be deprecated anyway.
3178	 */
3179	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3180		btrfs_warn(fs_info,
3181	"v1 space cache is not supported for page size %lu with sectorsize %u",
3182			   PAGE_SIZE, fs_info->sectorsize);
3183		return -EINVAL;
3184	}
3185
3186	/* This can be called by remount, we need to protect the super block. */
3187	spin_lock(&fs_info->super_lock);
3188	btrfs_set_super_incompat_flags(disk_super, incompat);
3189	spin_unlock(&fs_info->super_lock);
3190
3191	return 0;
3192}
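/*
 * Editorial sketch: the btrfs_fs_incompat()/btrfs_fs_compat_ro()
 * predicates used above are plain mask tests against the cached super
 * block, roughly equivalent to:
 */
static bool __maybe_unused example_has_no_holes(struct btrfs_fs_info *fs_info)
{
	return (btrfs_super_incompat_flags(fs_info->super_copy) &
		BTRFS_FEATURE_INCOMPAT_NO_HOLES) != 0;
}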
3193
3194int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
3195		      char *options)
3196{
3197	u32 sectorsize;
3198	u32 nodesize;
3199	u32 stripesize;
3200	u64 generation;
3201	u16 csum_type;
3202	struct btrfs_super_block *disk_super;
3203	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3204	struct btrfs_root *tree_root;
3205	struct btrfs_root *chunk_root;
3206	int ret;
3207	int level;
3208
3209	ret = init_mount_fs_info(fs_info, sb);
3210	if (ret)
3211		goto fail;
3212
3213	/* These need to be init'ed before we start creating inodes and such. */
3214	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3215				     GFP_KERNEL);
3216	fs_info->tree_root = tree_root;
3217	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3218				      GFP_KERNEL);
3219	fs_info->chunk_root = chunk_root;
3220	if (!tree_root || !chunk_root) {
3221		ret = -ENOMEM;
3222		goto fail;
3223	}
3224
3225	ret = btrfs_init_btree_inode(sb);
3226	if (ret)
3227		goto fail;
3228
3229	invalidate_bdev(fs_devices->latest_dev->bdev);
3230
3231	/*
3232	 * Read super block and check the signature bytes only
3233	 */
3234	disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3235	if (IS_ERR(disk_super)) {
3236		ret = PTR_ERR(disk_super);
3237		goto fail_alloc;
3238	}
3239
3240	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3241	/*
3242	 * Verify the type first; if that or the checksum value is
3243	 * corrupted, we'll find out.
3244	 */
3245	csum_type = btrfs_super_csum_type(disk_super);
3246	if (!btrfs_supported_super_csum(csum_type)) {
3247		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3248			  csum_type);
3249		ret = -EINVAL;
3250		btrfs_release_disk_super(disk_super);
3251		goto fail_alloc;
3252	}
3253
3254	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3255
3256	ret = btrfs_init_csum_hash(fs_info, csum_type);
3257	if (ret) {
3258		btrfs_release_disk_super(disk_super);
3259		goto fail_alloc;
3260	}
3261
3262	/*
3263	 * We want to check the superblock checksum; the type is stored inside.
3264	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3265	 */
3266	if (btrfs_check_super_csum(fs_info, disk_super)) {
3267		btrfs_err(fs_info, "superblock checksum mismatch");
3268		ret = -EINVAL;
3269		btrfs_release_disk_super(disk_super);
3270		goto fail_alloc;
3271	}
3272
3273	/*
3274	 * super_copy is zeroed at allocation time and we never touch the
3275	 * following bytes up to INFO_SIZE; the checksum is calculated from
3276	 * the whole block of INFO_SIZE.
3277	 */
3278	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3279	btrfs_release_disk_super(disk_super);
3280
3281	disk_super = fs_info->super_copy;
3282
3283	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3284	       sizeof(*fs_info->super_for_commit));
3285
3286	ret = btrfs_validate_mount_super(fs_info);
3287	if (ret) {
3288		btrfs_err(fs_info, "superblock contains fatal errors");
3289		ret = -EINVAL;
3290		goto fail_alloc;
3291	}
3292
3293	if (!btrfs_super_root(disk_super)) {
3294		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3295		ret = -EINVAL;
3296		goto fail_alloc;
3297	}
3298
3299	/* Check the FS state, i.e. whether the FS is broken. */
3300	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3301		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3302
3303	/* Set up fs_info before parsing mount options */
3304	nodesize = btrfs_super_nodesize(disk_super);
3305	sectorsize = btrfs_super_sectorsize(disk_super);
3306	stripesize = sectorsize;
3307	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3308	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3309
3310	fs_info->nodesize = nodesize;
3311	fs_info->sectorsize = sectorsize;
3312	fs_info->sectorsize_bits = ilog2(sectorsize);
3313	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3314	fs_info->stripesize = stripesize;
3315
3316	/*
3317	 * Handle the space caching options appropriately now that we have the
3318	 * super block loaded and validated.
3319	 */
3320	btrfs_set_free_space_cache_settings(fs_info);
3321
3322	if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3323		ret = -EINVAL;
3324		goto fail_alloc;
3325	}
3326
3327	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3328	if (ret < 0)
3329		goto fail_alloc;
3330
3331	/*
3332	 * At this point our mount options are validated, if we set ->max_inline
3333	 * to something non-standard make sure we truncate it to sectorsize.
3334	 */
3335	fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3336
3337	if (sectorsize < PAGE_SIZE) {
3338		struct btrfs_subpage_info *subpage_info;
3339
3340		btrfs_warn(fs_info,
3341		"read-write for sector size %u with page size %lu is experimental",
3342			   sectorsize, PAGE_SIZE);
3343		subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
3344		if (!subpage_info) {
3345			ret = -ENOMEM;
3346			goto fail_alloc;
3347		}
3348		btrfs_init_subpage_info(subpage_info, sectorsize);
3349		fs_info->subpage_info = subpage_info;
3350	}
3351
3352	ret = btrfs_init_workqueues(fs_info);
3353	if (ret)
3354		goto fail_sb_buffer;
3355
3356	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3357	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3358
3359	sb->s_blocksize = sectorsize;
3360	sb->s_blocksize_bits = blksize_bits(sectorsize);
3361	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3362
3363	mutex_lock(&fs_info->chunk_mutex);
3364	ret = btrfs_read_sys_array(fs_info);
3365	mutex_unlock(&fs_info->chunk_mutex);
3366	if (ret) {
3367		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3368		goto fail_sb_buffer;
3369	}
3370
3371	generation = btrfs_super_chunk_root_generation(disk_super);
3372	level = btrfs_super_chunk_root_level(disk_super);
3373	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3374			      generation, level);
3375	if (ret) {
3376		btrfs_err(fs_info, "failed to read chunk root");
3377		goto fail_tree_roots;
3378	}
3379
3380	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3381			   offsetof(struct btrfs_header, chunk_tree_uuid),
3382			   BTRFS_UUID_SIZE);
3383
3384	ret = btrfs_read_chunk_tree(fs_info);
3385	if (ret) {
3386		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3387		goto fail_tree_roots;
3388	}
3389
3390	/*
3391	 * At this point we know all the devices that make this filesystem,
3392	 * including the seed devices but we don't know yet if the replace
3393	 * target is required. So free devices that are not part of this
3394	 * filesystem but skip the replace target device which is checked
3395	 * below in btrfs_init_dev_replace().
3396	 */
3397	btrfs_free_extra_devids(fs_devices);
3398	if (!fs_devices->latest_dev->bdev) {
3399		btrfs_err(fs_info, "failed to read devices");
3400		ret = -EIO;
3401		goto fail_tree_roots;
3402	}
3403
3404	ret = init_tree_roots(fs_info);
3405	if (ret)
3406		goto fail_tree_roots;
3407
3408	/*
3409	 * Get zone type information of zoned block devices. This will also
3410	 * handle emulation of a zoned filesystem if a regular device has the
3411	 * zoned incompat feature flag set.
3412	 */
3413	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3414	if (ret) {
3415		btrfs_err(fs_info,
3416			  "zoned: failed to read device zone info: %d", ret);
3417		goto fail_block_groups;
3418	}
3419
3420	/*
3421	 * If we have a uuid root and we're not being told to rescan we need to
3422	 * check the generation here so we can set the
3423	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3424	 * transaction during a balance or the log replay without updating the
3425	 * uuid generation, and then if we crash we would rescan the uuid tree,
3426	 * even though it was perfectly fine.
3427	 */
3428	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3429	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3430		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3431
3432	ret = btrfs_verify_dev_extents(fs_info);
3433	if (ret) {
3434		btrfs_err(fs_info,
3435			  "failed to verify dev extents against chunks: %d",
3436			  ret);
3437		goto fail_block_groups;
3438	}
3439	ret = btrfs_recover_balance(fs_info);
3440	if (ret) {
3441		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3442		goto fail_block_groups;
3443	}
3444
3445	ret = btrfs_init_dev_stats(fs_info);
3446	if (ret) {
3447		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3448		goto fail_block_groups;
3449	}
3450
3451	ret = btrfs_init_dev_replace(fs_info);
3452	if (ret) {
3453		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3454		goto fail_block_groups;
3455	}
3456
3457	ret = btrfs_check_zoned_mode(fs_info);
3458	if (ret) {
3459		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3460			  ret);
3461		goto fail_block_groups;
3462	}
3463
3464	ret = btrfs_sysfs_add_fsid(fs_devices);
3465	if (ret) {
3466		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3467				ret);
3468		goto fail_block_groups;
3469	}
3470
3471	ret = btrfs_sysfs_add_mounted(fs_info);
3472	if (ret) {
3473		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3474		goto fail_fsdev_sysfs;
3475	}
3476
3477	ret = btrfs_init_space_info(fs_info);
3478	if (ret) {
3479		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3480		goto fail_sysfs;
3481	}
3482
3483	ret = btrfs_read_block_groups(fs_info);
3484	if (ret) {
3485		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3486		goto fail_sysfs;
3487	}
3488
3489	btrfs_free_zone_cache(fs_info);
3490
3491	btrfs_check_active_zone_reservation(fs_info);
3492
3493	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3494	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3495		btrfs_warn(fs_info,
3496		"writable mount is not allowed due to too many missing devices");
3497		ret = -EINVAL;
3498		goto fail_sysfs;
3499	}
3500
3501	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3502					       "btrfs-cleaner");
3503	if (IS_ERR(fs_info->cleaner_kthread)) {
3504		ret = PTR_ERR(fs_info->cleaner_kthread);
3505		goto fail_sysfs;
3506	}
3507
3508	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3509						   tree_root,
3510						   "btrfs-transaction");
3511	if (IS_ERR(fs_info->transaction_kthread)) {
3512		ret = PTR_ERR(fs_info->transaction_kthread);
3513		goto fail_cleaner;
3514	}
3515
3516	ret = btrfs_read_qgroup_config(fs_info);
3517	if (ret)
3518		goto fail_trans_kthread;
3519
3520	if (btrfs_build_ref_tree(fs_info))
3521		btrfs_err(fs_info, "couldn't build ref tree");
3522
3523	/* Do not make disk changes in a broken FS or if nologreplay is given. */
3524	if (btrfs_super_log_root(disk_super) != 0 &&
3525	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3526		btrfs_info(fs_info, "start tree-log replay");
3527		ret = btrfs_replay_log(fs_info, fs_devices);
3528		if (ret)
3529			goto fail_qgroup;
3530	}
3531
3532	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3533	if (IS_ERR(fs_info->fs_root)) {
3534		ret = PTR_ERR(fs_info->fs_root);
3535		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3536		fs_info->fs_root = NULL;
3537		goto fail_qgroup;
3538	}
3539
3540	if (sb_rdonly(sb))
3541		return 0;
3542
3543	ret = btrfs_start_pre_rw_mount(fs_info);
3544	if (ret) {
3545		close_ctree(fs_info);
3546		return ret;
3547	}
3548	btrfs_discard_resume(fs_info);
3549
3550	if (fs_info->uuid_root &&
3551	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3552	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3553		btrfs_info(fs_info, "checking UUID tree");
3554		ret = btrfs_check_uuid_tree(fs_info);
3555		if (ret) {
3556			btrfs_warn(fs_info,
3557				"failed to check the UUID tree: %d", ret);
3558			close_ctree(fs_info);
3559			return ret;
3560		}
3561	}
3562
3563	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3564
3565	/* Kick the cleaner thread so it'll start deleting snapshots. */
3566	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3567		wake_up_process(fs_info->cleaner_kthread);
3568
3569	return 0;
3570
3571fail_qgroup:
3572	btrfs_free_qgroup_config(fs_info);
3573fail_trans_kthread:
3574	kthread_stop(fs_info->transaction_kthread);
3575	btrfs_cleanup_transaction(fs_info);
3576	btrfs_free_fs_roots(fs_info);
3577fail_cleaner:
3578	kthread_stop(fs_info->cleaner_kthread);
3579
3580	/*
3581	 * make sure we're done with the btree inode before we stop our
3582	 * kthreads
3583	 */
3584	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3585
3586fail_sysfs:
3587	btrfs_sysfs_remove_mounted(fs_info);
3588
3589fail_fsdev_sysfs:
3590	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3591
3592fail_block_groups:
3593	btrfs_put_block_group_cache(fs_info);
3594
3595fail_tree_roots:
3596	if (fs_info->data_reloc_root)
3597		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3598	free_root_pointers(fs_info, true);
3599	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3600
3601fail_sb_buffer:
3602	btrfs_stop_all_workers(fs_info);
3603	btrfs_free_block_groups(fs_info);
3604fail_alloc:
3605	btrfs_mapping_tree_free(fs_info);
3606
3607	iput(fs_info->btree_inode);
3608fail:
3609	btrfs_close_devices(fs_info->fs_devices);
3610	ASSERT(ret < 0);
3611	return ret;
3612}
3613ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
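/*
 * Editorial note: the ALLOW_ERROR_INJECTION() tag above makes open_ctree()
 * a legal target for the fail_function fault-injection framework, so mount
 * error paths can be exercised from userspace, e.g. (a sketch assuming
 * CONFIG_FUNCTION_ERROR_INJECTION and debugfs are available):
 *
 *	echo open_ctree > /sys/kernel/debug/fail_function/inject
 *	echo -12 > /sys/kernel/debug/fail_function/open_ctree/retval
 */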
3614
3615static void btrfs_end_super_write(struct bio *bio)
3616{
3617	struct btrfs_device *device = bio->bi_private;
3618	struct bio_vec *bvec;
3619	struct bvec_iter_all iter_all;
3620	struct page *page;
3621
3622	bio_for_each_segment_all(bvec, bio, iter_all) {
3623		page = bvec->bv_page;
3624
3625		if (bio->bi_status) {
3626			btrfs_warn_rl_in_rcu(device->fs_info,
3627				"lost page write due to IO error on %s (%d)",
3628				btrfs_dev_name(device),
3629				blk_status_to_errno(bio->bi_status));
3630			ClearPageUptodate(page);
3631			SetPageError(page);
3632			btrfs_dev_stat_inc_and_print(device,
3633						     BTRFS_DEV_STAT_WRITE_ERRS);
3634		} else {
3635			SetPageUptodate(page);
3636		}
3637
3638		put_page(page);
3639		unlock_page(page);
3640	}
3641
3642	bio_put(bio);
3643}
3644
3645struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3646						   int copy_num, bool drop_cache)
3647{
3648	struct btrfs_super_block *super;
3649	struct page *page;
3650	u64 bytenr, bytenr_orig;
3651	struct address_space *mapping = bdev->bd_inode->i_mapping;
3652	int ret;
3653
3654	bytenr_orig = btrfs_sb_offset(copy_num);
3655	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3656	if (ret == -ENOENT)
3657		return ERR_PTR(-EINVAL);
3658	else if (ret)
3659		return ERR_PTR(ret);
3660
3661	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3662		return ERR_PTR(-EINVAL);
3663
3664	if (drop_cache) {
3665		/* This should only be called with the primary sb. */
3666		ASSERT(copy_num == 0);
3667
3668		/*
3669		 * Drop the page of the primary superblock, so a later read will
3670		 * always read from the device.
3671		 */
3672		invalidate_inode_pages2_range(mapping,
3673				bytenr >> PAGE_SHIFT,
3674				(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3675	}
3676
3677	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3678	if (IS_ERR(page))
3679		return ERR_CAST(page);
3680
3681	super = page_address(page);
3682	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3683		btrfs_release_disk_super(super);
3684		return ERR_PTR(-ENODATA);
3685	}
3686
3687	if (btrfs_super_bytenr(super) != bytenr_orig) {
3688		btrfs_release_disk_super(super);
3689		return ERR_PTR(-EINVAL);
3690	}
3691
3692	return super;
3693}
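/*
 * Editorial sketch of the caller contract: a super block returned by
 * btrfs_read_dev_one_super() must be released with
 * btrfs_release_disk_super() once the caller is done with it:
 */
static int __maybe_unused example_print_super_gen(struct block_device *bdev)
{
	struct btrfs_super_block *super;

	super = btrfs_read_dev_one_super(bdev, 0, false);
	if (IS_ERR(super))
		return PTR_ERR(super);
	pr_info("primary super generation %llu\n",
		btrfs_super_generation(super));
	btrfs_release_disk_super(super);
	return 0;
}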
3694
3695
3696struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3697{
3698	struct btrfs_super_block *super, *latest = NULL;
3699	int i;
3700	u64 transid = 0;
3701
3702	/* We would like to check all the supers, but that would make
3703	 * a btrfs mount succeed after a mkfs from a different FS.
3704	 * So, we need to add a special mount option to scan for
3705	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
3706	 */
3707	for (i = 0; i < 1; i++) {
3708		super = btrfs_read_dev_one_super(bdev, i, false);
3709		if (IS_ERR(super))
3710			continue;
3711
3712		if (!latest || btrfs_super_generation(super) > transid) {
3713			if (latest)
3714				btrfs_release_disk_super(super);
3715
3716			latest = super;
3717			transid = btrfs_super_generation(super);
3718		}
3719	}
3720
3721	return super;
3722}
3723
3724/*
3725 * Write superblock @sb to the @device. Do not wait for completion, all the
3726 * pages we use for writing are locked.
3727 *
3728 * Write @max_mirrors copies of the superblock, where 0 means the default
3729 * that fits the expected device size at commit time. Note that max_mirrors
3730 * must be the same for the write and wait phases.
3731 *
3732 * Return number of errors when page is not found or submission fails.
3733 */
3734static int write_dev_supers(struct btrfs_device *device,
3735			    struct btrfs_super_block *sb, int max_mirrors)
3736{
3737	struct btrfs_fs_info *fs_info = device->fs_info;
3738	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3739	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3740	int i;
3741	int errors = 0;
3742	int ret;
3743	u64 bytenr, bytenr_orig;
3744
3745	if (max_mirrors == 0)
3746		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3747
3748	shash->tfm = fs_info->csum_shash;
3749
3750	for (i = 0; i < max_mirrors; i++) {
3751		struct page *page;
3752		struct bio *bio;
3753		struct btrfs_super_block *disk_super;
3754
3755		bytenr_orig = btrfs_sb_offset(i);
3756		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3757		if (ret == -ENOENT) {
3758			continue;
3759		} else if (ret < 0) {
3760			btrfs_err(device->fs_info,
3761				"couldn't get super block location for mirror %d",
3762				i);
3763			errors++;
3764			continue;
3765		}
3766		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3767		    device->commit_total_bytes)
3768			break;
3769
3770		btrfs_set_super_bytenr(sb, bytenr_orig);
3771
3772		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3773				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3774				    sb->csum);
3775
3776		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
3777					   GFP_NOFS);
3778		if (!page) {
3779			btrfs_err(device->fs_info,
3780			    "couldn't get super block page for bytenr %llu",
3781			    bytenr);
3782			errors++;
3783			continue;
3784		}
3785
3786		/* Bump the refcount for wait_dev_supers() */
3787		get_page(page);
3788
3789		disk_super = page_address(page);
3790		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3791
3792		/*
3793		 * Directly use bios here instead of relying on the page cache
3794		 * to do I/O, so we don't lose the ability to do integrity
3795		 * checking.
3796		 */
3797		bio = bio_alloc(device->bdev, 1,
3798				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3799				GFP_NOFS);
3800		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3801		bio->bi_private = device;
3802		bio->bi_end_io = btrfs_end_super_write;
3803		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3804			       offset_in_page(bytenr));
3805
3806		/*
3807		 * We FUA only the first super block.  The others we allow to
3808		 * go down lazy and there's a short window where the on-disk
3809		 * copies might still contain the older version.
3810		 */
3811		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3812			bio->bi_opf |= REQ_FUA;
3813		submit_bio(bio);
3814
3815		if (btrfs_advance_sb_log(device, i))
3816			errors++;
3817	}
3818	return errors < i ? 0 : -1;
3819}
3820
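/*
 * A minimal caller sketch (hypothetical, error handling elided) for the
 * write/wait pair; @max_mirrors must match across the two phases so both
 * sides derive the same set of page cache indexes:
 *
 *	if (write_dev_supers(dev, sb, max_mirrors))
 *		errors++;
 *	(submit to the remaining devices ...)
 *	if (wait_dev_supers(dev, max_mirrors))
 *		errors++;
 *
 * write_all_supers() below is the real caller and follows this pattern.
 */
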
3821/*
3822 * Wait for write completion of the super blocks submitted by
3823 * write_dev_supers(); @max_mirrors must be the same as in the write phase.
3824 *
3825 * Return 0 on success, -1 if the primary super block failed or if all
3826 * waited-on copies errored (page not found or not marked up to date).
3827 */
3828static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3829{
3830	int i;
3831	int errors = 0;
3832	bool primary_failed = false;
3833	int ret;
3834	u64 bytenr;
3835
3836	if (max_mirrors == 0)
3837		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3838
3839	for (i = 0; i < max_mirrors; i++) {
3840		struct page *page;
3841
3842		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3843		if (ret == -ENOENT) {
3844			break;
3845		} else if (ret < 0) {
3846			errors++;
3847			if (i == 0)
3848				primary_failed = true;
3849			continue;
3850		}
3851		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3852		    device->commit_total_bytes)
3853			break;
3854
3855		page = find_get_page(device->bdev->bd_inode->i_mapping,
3856				     bytenr >> PAGE_SHIFT);
3857		if (!page) {
3858			errors++;
3859			if (i == 0)
3860				primary_failed = true;
3861			continue;
3862		}
3863		/* Page is submitted locked and unlocked once the IO completes */
3864		wait_on_page_locked(page);
3865		if (PageError(page)) {
3866			errors++;
3867			if (i == 0)
3868				primary_failed = true;
3869		}
3870
3871		/* Drop our reference */
3872		put_page(page);
3873
3874		/* Drop the reference from the writing run */
3875		put_page(page);
3876	}
3877
3878	/* log error, force error return */
3879	if (primary_failed) {
3880		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3881			  device->devid);
3882		return -1;
3883	}
3884
3885	return errors < i ? 0 : -1;
3886}
3887
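/*
 * Note on the page reference protocol of the two functions above:
 * write_dev_supers() takes an extra reference (get_page()) before submitting
 * the bio so the page survives until the wait phase, and wait_dev_supers()
 * drops two references - its own from find_get_page() plus the one taken by
 * the writing run.
 */
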
3888/*
3889 * Endio for write_dev_flush; this will wake anyone waiting
3890 * for the barrier when it is done.
3891 */
3892static void btrfs_end_empty_barrier(struct bio *bio)
3893{
3894	bio_uninit(bio);
3895	complete(bio->bi_private);
3896}
3897
3898/*
3899 * Submit a flush request to the device if it supports it. Error handling is
3900 * done in the waiting counterpart.
3901 */
3902static void write_dev_flush(struct btrfs_device *device)
3903{
3904	struct bio *bio = &device->flush_bio;
3905
3906	device->last_flush_error = BLK_STS_OK;
3907
3908	bio_init(bio, device->bdev, NULL, 0,
3909		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3910	bio->bi_end_io = btrfs_end_empty_barrier;
3911	init_completion(&device->flush_wait);
3912	bio->bi_private = &device->flush_wait;
3913	submit_bio(bio);
3914	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3915}
3916
3917/*
3918 * If the flush bio has been submitted by write_dev_flush, wait for it.
3919 * Return true for any error, and false otherwise.
3920 */
3921static bool wait_dev_flush(struct btrfs_device *device)
3922{
3923	struct bio *bio = &device->flush_bio;
3924
3925	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3926		return false;
3927
3928	wait_for_completion_io(&device->flush_wait);
3929
3930	if (bio->bi_status) {
3931		device->last_flush_error = bio->bi_status;
3932		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3933		return true;
3934	}
3935
3936	return false;
3937}
3938
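/*
 * write_dev_flush()/wait_dev_flush() form a split submit/complete pair keyed
 * by BTRFS_DEV_STATE_FLUSH_SENT; a hypothetical caller sketch
 * (barrier_all_devices() below is the real one):
 *
 *	write_dev_flush(dev);		(submits REQ_PREFLUSH, sets the bit)
 *	(... other devices ...)
 *	if (wait_dev_flush(dev))	(clears the bit, true means error)
 *		errors++;
 */
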
3939/*
3940 * send an empty flush down to each device in parallel,
3941 * then wait for them
3942 */
3943static int barrier_all_devices(struct btrfs_fs_info *info)
3944{
3945	struct list_head *head;
3946	struct btrfs_device *dev;
3947	int errors_wait = 0;
3948
3949	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3950	/* send down all the barriers */
3951	head = &info->fs_devices->devices;
3952	list_for_each_entry(dev, head, dev_list) {
3953		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3954			continue;
3955		if (!dev->bdev)
3956			continue;
3957		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3958		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3959			continue;
3960
3961		write_dev_flush(dev);
3962	}
3963
3964	/* wait for all the barriers */
3965	list_for_each_entry(dev, head, dev_list) {
3966		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3967			continue;
3968		if (!dev->bdev) {
3969			errors_wait++;
3970			continue;
3971		}
3972		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3973		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3974			continue;
3975
3976		if (wait_dev_flush(dev))
3977			errors_wait++;
3978	}
3979
3980	/*
3981	 * Checks last_flush_error of disks in order to determine the device
3982	 * state.
3983	 */
3984	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3985		return -EIO;
3986
3987	return 0;
3988}
3989
3990int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3991{
3992	int raid_type;
3993	int min_tolerated = INT_MAX;
3994
3995	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3996	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3997		min_tolerated = min_t(int, min_tolerated,
3998				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3999				    tolerated_failures);
4000
4001	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4002		if (raid_type == BTRFS_RAID_SINGLE)
4003			continue;
4004		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
4005			continue;
4006		min_tolerated = min_t(int, min_tolerated,
4007				    btrfs_raid_array[raid_type].
4008				    tolerated_failures);
4009	}
4010
4011	if (min_tolerated == INT_MAX) {
4012		pr_warn("BTRFS: unknown raid flag: %llu", flags);
4013		min_tolerated = 0;
4014	}
4015
4016	return min_tolerated;
4017}
4018
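/*
 * A worked example for the helper above, using the tolerated_failures
 * values from btrfs_raid_array (SINGLE/RAID0/DUP: 0, RAID1/RAID10/RAID5: 1,
 * RAID6/RAID1C3: 2, RAID1C4: 3):
 *
 *	btrfs_get_num_tolerated_disk_barrier_failures(
 *		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID6) == 1
 *
 * i.e. with mixed profiles the most fragile profile bounds the result.
 */
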
4019int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4020{
4021	struct list_head *head;
4022	struct btrfs_device *dev;
4023	struct btrfs_super_block *sb;
4024	struct btrfs_dev_item *dev_item;
4025	int ret;
4026	int do_barriers;
4027	int max_errors;
4028	int total_errors = 0;
4029	u64 flags;
4030
4031	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4032
4033	/*
4034	 * max_mirrors == 0 indicates we're called from commit_transaction,
4035	 * not from fsync, where the tree roots in fs_info have not yet been
4036	 * made consistent on disk.
4037	 */
4038	if (max_mirrors == 0)
4039		backup_super_roots(fs_info);
4040
4041	sb = fs_info->super_for_commit;
4042	dev_item = &sb->dev_item;
4043
4044	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4045	head = &fs_info->fs_devices->devices;
4046	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4047
4048	if (do_barriers) {
4049		ret = barrier_all_devices(fs_info);
4050		if (ret) {
4051			mutex_unlock(
4052				&fs_info->fs_devices->device_list_mutex);
4053			btrfs_handle_fs_error(fs_info, ret,
4054					      "errors while submitting device barriers.");
4055			return ret;
4056		}
4057	}
4058
4059	list_for_each_entry(dev, head, dev_list) {
4060		if (!dev->bdev) {
4061			total_errors++;
4062			continue;
4063		}
4064		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4065		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4066			continue;
4067
4068		btrfs_set_stack_device_generation(dev_item, 0);
4069		btrfs_set_stack_device_type(dev_item, dev->type);
4070		btrfs_set_stack_device_id(dev_item, dev->devid);
4071		btrfs_set_stack_device_total_bytes(dev_item,
4072						   dev->commit_total_bytes);
4073		btrfs_set_stack_device_bytes_used(dev_item,
4074						  dev->commit_bytes_used);
4075		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4076		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4077		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4078		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4079		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4080		       BTRFS_FSID_SIZE);
4081
4082		flags = btrfs_super_flags(sb);
4083		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4084
4085		ret = btrfs_validate_write_super(fs_info, sb);
4086		if (ret < 0) {
4087			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4088			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4089				"unexpected superblock corruption detected");
4090			return -EUCLEAN;
4091		}
4092
4093		ret = write_dev_supers(dev, sb, max_mirrors);
4094		if (ret)
4095			total_errors++;
4096	}
4097	if (total_errors > max_errors) {
4098		btrfs_err(fs_info, "%d errors while writing supers",
4099			  total_errors);
4100		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4101
4102		/* FUA is masked off if unsupported and can't be the reason */
4103		btrfs_handle_fs_error(fs_info, -EIO,
4104				      "%d errors while writing supers",
4105				      total_errors);
4106		return -EIO;
4107	}
4108
4109	total_errors = 0;
4110	list_for_each_entry(dev, head, dev_list) {
4111		if (!dev->bdev)
4112			continue;
4113		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4114		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4115			continue;
4116
4117		ret = wait_dev_supers(dev, max_mirrors);
4118		if (ret)
4119			total_errors++;
4120	}
4121	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4122	if (total_errors > max_errors) {
4123		btrfs_handle_fs_error(fs_info, -EIO,
4124				      "%d errors while writing supers",
4125				      total_errors);
4126		return -EIO;
4127	}
4128	return 0;
4129}
4130
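/*
 * Note: in write_all_supers() above, max_errors is
 * btrfs_super_num_devices() - 1, i.e. with N devices up to N - 1 per-device
 * failures are tolerated in each phase; only total_errors > max_errors
 * forces the filesystem into error state.
 */
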
4131/* Drop a fs root from the radix tree and free it. */
4132void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4133				  struct btrfs_root *root)
4134{
4135	bool drop_ref = false;
4136
4137	spin_lock(&fs_info->fs_roots_radix_lock);
4138	radix_tree_delete(&fs_info->fs_roots_radix,
4139			  (unsigned long)root->root_key.objectid);
4140	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4141		drop_ref = true;
4142	spin_unlock(&fs_info->fs_roots_radix_lock);
4143
4144	if (BTRFS_FS_ERROR(fs_info)) {
4145		ASSERT(root->log_root == NULL);
4146		if (root->reloc_root) {
4147			btrfs_put_root(root->reloc_root);
4148			root->reloc_root = NULL;
4149		}
4150	}
4151
4152	if (drop_ref)
4153		btrfs_put_root(root);
4154}
4155
4156int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4157{
4158	struct btrfs_root *root = fs_info->tree_root;
4159	struct btrfs_trans_handle *trans;
4160
4161	mutex_lock(&fs_info->cleaner_mutex);
4162	btrfs_run_delayed_iputs(fs_info);
4163	mutex_unlock(&fs_info->cleaner_mutex);
4164	wake_up_process(fs_info->cleaner_kthread);
4165
4166	/* wait until ongoing cleanup work is done */
4167	down_write(&fs_info->cleanup_work_sem);
4168	up_write(&fs_info->cleanup_work_sem);
4169
4170	trans = btrfs_join_transaction(root);
4171	if (IS_ERR(trans))
4172		return PTR_ERR(trans);
4173	return btrfs_commit_transaction(trans);
4174}
4175
4176static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4177{
4178	struct btrfs_transaction *trans;
4179	struct btrfs_transaction *tmp;
4180	bool found = false;
4181
4182	if (list_empty(&fs_info->trans_list))
4183		return;
4184
4185	/*
4186	 * This function is only called at the very end of close_ctree(), thus
4187	 * there is no other running transaction and no need to take trans_lock.
4188	 */
4189	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4190	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4191		struct extent_state *cached = NULL;
4192		u64 dirty_bytes = 0;
4193		u64 cur = 0;
4194		u64 found_start;
4195		u64 found_end;
4196
4197		found = true;
4198		while (find_first_extent_bit(&trans->dirty_pages, cur,
4199			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
4200			dirty_bytes += found_end + 1 - found_start;
4201			cur = found_end + 1;
4202		}
4203		btrfs_warn(fs_info,
4204	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4205			   trans->transid, dirty_bytes);
4206		btrfs_cleanup_one_transaction(trans, fs_info);
4207
4208		if (trans == fs_info->running_transaction)
4209			fs_info->running_transaction = NULL;
4210		list_del_init(&trans->list);
4211
4212		btrfs_put_transaction(trans);
4213		trace_btrfs_transaction_commit(fs_info);
4214	}
4215	ASSERT(!found);
4216}
4217
4218void __cold close_ctree(struct btrfs_fs_info *fs_info)
4219{
4220	int ret;
4221
4222	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4223
4224	/*
4225	 * If we had UNFINISHED_DROPS we could still be processing them, so
4226	 * clear that bit and wake up relocation so it can stop.
4227	 * We must do this before stopping the block group reclaim task, because
4228	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4229	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4230	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4231	 * return 1.
4232	 */
4233	btrfs_wake_unfinished_drop(fs_info);
4234
4235	/*
4236	 * We may have the reclaim task running and relocating a data block group,
4237	 * in which case it may create delayed iputs. So stop it before we park
4238	 * the cleaner kthread, otherwise we can get new delayed iputs after
4239	 * parking the cleaner, and that can make the async reclaim task hang
4240	 * if it's waiting for delayed iputs to complete, since the cleaner is
4241	 * parked and cannot run delayed iputs - this will make us hang when
4242	 * trying to stop the async reclaim task.
4243	 */
4244	cancel_work_sync(&fs_info->reclaim_bgs_work);
4245	/*
4246	 * We don't want the cleaner to start new transactions, add more delayed
4247	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4248	 * because that frees the task_struct, and the transaction kthread might
4249	 * still try to wake up the cleaner.
4250	 */
4251	kthread_park(fs_info->cleaner_kthread);
4252
4253	/* wait for the qgroup rescan worker to stop */
4254	btrfs_qgroup_wait_for_completion(fs_info, false);
4255
4256	/* wait for the uuid_scan task to finish */
4257	down(&fs_info->uuid_tree_rescan_sem);
4258	/* avoid complains from lockdep et al., set sem back to initial state */
4259	up(&fs_info->uuid_tree_rescan_sem);
4260
4261	/* pause restriper - we want to resume on mount */
4262	btrfs_pause_balance(fs_info);
4263
4264	btrfs_dev_replace_suspend_for_unmount(fs_info);
4265
4266	btrfs_scrub_cancel(fs_info);
4267
4268	/* wait for any defraggers to finish */
4269	wait_event(fs_info->transaction_wait,
4270		   (atomic_read(&fs_info->defrag_running) == 0));
4271
4272	/* clear out the rbtree of defraggable inodes */
4273	btrfs_cleanup_defrag_inodes(fs_info);
4274
4275	/*
4276	 * After we parked the cleaner kthread, ordered extents may have
4277	 * completed and created new delayed iputs. If one of the async reclaim
4278	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4279	 * can hang forever trying to stop it, because if a delayed iput is
4280	 * added after it ran btrfs_run_delayed_iputs() and before it called
4281	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4282	 * no one else to run iputs.
4283	 *
4284	 * So wait for all ongoing ordered extents to complete and then run
4285	 * delayed iputs. This works because once we reach this point no one
4286	 * can create new ordered extents or new delayed iputs through any
4287	 * other means.
4288	 *
4289	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4290	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4291	 * but the delayed iput for the respective inode is made only when doing
4292	 * the final btrfs_put_ordered_extent() (which must happen at
4293	 * btrfs_finish_ordered_io() when we are unmounting).
4294	 */
4295	btrfs_flush_workqueue(fs_info->endio_write_workers);
4296	/* Ordered extents for free space inodes. */
4297	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4298	btrfs_run_delayed_iputs(fs_info);
4299
4300	cancel_work_sync(&fs_info->async_reclaim_work);
4301	cancel_work_sync(&fs_info->async_data_reclaim_work);
4302	cancel_work_sync(&fs_info->preempt_reclaim_work);
4303
4304	/* Cancel or finish ongoing discard work */
4305	btrfs_discard_cleanup(fs_info);
4306
4307	if (!sb_rdonly(fs_info->sb)) {
4308		/*
4309		 * The cleaner kthread is stopped, so do one final pass over
4310		 * unused block groups.
4311		 */
4312		btrfs_delete_unused_bgs(fs_info);
4313
4314		/*
4315		 * There might be existing delayed inode workers still running
4316		 * and holding an empty delayed inode item. We must wait for
4317		 * them to complete first because they can create a transaction.
4318		 * This happens when someone calls btrfs_balance_delayed_items()
4319		 * and then a transaction commit runs the same delayed nodes
4320		 * before any delayed worker has done something with the nodes.
4321		 * We must wait for any worker here and not at transaction
4322		 * commit time since that could cause a deadlock.
4323		 * This is a very rare case.
4324		 */
4325		btrfs_flush_workqueue(fs_info->delayed_workers);
4326
4327		ret = btrfs_commit_super(fs_info);
4328		if (ret)
4329			btrfs_err(fs_info, "commit super ret %d", ret);
4330	}
4331
4332	if (BTRFS_FS_ERROR(fs_info))
4333		btrfs_error_commit_super(fs_info);
4334
4335	kthread_stop(fs_info->transaction_kthread);
4336	kthread_stop(fs_info->cleaner_kthread);
4337
4338	ASSERT(list_empty(&fs_info->delayed_iputs));
4339	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4340
4341	if (btrfs_check_quota_leak(fs_info)) {
4342		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4343		btrfs_err(fs_info, "qgroup reserved space leaked");
4344	}
4345
4346	btrfs_free_qgroup_config(fs_info);
4347	ASSERT(list_empty(&fs_info->delalloc_roots));
4348
4349	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4350		btrfs_info(fs_info, "at unmount delalloc count %lld",
4351		       percpu_counter_sum(&fs_info->delalloc_bytes));
4352	}
4353
4354	if (percpu_counter_sum(&fs_info->ordered_bytes))
4355		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4356			   percpu_counter_sum(&fs_info->ordered_bytes));
4357
4358	btrfs_sysfs_remove_mounted(fs_info);
4359	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4360
4361	btrfs_put_block_group_cache(fs_info);
4362
4363	/*
4364	 * We must make sure there are no read requests to submit
4365	 * after we stop all workers.
4366	 */
4367	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4368	btrfs_stop_all_workers(fs_info);
4369
4370	/* We shouldn't have any transaction open at this point */
4371	warn_about_uncommitted_trans(fs_info);
4372
4373	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4374	free_root_pointers(fs_info, true);
4375	btrfs_free_fs_roots(fs_info);
4376
4377	/*
4378	 * We must free the block groups after dropping the fs_roots as we could
4379	 * have had an IO error and have left over tree log blocks that aren't
4380	 * cleaned up until the fs roots are freed.  This makes the block group
4381	 * accounting appear to be wrong because there's pending reserved bytes,
4382	 * so make sure we do the block group cleanup afterwards.
4383	 */
4384	btrfs_free_block_groups(fs_info);
4385
4386	iput(fs_info->btree_inode);
4387
4388	btrfs_mapping_tree_free(fs_info);
4389	btrfs_close_devices(fs_info->fs_devices);
4390}
4391
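/*
 * A condensed view of the teardown ordering enforced by close_ctree() above:
 *
 *	1. set CLOSING_START, wake unfinished drops, stop bg reclaim work
 *	2. park the cleaner; stop qgroup rescan, balance, dev-replace, scrub
 *	3. flush ordered-extent endio workqueues, run remaining delayed iputs
 *	4. cancel async reclaim and discard, final unused-bg pass, commit super
 *	5. stop kthreads, free roots and block groups, close the devices
 */
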
4392void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4393			     struct extent_buffer *buf)
4394{
4395	struct btrfs_fs_info *fs_info = buf->fs_info;
4396	u64 transid = btrfs_header_generation(buf);
4397
4398#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4399	/*
4400	 * This is a fast path so only do this check if we have sanity tests
4401	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4402	 * outside of the sanity tests.
4403	 */
4404	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4405		return;
4406#endif
4407	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4408	ASSERT(trans->transid == fs_info->generation);
4409	btrfs_assert_tree_write_locked(buf);
4410	if (unlikely(transid != fs_info->generation)) {
4411		btrfs_abort_transaction(trans, -EUCLEAN);
4412		btrfs_crit(fs_info,
4413"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4414			   buf->start, transid, fs_info->generation);
4415	}
4416	set_extent_buffer_dirty(buf);
4417}
4418
4419static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4420					int flush_delayed)
4421{
4422	/*
4423	 * Looks as though older kernels can get into trouble with
4424	 * this code; they end up stuck in balance_dirty_pages forever.
4425	 */
4426	int ret;
4427
4428	if (current->flags & PF_MEMALLOC)
4429		return;
4430
4431	if (flush_delayed)
4432		btrfs_balance_delayed_items(fs_info);
4433
4434	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4435				     BTRFS_DIRTY_METADATA_THRESH,
4436				     fs_info->dirty_metadata_batch);
4437	if (ret > 0) {
4438		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4439	}
4440}
4441
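/*
 * Note: __percpu_counter_compare() above returns a value > 0 only once
 * dirty_metadata_bytes exceeds BTRFS_DIRTY_METADATA_THRESH (the batch
 * argument bounds per-CPU counter drift), so balance_dirty_pages throttling
 * is applied only past that watermark.
 */
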
4442void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4443{
4444	__btrfs_btree_balance_dirty(fs_info, 1);
4445}
4446
4447void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4448{
4449	__btrfs_btree_balance_dirty(fs_info, 0);
4450}
4451
4452static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4453{
4454	/* cleanup FS via transaction */
4455	btrfs_cleanup_transaction(fs_info);
4456
4457	mutex_lock(&fs_info->cleaner_mutex);
4458	btrfs_run_delayed_iputs(fs_info);
4459	mutex_unlock(&fs_info->cleaner_mutex);
4460
4461	down_write(&fs_info->cleanup_work_sem);
4462	up_write(&fs_info->cleanup_work_sem);
4463}
4464
4465static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4466{
4467	struct btrfs_root *gang[8];
4468	u64 root_objectid = 0;
4469	int ret;
4470
4471	spin_lock(&fs_info->fs_roots_radix_lock);
4472	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4473					     (void **)gang, root_objectid,
4474					     ARRAY_SIZE(gang))) != 0) {
4475		int i;
4476
4477		for (i = 0; i < ret; i++)
4478			gang[i] = btrfs_grab_root(gang[i]);
4479		spin_unlock(&fs_info->fs_roots_radix_lock);
4480
4481		for (i = 0; i < ret; i++) {
4482			if (!gang[i])
4483				continue;
4484			root_objectid = gang[i]->root_key.objectid;
4485			btrfs_free_log(NULL, gang[i]);
4486			btrfs_put_root(gang[i]);
4487		}
4488		root_objectid++;
4489		spin_lock(&fs_info->fs_roots_radix_lock);
4490	}
4491	spin_unlock(&fs_info->fs_roots_radix_lock);
4492	btrfs_free_log_root_tree(NULL, fs_info);
4493}
4494
4495static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4496{
4497	struct btrfs_ordered_extent *ordered;
4498
4499	spin_lock(&root->ordered_extent_lock);
4500	/*
4501	 * This will just short-circuit the ordered completion machinery, which
4502	 * will make sure the ordered extent gets properly cleaned up.
4503	 */
4504	list_for_each_entry(ordered, &root->ordered_extents,
4505			    root_extent_list)
4506		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4507	spin_unlock(&root->ordered_extent_lock);
4508}
4509
4510static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4511{
4512	struct btrfs_root *root;
4513	LIST_HEAD(splice);
4514
4515	spin_lock(&fs_info->ordered_root_lock);
4516	list_splice_init(&fs_info->ordered_roots, &splice);
4517	while (!list_empty(&splice)) {
4518		root = list_first_entry(&splice, struct btrfs_root,
4519					ordered_root);
4520		list_move_tail(&root->ordered_root,
4521			       &fs_info->ordered_roots);
4522
4523		spin_unlock(&fs_info->ordered_root_lock);
4524		btrfs_destroy_ordered_extents(root);
4525
4526		cond_resched();
4527		spin_lock(&fs_info->ordered_root_lock);
4528	}
4529	spin_unlock(&fs_info->ordered_root_lock);
4530
4531	/*
4532	 * We need this here because if we've been flipped read-only we won't
4533	 * get sync() from the umount, so we need to make sure any ordered
4534	 * extents whose dirty pages haven't started writeout yet actually
4535	 * get run and error out properly.
4536	 */
4537	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4538}
4539
4540static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4541				       struct btrfs_fs_info *fs_info)
4542{
4543	struct rb_node *node;
4544	struct btrfs_delayed_ref_root *delayed_refs;
4545	struct btrfs_delayed_ref_node *ref;
4546
4547	delayed_refs = &trans->delayed_refs;
4548
4549	spin_lock(&delayed_refs->lock);
4550	if (atomic_read(&delayed_refs->num_entries) == 0) {
4551		spin_unlock(&delayed_refs->lock);
4552		btrfs_debug(fs_info, "delayed_refs has NO entry");
4553		return;
4554	}
4555
4556	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4557		struct btrfs_delayed_ref_head *head;
4558		struct rb_node *n;
4559		bool pin_bytes = false;
4560
4561		head = rb_entry(node, struct btrfs_delayed_ref_head,
4562				href_node);
4563		if (btrfs_delayed_ref_lock(delayed_refs, head))
4564			continue;
4565
4566		spin_lock(&head->lock);
4567		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4568			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4569				       ref_node);
4570			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4571			RB_CLEAR_NODE(&ref->ref_node);
4572			if (!list_empty(&ref->add_list))
4573				list_del(&ref->add_list);
4574			atomic_dec(&delayed_refs->num_entries);
4575			btrfs_put_delayed_ref(ref);
4576			btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
4577		}
4578		if (head->must_insert_reserved)
4579			pin_bytes = true;
4580		btrfs_free_delayed_extent_op(head->extent_op);
4581		btrfs_delete_ref_head(delayed_refs, head);
4582		spin_unlock(&head->lock);
4583		spin_unlock(&delayed_refs->lock);
4584		mutex_unlock(&head->mutex);
4585
4586		if (pin_bytes) {
4587			struct btrfs_block_group *cache;
4588
4589			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4590			BUG_ON(!cache);
4591
4592			spin_lock(&cache->space_info->lock);
4593			spin_lock(&cache->lock);
4594			cache->pinned += head->num_bytes;
4595			btrfs_space_info_update_bytes_pinned(fs_info,
4596				cache->space_info, head->num_bytes);
4597			cache->reserved -= head->num_bytes;
4598			cache->space_info->bytes_reserved -= head->num_bytes;
4599			spin_unlock(&cache->lock);
4600			spin_unlock(&cache->space_info->lock);
4601
4602			btrfs_put_block_group(cache);
4603
4604			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4605				head->bytenr + head->num_bytes - 1);
4606		}
4607		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4608		btrfs_put_delayed_ref_head(head);
4609		cond_resched();
4610		spin_lock(&delayed_refs->lock);
4611	}
4612	btrfs_qgroup_destroy_extent_records(trans);
4613
4614	spin_unlock(&delayed_refs->lock);
4615}
4616
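/*
 * Note: the pin_bytes path above handles extents whose allocation was
 * reserved but whose delayed ref head never ran (must_insert_reserved):
 * the bytes move from the block group's "reserved" to "pinned" accounting
 * and are then released via btrfs_error_unpin_extent_range(), keeping the
 * space_info counters balanced during forced cleanup.
 */
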
4617static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4618{
4619	struct btrfs_inode *btrfs_inode;
4620	LIST_HEAD(splice);
4621
4622	spin_lock(&root->delalloc_lock);
4623	list_splice_init(&root->delalloc_inodes, &splice);
4624
4625	while (!list_empty(&splice)) {
4626		struct inode *inode = NULL;
4627		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4628					       delalloc_inodes);
4629		__btrfs_del_delalloc_inode(root, btrfs_inode);
4630		spin_unlock(&root->delalloc_lock);
4631
4632		/*
4633		 * Make sure we get a live inode and that it'll not disappear
4634		 * meanwhile.
4635		 */
4636		inode = igrab(&btrfs_inode->vfs_inode);
4637		if (inode) {
4638			unsigned int nofs_flag;
4639
4640			nofs_flag = memalloc_nofs_save();
4641			invalidate_inode_pages2(inode->i_mapping);
4642			memalloc_nofs_restore(nofs_flag);
4643			iput(inode);
4644		}
4645		spin_lock(&root->delalloc_lock);
4646	}
4647	spin_unlock(&root->delalloc_lock);
4648}
4649
4650static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4651{
4652	struct btrfs_root *root;
4653	LIST_HEAD(splice);
4654
4655	spin_lock(&fs_info->delalloc_root_lock);
4656	list_splice_init(&fs_info->delalloc_roots, &splice);
4657	while (!list_empty(&splice)) {
4658		root = list_first_entry(&splice, struct btrfs_root,
4659					 delalloc_root);
4660		root = btrfs_grab_root(root);
4661		BUG_ON(!root);
4662		spin_unlock(&fs_info->delalloc_root_lock);
4663
4664		btrfs_destroy_delalloc_inodes(root);
4665		btrfs_put_root(root);
4666
4667		spin_lock(&fs_info->delalloc_root_lock);
4668	}
4669	spin_unlock(&fs_info->delalloc_root_lock);
4670}
4671
4672static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4673					 struct extent_io_tree *dirty_pages,
4674					 int mark)
4675{
4676	struct extent_buffer *eb;
4677	u64 start = 0;
4678	u64 end;
4679
4680	while (find_first_extent_bit(dirty_pages, start, &start, &end,
4681				     mark, NULL)) {
4682		clear_extent_bits(dirty_pages, start, end, mark);
4683		while (start <= end) {
4684			eb = find_extent_buffer(fs_info, start);
4685			start += fs_info->nodesize;
4686			if (!eb)
4687				continue;
4688
4689			btrfs_tree_lock(eb);
4690			wait_on_extent_buffer_writeback(eb);
4691			btrfs_clear_buffer_dirty(NULL, eb);
4692			btrfs_tree_unlock(eb);
4693
4694			free_extent_buffer_stale(eb);
4695		}
4696	}
4697}
4698
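/*
 * btrfs_destroy_marked_extents() above uses the standard idiom for draining
 * an extent_io_tree; a minimal sketch of the walk (hypothetical tree,
 * EXTENT_DIRTY as the example bit):
 *
 *	u64 start = 0, end;
 *
 *	while (find_first_extent_bit(tree, start, &start, &end,
 *				     EXTENT_DIRTY, NULL)) {
 *		(process the range [start, end])
 *		start = end + 1;
 *	}
 */
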
4699static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4700					struct extent_io_tree *unpin)
4701{
4702	u64 start;
4703	u64 end;
4704
4705	while (1) {
4706		struct extent_state *cached_state = NULL;
4707
4708		/*
4709		 * The btrfs_finish_extent_commit() may get the same range as
4710		 * ours between find_first_extent_bit and clear_extent_dirty.
4711		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4712		 * unpinning of the same extent range.
4713		 */
4714		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4715		if (!find_first_extent_bit(unpin, 0, &start, &end,
4716					   EXTENT_DIRTY, &cached_state)) {
4717			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4718			break;
4719		}
4720
4721		clear_extent_dirty(unpin, start, end, &cached_state);
4722		free_extent_state(cached_state);
4723		btrfs_error_unpin_extent_range(fs_info, start, end);
4724		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4725		cond_resched();
4726	}
4727}
4728
4729static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4730{
4731	struct inode *inode;
4732
4733	inode = cache->io_ctl.inode;
4734	if (inode) {
4735		unsigned int nofs_flag;
4736
4737		nofs_flag = memalloc_nofs_save();
4738		invalidate_inode_pages2(inode->i_mapping);
4739		memalloc_nofs_restore(nofs_flag);
4740
4741		BTRFS_I(inode)->generation = 0;
4742		cache->io_ctl.inode = NULL;
4743		iput(inode);
4744	}
4745	ASSERT(cache->io_ctl.pages == NULL);
4746	btrfs_put_block_group(cache);
4747}
4748
4749void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4750			     struct btrfs_fs_info *fs_info)
4751{
4752	struct btrfs_block_group *cache;
4753
4754	spin_lock(&cur_trans->dirty_bgs_lock);
4755	while (!list_empty(&cur_trans->dirty_bgs)) {
4756		cache = list_first_entry(&cur_trans->dirty_bgs,
4757					 struct btrfs_block_group,
4758					 dirty_list);
4759
4760		if (!list_empty(&cache->io_list)) {
4761			spin_unlock(&cur_trans->dirty_bgs_lock);
4762			list_del_init(&cache->io_list);
4763			btrfs_cleanup_bg_io(cache);
4764			spin_lock(&cur_trans->dirty_bgs_lock);
4765		}
4766
4767		list_del_init(&cache->dirty_list);
4768		spin_lock(&cache->lock);
4769		cache->disk_cache_state = BTRFS_DC_ERROR;
4770		spin_unlock(&cache->lock);
4771
4772		spin_unlock(&cur_trans->dirty_bgs_lock);
4773		btrfs_put_block_group(cache);
4774		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4775		spin_lock(&cur_trans->dirty_bgs_lock);
4776	}
4777	spin_unlock(&cur_trans->dirty_bgs_lock);
4778
4779	/*
4780	 * Refer to the definition of the io_bgs member for details on why it's
4781	 * safe to use it without any locking.
4782	 */
4783	while (!list_empty(&cur_trans->io_bgs)) {
4784		cache = list_first_entry(&cur_trans->io_bgs,
4785					 struct btrfs_block_group,
4786					 io_list);
4787
4788		list_del_init(&cache->io_list);
4789		spin_lock(&cache->lock);
4790		cache->disk_cache_state = BTRFS_DC_ERROR;
4791		spin_unlock(&cache->lock);
4792		btrfs_cleanup_bg_io(cache);
4793	}
4794}
4795
4796static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4797{
4798	struct btrfs_root *gang[8];
4799	int i;
4800	int ret;
4801
4802	spin_lock(&fs_info->fs_roots_radix_lock);
4803	while (1) {
4804		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4805						 (void **)gang, 0,
4806						 ARRAY_SIZE(gang),
4807						 BTRFS_ROOT_TRANS_TAG);
4808		if (ret == 0)
4809			break;
4810		for (i = 0; i < ret; i++) {
4811			struct btrfs_root *root = gang[i];
4812
4813			btrfs_qgroup_free_meta_all_pertrans(root);
4814			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4815					(unsigned long)root->root_key.objectid,
4816					BTRFS_ROOT_TRANS_TAG);
4817		}
4818	}
4819	spin_unlock(&fs_info->fs_roots_radix_lock);
4820}
4821
4822void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4823				   struct btrfs_fs_info *fs_info)
4824{
4825	struct btrfs_device *dev, *tmp;
4826
4827	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4828	ASSERT(list_empty(&cur_trans->dirty_bgs));
4829	ASSERT(list_empty(&cur_trans->io_bgs));
4830
4831	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4832				 post_commit_list) {
4833		list_del_init(&dev->post_commit_list);
4834	}
4835
4836	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4837
4838	cur_trans->state = TRANS_STATE_COMMIT_START;
4839	wake_up(&fs_info->transaction_blocked_wait);
4840
4841	cur_trans->state = TRANS_STATE_UNBLOCKED;
4842	wake_up(&fs_info->transaction_wait);
4843
4844	btrfs_destroy_delayed_inodes(fs_info);
4845
4846	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4847				     EXTENT_DIRTY);
4848	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4849
4850	btrfs_free_all_qgroup_pertrans(fs_info);
4851
4852	cur_trans->state = TRANS_STATE_COMPLETED;
4853	wake_up(&cur_trans->commit_wait);
4854}
4855
4856static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4857{
4858	struct btrfs_transaction *t;
4859
4860	mutex_lock(&fs_info->transaction_kthread_mutex);
4861
4862	spin_lock(&fs_info->trans_lock);
4863	while (!list_empty(&fs_info->trans_list)) {
4864		t = list_first_entry(&fs_info->trans_list,
4865				     struct btrfs_transaction, list);
4866		if (t->state >= TRANS_STATE_COMMIT_PREP) {
4867			refcount_inc(&t->use_count);
4868			spin_unlock(&fs_info->trans_lock);
4869			btrfs_wait_for_commit(fs_info, t->transid);
4870			btrfs_put_transaction(t);
4871			spin_lock(&fs_info->trans_lock);
4872			continue;
4873		}
4874		if (t == fs_info->running_transaction) {
4875			t->state = TRANS_STATE_COMMIT_DOING;
4876			spin_unlock(&fs_info->trans_lock);
4877			/*
4878			 * We wait for 0 num_writers since we don't hold a trans
4879			 * handle open currently for this transaction.
4880			 */
4881			wait_event(t->writer_wait,
4882				   atomic_read(&t->num_writers) == 0);
4883		} else {
4884			spin_unlock(&fs_info->trans_lock);
4885		}
4886		btrfs_cleanup_one_transaction(t, fs_info);
4887
4888		spin_lock(&fs_info->trans_lock);
4889		if (t == fs_info->running_transaction)
4890			fs_info->running_transaction = NULL;
4891		list_del_init(&t->list);
4892		spin_unlock(&fs_info->trans_lock);
4893
4894		btrfs_put_transaction(t);
4895		trace_btrfs_transaction_commit(fs_info);
4896		spin_lock(&fs_info->trans_lock);
4897	}
4898	spin_unlock(&fs_info->trans_lock);
4899	btrfs_destroy_all_ordered_extents(fs_info);
4900	btrfs_destroy_delayed_inodes(fs_info);
4901	btrfs_assert_delayed_root_empty(fs_info);
4902	btrfs_destroy_all_delalloc_inodes(fs_info);
4903	btrfs_drop_all_logs(fs_info);
4904	mutex_unlock(&fs_info->transaction_kthread_mutex);
4905
4906	return 0;
4907}
4908
4909int btrfs_init_root_free_objectid(struct btrfs_root *root)
4910{
4911	struct btrfs_path *path;
4912	int ret;
4913	struct extent_buffer *l;
4914	struct btrfs_key search_key;
4915	struct btrfs_key found_key;
4916	int slot;
4917
4918	path = btrfs_alloc_path();
4919	if (!path)
4920		return -ENOMEM;
4921
4922	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4923	search_key.type = -1;
4924	search_key.offset = (u64)-1;
4925	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4926	if (ret < 0)
4927		goto error;
4928	BUG_ON(ret == 0); /* Corruption */
4929	if (path->slots[0] > 0) {
4930		slot = path->slots[0] - 1;
4931		l = path->nodes[0];
4932		btrfs_item_key_to_cpu(l, &found_key, slot);
4933		root->free_objectid = max_t(u64, found_key.objectid + 1,
4934					    BTRFS_FIRST_FREE_OBJECTID);
4935	} else {
4936		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4937	}
4938	ret = 0;
4939error:
4940	btrfs_free_path(path);
4941	return ret;
4942}
4943
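/*
 * Note: btrfs_init_root_free_objectid() above derives the next free
 * objectid by seeking to the largest possible key and stepping back one
 * slot; e.g. if the highest existing item has objectid 260, then
 * root->free_objectid becomes 261 (but never below
 * BTRFS_FIRST_FREE_OBJECTID, i.e. 256).
 */
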
4944int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4945{
4946	int ret;
4947	mutex_lock(&root->objectid_mutex);
4948
4949	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4950		btrfs_warn(root->fs_info,
4951			   "the objectid of root %llu reaches its highest value",
4952			   root->root_key.objectid);
4953		ret = -ENOSPC;
4954		goto out;
4955	}
4956
4957	*objectid = root->free_objectid++;
4958	ret = 0;
4959out:
4960	mutex_unlock(&root->objectid_mutex);
4961	return ret;
4962}
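
/*
 * A minimal caller sketch for btrfs_get_free_objectid() (hypothetical,
 * error handling elided) - this is how a new inode number is typically
 * obtained:
 *
 *	u64 objectid;
 *	int ret = btrfs_get_free_objectid(root, &objectid);
 *
 * On success, @objectid is unique within @root and monotonically increasing
 * until BTRFS_LAST_FREE_OBJECTID is reached, at which point -ENOSPC is
 * returned.
 */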