v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include "misc.h"
   4#include "ctree.h"
   5#include "block-group.h"
   6#include "space-info.h"
   7#include "disk-io.h"
   8#include "free-space-cache.h"
   9#include "free-space-tree.h"
  10#include "disk-io.h"
  11#include "volumes.h"
  12#include "transaction.h"
  13#include "ref-verify.h"
  14#include "sysfs.h"
  15#include "tree-log.h"
  16#include "delalloc-space.h"
  17
  18/*
  19 * Return target flags in extended format or 0 if restripe for this chunk_type
  20 * is not in progress
  21 *
  22 * Should be called with balance_lock held
  23 */
  24static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  25{
  26	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  27	u64 target = 0;
  28
  29	if (!bctl)
  30		return 0;
  31
  32	if (flags & BTRFS_BLOCK_GROUP_DATA &&
  33	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  34		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
  35	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
  36		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  37		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
  38	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
  39		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  40		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
  41	}
  42
  43	return target;
  44}
  45
  46/*
  47 * @flags: available profiles in extended format (see ctree.h)
  48 *
  49 * Return reduced profile in chunk format.  If profile changing is in progress
  50 * (either running or paused) picks the target profile (if it's already
  51 * available), otherwise falls back to plain reducing.
  52 */
  53static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
  54{
  55	u64 num_devices = fs_info->fs_devices->rw_devices;
  56	u64 target;
  57	u64 raid_type;
  58	u64 allowed = 0;
  59
  60	/*
  61	 * See if restripe for this chunk_type is in progress, if so try to
  62	 * reduce to the target profile
  63	 */
  64	spin_lock(&fs_info->balance_lock);
  65	target = get_restripe_target(fs_info, flags);
  66	if (target) {
  67		/* Pick target profile only if it's already available */
  68		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
  69			spin_unlock(&fs_info->balance_lock);
  70			return extended_to_chunk(target);
  71		}
  72	}
  73	spin_unlock(&fs_info->balance_lock);
  74
  75	/* First, mask out the RAID levels which aren't possible */
  76	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
  77		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
  78			allowed |= btrfs_raid_array[raid_type].bg_flag;
  79	}
  80	allowed &= flags;
  81
  82	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
  83		allowed = BTRFS_BLOCK_GROUP_RAID6;
  84	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
  85		allowed = BTRFS_BLOCK_GROUP_RAID5;
  86	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
  87		allowed = BTRFS_BLOCK_GROUP_RAID10;
  88	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
  89		allowed = BTRFS_BLOCK_GROUP_RAID1;
  90	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
  91		allowed = BTRFS_BLOCK_GROUP_RAID0;
  92
  93	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
  94
  95	return extended_to_chunk(flags | allowed);
  96}
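/*
 * Illustrative worked example for the reduction above (profile mix assumed,
 * not taken from a real call site): on a filesystem where both profiles meet
 * their minimum device counts, passing extended flags DATA | RAID1 | RAID0
 * leaves allowed = RAID1 | RAID0 after the device mask, the if/else ladder
 * keeps only the highest-priority survivor (RAID6 > RAID5 > RAID10 > RAID1 >
 * RAID0), and the function returns extended_to_chunk(DATA | RAID1).
 */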
  97
  98static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
  99{
 100	unsigned seq;
 101	u64 flags;
 102
 103	do {
 104		flags = orig_flags;
 105		seq = read_seqbegin(&fs_info->profiles_lock);
 106
 107		if (flags & BTRFS_BLOCK_GROUP_DATA)
 108			flags |= fs_info->avail_data_alloc_bits;
 109		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 110			flags |= fs_info->avail_system_alloc_bits;
 111		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
 112			flags |= fs_info->avail_metadata_alloc_bits;
 113	} while (read_seqretry(&fs_info->profiles_lock, seq));
 114
 115	return btrfs_reduce_alloc_profile(fs_info, flags);
 116}
 117
 118u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 119{
 120	return get_alloc_profile(fs_info, orig_flags);
 121}
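/*
 * Sketch of how a caller might use the helper above (illustrative only, the
 * real call sites are elsewhere in btrfs):
 *
 *	u64 data_profile;
 *
 *	data_profile = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *
 * The read_seqbegin()/read_seqretry() loop in get_alloc_profile() retries the
 * read whenever a writer updates fs_info->avail_*_alloc_bits concurrently, so
 * the returned profile is always a consistent snapshot.
 */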
 122
 123void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 124{
 125	atomic_inc(&cache->count);
 126}
 127
 128void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 129{
 130	if (atomic_dec_and_test(&cache->count)) {
 131		WARN_ON(cache->pinned > 0);
 132		WARN_ON(cache->reserved > 0);
 133
 134		/*
  135		 * If not empty, someone is still holding the full_stripe_lock
  136		 * mutex, which can only be released by its caller, and freeing
  137		 * the cache now would cause a use-after-free when that caller
  138		 * tries to release the full stripe lock.
  139		 *
  140		 * There is no better way to resolve this, so just warn.
 141		 */
 142		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
 143		kfree(cache->free_space_ctl);
 144		kfree(cache);
 145	}
 146}
 147
 148/*
 149 * This adds the block group to the fs_info rb tree for the block group cache
 150 */
 151static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 152				struct btrfs_block_group_cache *block_group)
 153{
 154	struct rb_node **p;
 155	struct rb_node *parent = NULL;
 156	struct btrfs_block_group_cache *cache;
 157
 158	spin_lock(&info->block_group_cache_lock);
 159	p = &info->block_group_cache_tree.rb_node;
 160
 161	while (*p) {
 162		parent = *p;
 163		cache = rb_entry(parent, struct btrfs_block_group_cache,
 164				 cache_node);
 165		if (block_group->key.objectid < cache->key.objectid) {
 166			p = &(*p)->rb_left;
 167		} else if (block_group->key.objectid > cache->key.objectid) {
 168			p = &(*p)->rb_right;
 169		} else {
 170			spin_unlock(&info->block_group_cache_lock);
 171			return -EEXIST;
 172		}
 173	}
 174
 175	rb_link_node(&block_group->cache_node, parent, p);
 176	rb_insert_color(&block_group->cache_node,
 177			&info->block_group_cache_tree);
 178
 179	if (info->first_logical_byte > block_group->key.objectid)
 180		info->first_logical_byte = block_group->key.objectid;
 181
 182	spin_unlock(&info->block_group_cache_lock);
 183
 184	return 0;
 185}
 186
 187/*
 188 * This will return the block group at or after bytenr if contains is 0, else
 189 * it will return the block group that contains the bytenr
 190 */
 191static struct btrfs_block_group_cache *block_group_cache_tree_search(
 192		struct btrfs_fs_info *info, u64 bytenr, int contains)
 193{
 194	struct btrfs_block_group_cache *cache, *ret = NULL;
 195	struct rb_node *n;
 196	u64 end, start;
 197
 198	spin_lock(&info->block_group_cache_lock);
 199	n = info->block_group_cache_tree.rb_node;
 200
 201	while (n) {
 202		cache = rb_entry(n, struct btrfs_block_group_cache,
 203				 cache_node);
 204		end = cache->key.objectid + cache->key.offset - 1;
 205		start = cache->key.objectid;
 206
 207		if (bytenr < start) {
 208			if (!contains && (!ret || start < ret->key.objectid))
 209				ret = cache;
 210			n = n->rb_left;
 211		} else if (bytenr > start) {
 212			if (contains && bytenr <= end) {
 213				ret = cache;
 214				break;
 215			}
 216			n = n->rb_right;
 217		} else {
 218			ret = cache;
 219			break;
 220		}
 221	}
 222	if (ret) {
 223		btrfs_get_block_group(ret);
 224		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
 225			info->first_logical_byte = ret->key.objectid;
 226	}
 227	spin_unlock(&info->block_group_cache_lock);
 228
 229	return ret;
 230}
 231
 232/*
 233 * Return the block group that starts at or after bytenr
 234 */
 235struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
 236		struct btrfs_fs_info *info, u64 bytenr)
 237{
 238	return block_group_cache_tree_search(info, bytenr, 0);
 239}
 240
 241/*
 242 * Return the block group that contains the given bytenr
 243 */
 244struct btrfs_block_group_cache *btrfs_lookup_block_group(
 245		struct btrfs_fs_info *info, u64 bytenr)
 246{
 247	return block_group_cache_tree_search(info, bytenr, 1);
 248}
 249
 250struct btrfs_block_group_cache *btrfs_next_block_group(
 251		struct btrfs_block_group_cache *cache)
 252{
 253	struct btrfs_fs_info *fs_info = cache->fs_info;
 254	struct rb_node *node;
 255
 256	spin_lock(&fs_info->block_group_cache_lock);
 257
 258	/* If our block group was removed, we need a full search. */
 259	if (RB_EMPTY_NODE(&cache->cache_node)) {
 260		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
 261
 262		spin_unlock(&fs_info->block_group_cache_lock);
 263		btrfs_put_block_group(cache);
  264		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
 265	}
 266	node = rb_next(&cache->cache_node);
 267	btrfs_put_block_group(cache);
 268	if (node) {
 269		cache = rb_entry(node, struct btrfs_block_group_cache,
 270				 cache_node);
 271		btrfs_get_block_group(cache);
 272	} else
 273		cache = NULL;
 274	spin_unlock(&fs_info->block_group_cache_lock);
 275	return cache;
 276}
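/*
 * Sketch of the usual iteration pattern built from the two lookup helpers and
 * btrfs_next_block_group() above (pattern inferred from this file, not a
 * verbatim caller):
 *
 *	struct btrfs_block_group_cache *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg ...
 *	}
 *
 * Each helper returns a referenced block group and btrfs_next_block_group()
 * puts the one passed in, so the loop does not leak references.
 */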
 277
 278bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
 279{
 280	struct btrfs_block_group_cache *bg;
 281	bool ret = true;
 282
 283	bg = btrfs_lookup_block_group(fs_info, bytenr);
 284	if (!bg)
 285		return false;
 286
 287	spin_lock(&bg->lock);
 288	if (bg->ro)
 289		ret = false;
 290	else
 291		atomic_inc(&bg->nocow_writers);
 292	spin_unlock(&bg->lock);
 293
 294	/* No put on block group, done by btrfs_dec_nocow_writers */
 295	if (!ret)
 296		btrfs_put_block_group(bg);
 297
 298	return ret;
 299}
 300
 301void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
 302{
 303	struct btrfs_block_group_cache *bg;
 304
 305	bg = btrfs_lookup_block_group(fs_info, bytenr);
 306	ASSERT(bg);
 307	if (atomic_dec_and_test(&bg->nocow_writers))
 308		wake_up_var(&bg->nocow_writers);
 309	/*
 310	 * Once for our lookup and once for the lookup done by a previous call
 311	 * to btrfs_inc_nocow_writers()
 312	 */
 313	btrfs_put_block_group(bg);
 314	btrfs_put_block_group(bg);
 315}
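/*
 * Hedged usage sketch for the NOCOW writer accounting above (caller shape is
 * illustrative, not quoted from the write path):
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... perform the NOCOW write into this block group ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... the group is or became read-only, fall back to COW ...
 *	}
 *
 * Note the asymmetric reference handling: a successful inc keeps its lookup
 * reference, and the dec path drops both that reference and its own lookup,
 * which is why it calls btrfs_put_block_group() twice.
 */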
 316
 317void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
 318{
 319	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
 320}
 321
 322void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 323					const u64 start)
 324{
 325	struct btrfs_block_group_cache *bg;
 326
 327	bg = btrfs_lookup_block_group(fs_info, start);
 328	ASSERT(bg);
 329	if (atomic_dec_and_test(&bg->reservations))
 330		wake_up_var(&bg->reservations);
 331	btrfs_put_block_group(bg);
 332}
 333
 334void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 335{
 336	struct btrfs_space_info *space_info = bg->space_info;
 337
 338	ASSERT(bg->ro);
 339
 340	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
 341		return;
 342
 343	/*
 344	 * Our block group is read only but before we set it to read only,
  345	 * some task might have allocated an extent from it already, but it
 346	 * has not yet created a respective ordered extent (and added it to a
 347	 * root's list of ordered extents).
 348	 * Therefore wait for any task currently allocating extents, since the
 349	 * block group's reservations counter is incremented while a read lock
 350	 * on the groups' semaphore is held and decremented after releasing
 351	 * the read access on that semaphore and creating the ordered extent.
 352	 */
 353	down_write(&space_info->groups_sem);
 354	up_write(&space_info->groups_sem);
 355
 356	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
 357}
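/*
 * The empty write-lock/unlock pair above is a barrier idiom rather than a
 * critical section: something like
 *
 *	down_write(&space_info->groups_sem);
 *	up_write(&space_info->groups_sem);
 *
 * only returns once every reader that was already inside the read-side
 * section (and may still bump bg->reservations) has dropped the semaphore,
 * so the wait_var_event() that follows cannot miss a reservation made before
 * the group went read-only.
 */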
 358
 359struct btrfs_caching_control *btrfs_get_caching_control(
 360		struct btrfs_block_group_cache *cache)
 361{
 362	struct btrfs_caching_control *ctl;
 363
 364	spin_lock(&cache->lock);
 365	if (!cache->caching_ctl) {
 366		spin_unlock(&cache->lock);
 367		return NULL;
 368	}
 369
 370	ctl = cache->caching_ctl;
 371	refcount_inc(&ctl->count);
 372	spin_unlock(&cache->lock);
 373	return ctl;
 374}
 375
 376void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
 377{
 378	if (refcount_dec_and_test(&ctl->count))
 379		kfree(ctl);
 380}
 381
 382/*
  383 * When we wait for progress in the block group caching, it's because our
 384 * allocation attempt failed at least once.  So, we must sleep and let some
 385 * progress happen before we try again.
 386 *
 387 * This function will sleep at least once waiting for new free space to show
 388 * up, and then it will check the block group free space numbers for our min
 389 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 390 * a free extent of a given size, but this is a good start.
 391 *
 392 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 393 * any of the information in this block group.
 394 */
 395void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 396					   u64 num_bytes)
 397{
 398	struct btrfs_caching_control *caching_ctl;
 399
 400	caching_ctl = btrfs_get_caching_control(cache);
 401	if (!caching_ctl)
 402		return;
 403
 404	wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
 405		   (cache->free_space_ctl->free_space >= num_bytes));
 406
 407	btrfs_put_caching_control(caching_ctl);
 408}
 409
 410int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 411{
 412	struct btrfs_caching_control *caching_ctl;
 413	int ret = 0;
 414
 415	caching_ctl = btrfs_get_caching_control(cache);
 416	if (!caching_ctl)
 417		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
 418
 419	wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
 420	if (cache->cached == BTRFS_CACHE_ERROR)
 421		ret = -EIO;
 422	btrfs_put_caching_control(caching_ctl);
 423	return ret;
 424}
 425
 426#ifdef CONFIG_BTRFS_DEBUG
 427static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 428{
 429	struct btrfs_fs_info *fs_info = block_group->fs_info;
 430	u64 start = block_group->key.objectid;
 431	u64 len = block_group->key.offset;
 432	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
 433		fs_info->nodesize : fs_info->sectorsize;
 434	u64 step = chunk << 1;
 435
 436	while (len > chunk) {
 437		btrfs_remove_free_space(block_group, start, chunk);
 438		start += step;
 439		if (len < step)
 440			len = 0;
 441		else
 442			len -= step;
 443	}
 444}
 445#endif
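/*
 * Worked example for the debug helper above, assuming a data block group and
 * a 4 KiB sectorsize: chunk = 4K and step = 8K, so the loop removes the 4K
 * ranges at offsets 0, 8K, 16K, ... from the free space cache, leaving only
 * every other 4K block free.  Roughly half of the group's free space is
 * deliberately withheld to simulate a badly fragmented block group.
 */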
 446
 447/*
 448 * This is only called by btrfs_cache_block_group, since we could have freed
  449 * extents. We need to check the pinned_extents for any extents that can't be
 450 * used yet since their free space will be released as soon as the transaction
 451 * commits.
 452 */
 453u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 454		       u64 start, u64 end)
 455{
 456	struct btrfs_fs_info *info = block_group->fs_info;
 457	u64 extent_start, extent_end, size, total_added = 0;
 458	int ret;
 459
 460	while (start < end) {
 461		ret = find_first_extent_bit(info->pinned_extents, start,
 462					    &extent_start, &extent_end,
 463					    EXTENT_DIRTY | EXTENT_UPTODATE,
 464					    NULL);
 465		if (ret)
 466			break;
 467
 468		if (extent_start <= start) {
 469			start = extent_end + 1;
 470		} else if (extent_start > start && extent_start < end) {
 471			size = extent_start - start;
 472			total_added += size;
 473			ret = btrfs_add_free_space(block_group, start,
 474						   size);
 475			BUG_ON(ret); /* -ENOMEM or logic error */
 476			start = extent_end + 1;
 477		} else {
 478			break;
 479		}
 480	}
 481
 482	if (start < end) {
 483		size = end - start;
 484		total_added += size;
 485		ret = btrfs_add_free_space(block_group, start, size);
 486		BUG_ON(ret); /* -ENOMEM or logic error */
 487	}
 488
 489	return total_added;
 490}
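/*
 * Worked example with made-up numbers for the loop above: caching the range
 * [0, 100) while the pinned extents io_tree holds [20, 29] and [60, 69].
 * The first iteration adds [0, 20) as free space and advances start to 30,
 * the second adds [30, 60) and advances start to 70, and the final
 * "if (start < end)" adds [70, 100), so total_added = 20 + 30 + 30 = 80.
 */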
 491
 492static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 493{
 494	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
 495	struct btrfs_fs_info *fs_info = block_group->fs_info;
 496	struct btrfs_root *extent_root = fs_info->extent_root;
 497	struct btrfs_path *path;
 498	struct extent_buffer *leaf;
 499	struct btrfs_key key;
 500	u64 total_found = 0;
 501	u64 last = 0;
 502	u32 nritems;
 503	int ret;
 504	bool wakeup = true;
 505
 506	path = btrfs_alloc_path();
 507	if (!path)
 508		return -ENOMEM;
 509
 510	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 511
 512#ifdef CONFIG_BTRFS_DEBUG
 513	/*
 514	 * If we're fragmenting we don't want to make anybody think we can
 515	 * allocate from this block group until we've had a chance to fragment
 516	 * the free space.
 517	 */
 518	if (btrfs_should_fragment_free_space(block_group))
 519		wakeup = false;
 520#endif
 521	/*
 522	 * We don't want to deadlock with somebody trying to allocate a new
 523	 * extent for the extent root while also trying to search the extent
 524	 * root to add free space.  So we skip locking and search the commit
  525	 * root, since it's read-only.
 526	 */
 527	path->skip_locking = 1;
 528	path->search_commit_root = 1;
 529	path->reada = READA_FORWARD;
 530
 531	key.objectid = last;
 532	key.offset = 0;
 533	key.type = BTRFS_EXTENT_ITEM_KEY;
 534
 535next:
 536	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
 537	if (ret < 0)
 538		goto out;
 539
 540	leaf = path->nodes[0];
 541	nritems = btrfs_header_nritems(leaf);
 542
 543	while (1) {
 544		if (btrfs_fs_closing(fs_info) > 1) {
 545			last = (u64)-1;
 546			break;
 547		}
 548
 549		if (path->slots[0] < nritems) {
 550			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 551		} else {
 552			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
 553			if (ret)
 554				break;
 555
 556			if (need_resched() ||
 557			    rwsem_is_contended(&fs_info->commit_root_sem)) {
 558				if (wakeup)
 559					caching_ctl->progress = last;
 560				btrfs_release_path(path);
 561				up_read(&fs_info->commit_root_sem);
 562				mutex_unlock(&caching_ctl->mutex);
 563				cond_resched();
 564				mutex_lock(&caching_ctl->mutex);
 565				down_read(&fs_info->commit_root_sem);
 566				goto next;
 567			}
 568
 569			ret = btrfs_next_leaf(extent_root, path);
 570			if (ret < 0)
 571				goto out;
 572			if (ret)
 573				break;
 574			leaf = path->nodes[0];
 575			nritems = btrfs_header_nritems(leaf);
 576			continue;
 577		}
 578
 579		if (key.objectid < last) {
 580			key.objectid = last;
 581			key.offset = 0;
 582			key.type = BTRFS_EXTENT_ITEM_KEY;
 583
 584			if (wakeup)
 585				caching_ctl->progress = last;
 586			btrfs_release_path(path);
 587			goto next;
 588		}
 589
 590		if (key.objectid < block_group->key.objectid) {
 591			path->slots[0]++;
 592			continue;
 593		}
 594
 595		if (key.objectid >= block_group->key.objectid +
 596		    block_group->key.offset)
 597			break;
 598
 599		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
 600		    key.type == BTRFS_METADATA_ITEM_KEY) {
 601			total_found += add_new_free_space(block_group, last,
 602							  key.objectid);
 603			if (key.type == BTRFS_METADATA_ITEM_KEY)
 604				last = key.objectid +
 605					fs_info->nodesize;
 606			else
 607				last = key.objectid + key.offset;
 608
 609			if (total_found > CACHING_CTL_WAKE_UP) {
 610				total_found = 0;
 611				if (wakeup)
 612					wake_up(&caching_ctl->wait);
 613			}
 614		}
 615		path->slots[0]++;
 616	}
 617	ret = 0;
 618
 619	total_found += add_new_free_space(block_group, last,
 620					  block_group->key.objectid +
 621					  block_group->key.offset);
 622	caching_ctl->progress = (u64)-1;
 623
 624out:
 625	btrfs_free_path(path);
 626	return ret;
 627}
 628
 629static noinline void caching_thread(struct btrfs_work *work)
 630{
 631	struct btrfs_block_group_cache *block_group;
 632	struct btrfs_fs_info *fs_info;
 633	struct btrfs_caching_control *caching_ctl;
 634	int ret;
 635
 636	caching_ctl = container_of(work, struct btrfs_caching_control, work);
 637	block_group = caching_ctl->block_group;
 638	fs_info = block_group->fs_info;
 639
 640	mutex_lock(&caching_ctl->mutex);
 641	down_read(&fs_info->commit_root_sem);
 642
 643	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 644		ret = load_free_space_tree(caching_ctl);
 645	else
 646		ret = load_extent_tree_free(caching_ctl);
 647
 648	spin_lock(&block_group->lock);
 649	block_group->caching_ctl = NULL;
 650	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
 651	spin_unlock(&block_group->lock);
 652
 653#ifdef CONFIG_BTRFS_DEBUG
 654	if (btrfs_should_fragment_free_space(block_group)) {
 655		u64 bytes_used;
 656
 657		spin_lock(&block_group->space_info->lock);
 658		spin_lock(&block_group->lock);
 659		bytes_used = block_group->key.offset -
 660			btrfs_block_group_used(&block_group->item);
 661		block_group->space_info->bytes_used += bytes_used >> 1;
 662		spin_unlock(&block_group->lock);
 663		spin_unlock(&block_group->space_info->lock);
 664		fragment_free_space(block_group);
 665	}
 666#endif
 667
 668	caching_ctl->progress = (u64)-1;
 669
 670	up_read(&fs_info->commit_root_sem);
 671	btrfs_free_excluded_extents(block_group);
 672	mutex_unlock(&caching_ctl->mutex);
 673
 674	wake_up(&caching_ctl->wait);
 675
 676	btrfs_put_caching_control(caching_ctl);
 677	btrfs_put_block_group(block_group);
 678}
 679
 680int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
 681			    int load_cache_only)
 682{
 683	DEFINE_WAIT(wait);
 684	struct btrfs_fs_info *fs_info = cache->fs_info;
 685	struct btrfs_caching_control *caching_ctl;
 686	int ret = 0;
 687
 688	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
 689	if (!caching_ctl)
 690		return -ENOMEM;
 691
 692	INIT_LIST_HEAD(&caching_ctl->list);
 693	mutex_init(&caching_ctl->mutex);
 694	init_waitqueue_head(&caching_ctl->wait);
 695	caching_ctl->block_group = cache;
 696	caching_ctl->progress = cache->key.objectid;
 697	refcount_set(&caching_ctl->count, 1);
 698	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
 699			caching_thread, NULL, NULL);
 700
 701	spin_lock(&cache->lock);
 702	/*
 703	 * This should be a rare occasion, but this could happen I think in the
 704	 * case where one thread starts to load the space cache info, and then
 705	 * some other thread starts a transaction commit which tries to do an
 706	 * allocation while the other thread is still loading the space cache
 707	 * info.  The previous loop should have kept us from choosing this block
 708	 * group, but if we've moved to the state where we will wait on caching
 709	 * block groups we need to first check if we're doing a fast load here,
 710	 * so we can wait for it to finish, otherwise we could end up allocating
  711		 * from a block group whose cache gets evicted for one reason or
 712	 * another.
 713	 */
 714	while (cache->cached == BTRFS_CACHE_FAST) {
 715		struct btrfs_caching_control *ctl;
 716
 717		ctl = cache->caching_ctl;
 718		refcount_inc(&ctl->count);
 719		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
 720		spin_unlock(&cache->lock);
 721
 722		schedule();
 723
 724		finish_wait(&ctl->wait, &wait);
 725		btrfs_put_caching_control(ctl);
 726		spin_lock(&cache->lock);
 727	}
 728
 729	if (cache->cached != BTRFS_CACHE_NO) {
 730		spin_unlock(&cache->lock);
 731		kfree(caching_ctl);
 732		return 0;
 733	}
 734	WARN_ON(cache->caching_ctl);
 735	cache->caching_ctl = caching_ctl;
 736	cache->cached = BTRFS_CACHE_FAST;
 737	spin_unlock(&cache->lock);
 738
 739	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
 740		mutex_lock(&caching_ctl->mutex);
 741		ret = load_free_space_cache(cache);
 742
 743		spin_lock(&cache->lock);
 744		if (ret == 1) {
 745			cache->caching_ctl = NULL;
 746			cache->cached = BTRFS_CACHE_FINISHED;
 747			cache->last_byte_to_unpin = (u64)-1;
 748			caching_ctl->progress = (u64)-1;
 749		} else {
 750			if (load_cache_only) {
 751				cache->caching_ctl = NULL;
 752				cache->cached = BTRFS_CACHE_NO;
 753			} else {
 754				cache->cached = BTRFS_CACHE_STARTED;
 755				cache->has_caching_ctl = 1;
 756			}
 757		}
 758		spin_unlock(&cache->lock);
 759#ifdef CONFIG_BTRFS_DEBUG
 760		if (ret == 1 &&
 761		    btrfs_should_fragment_free_space(cache)) {
 762			u64 bytes_used;
 763
 764			spin_lock(&cache->space_info->lock);
 765			spin_lock(&cache->lock);
 766			bytes_used = cache->key.offset -
 767				btrfs_block_group_used(&cache->item);
 768			cache->space_info->bytes_used += bytes_used >> 1;
 769			spin_unlock(&cache->lock);
 770			spin_unlock(&cache->space_info->lock);
 771			fragment_free_space(cache);
 772		}
 773#endif
 774		mutex_unlock(&caching_ctl->mutex);
 775
 776		wake_up(&caching_ctl->wait);
 777		if (ret == 1) {
 778			btrfs_put_caching_control(caching_ctl);
 779			btrfs_free_excluded_extents(cache);
 780			return 0;
 781		}
 782	} else {
 783		/*
 784		 * We're either using the free space tree or no caching at all.
  785		 * Set cached to the appropriate value and wake up any waiters.
 786		 */
 787		spin_lock(&cache->lock);
 788		if (load_cache_only) {
 789			cache->caching_ctl = NULL;
 790			cache->cached = BTRFS_CACHE_NO;
 791		} else {
 792			cache->cached = BTRFS_CACHE_STARTED;
 793			cache->has_caching_ctl = 1;
 794		}
 795		spin_unlock(&cache->lock);
 796		wake_up(&caching_ctl->wait);
 797	}
 798
 799	if (load_cache_only) {
 800		btrfs_put_caching_control(caching_ctl);
 801		return 0;
 802	}
 803
 804	down_write(&fs_info->commit_root_sem);
 805	refcount_inc(&caching_ctl->count);
 806	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 807	up_write(&fs_info->commit_root_sem);
 808
 809	btrfs_get_block_group(cache);
 810
 811	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 812
 813	return ret;
 814}
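/*
 * Illustrative call patterns for the function above (the real call sites are
 * in the allocator and transaction code, not quoted here):
 *
 *	ret = btrfs_cache_block_group(cache, 1);
 *
 * only attempts the fast load from the on-disk free space cache and returns
 * without queuing the caching worker, while
 *
 *	ret = btrfs_cache_block_group(cache, 0);
 *
 * additionally queues caching_thread() to fill in the free space in the
 * background whenever the fast load could not finish the job.
 */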
 815
 816static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 817{
 818	u64 extra_flags = chunk_to_extended(flags) &
 819				BTRFS_EXTENDED_PROFILE_MASK;
 820
 821	write_seqlock(&fs_info->profiles_lock);
 822	if (flags & BTRFS_BLOCK_GROUP_DATA)
 823		fs_info->avail_data_alloc_bits &= ~extra_flags;
 824	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 825		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
 826	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 827		fs_info->avail_system_alloc_bits &= ~extra_flags;
 828	write_sequnlock(&fs_info->profiles_lock);
 829}
 830
 831/*
 832 * Clear incompat bits for the following feature(s):
 833 *
 834 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 835 *            in the whole filesystem
 836 */
 837static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
 838{
 839	if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 840		struct list_head *head = &fs_info->space_info;
 841		struct btrfs_space_info *sinfo;
 842
 843		list_for_each_entry_rcu(sinfo, head, list) {
 844			bool found = false;
 845
 846			down_read(&sinfo->groups_sem);
 847			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
 848				found = true;
 849			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
 850				found = true;
 851			up_read(&sinfo->groups_sem);
 852
 853			if (found)
 854				return;
 855		}
 856		btrfs_clear_fs_incompat(fs_info, RAID56);
 857	}
 858}
 859
 860int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 861			     u64 group_start, struct extent_map *em)
 862{
 863	struct btrfs_fs_info *fs_info = trans->fs_info;
 864	struct btrfs_root *root = fs_info->extent_root;
 865	struct btrfs_path *path;
 866	struct btrfs_block_group_cache *block_group;
 867	struct btrfs_free_cluster *cluster;
 868	struct btrfs_root *tree_root = fs_info->tree_root;
 869	struct btrfs_key key;
 870	struct inode *inode;
 871	struct kobject *kobj = NULL;
 872	int ret;
 873	int index;
 874	int factor;
 875	struct btrfs_caching_control *caching_ctl = NULL;
 876	bool remove_em;
 877	bool remove_rsv = false;
 878
 879	block_group = btrfs_lookup_block_group(fs_info, group_start);
 880	BUG_ON(!block_group);
 881	BUG_ON(!block_group->ro);
 882
 883	trace_btrfs_remove_block_group(block_group);
 884	/*
 885	 * Free the reserved super bytes from this block group before
  886	 * removing it.
 887	 */
 888	btrfs_free_excluded_extents(block_group);
 889	btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
 890				  block_group->key.offset);
 891
 892	memcpy(&key, &block_group->key, sizeof(key));
 893	index = btrfs_bg_flags_to_raid_index(block_group->flags);
 894	factor = btrfs_bg_type_to_factor(block_group->flags);
 895
 896	/* make sure this block group isn't part of an allocation cluster */
 897	cluster = &fs_info->data_alloc_cluster;
 898	spin_lock(&cluster->refill_lock);
 899	btrfs_return_cluster_to_free_space(block_group, cluster);
 900	spin_unlock(&cluster->refill_lock);
 901
 902	/*
 903	 * make sure this block group isn't part of a metadata
 904	 * allocation cluster
 905	 */
 906	cluster = &fs_info->meta_alloc_cluster;
 907	spin_lock(&cluster->refill_lock);
 908	btrfs_return_cluster_to_free_space(block_group, cluster);
 909	spin_unlock(&cluster->refill_lock);
 910
 911	path = btrfs_alloc_path();
 912	if (!path) {
 913		ret = -ENOMEM;
 914		goto out;
 915	}
 916
 917	/*
 918	 * get the inode first so any iput calls done for the io_list
 919	 * aren't the final iput (no unlinks allowed now)
 920	 */
 921	inode = lookup_free_space_inode(block_group, path);
 922
 923	mutex_lock(&trans->transaction->cache_write_mutex);
 924	/*
 925	 * Make sure our free space cache IO is done before removing the
 926	 * free space inode
 927	 */
 928	spin_lock(&trans->transaction->dirty_bgs_lock);
 929	if (!list_empty(&block_group->io_list)) {
 930		list_del_init(&block_group->io_list);
 931
 932		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
 933
 934		spin_unlock(&trans->transaction->dirty_bgs_lock);
 935		btrfs_wait_cache_io(trans, block_group, path);
 936		btrfs_put_block_group(block_group);
 937		spin_lock(&trans->transaction->dirty_bgs_lock);
 938	}
 939
 940	if (!list_empty(&block_group->dirty_list)) {
 941		list_del_init(&block_group->dirty_list);
 942		remove_rsv = true;
 943		btrfs_put_block_group(block_group);
 944	}
 945	spin_unlock(&trans->transaction->dirty_bgs_lock);
 946	mutex_unlock(&trans->transaction->cache_write_mutex);
 947
 948	if (!IS_ERR(inode)) {
 949		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 950		if (ret) {
 951			btrfs_add_delayed_iput(inode);
 952			goto out;
 953		}
 954		clear_nlink(inode);
 955		/* One for the block groups ref */
 956		spin_lock(&block_group->lock);
 957		if (block_group->iref) {
 958			block_group->iref = 0;
 959			block_group->inode = NULL;
 960			spin_unlock(&block_group->lock);
 961			iput(inode);
 962		} else {
 963			spin_unlock(&block_group->lock);
 964		}
 965		/* One for our lookup ref */
 966		btrfs_add_delayed_iput(inode);
 967	}
 968
 969	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 970	key.offset = block_group->key.objectid;
 971	key.type = 0;
 972
 973	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
 974	if (ret < 0)
 975		goto out;
 976	if (ret > 0)
 977		btrfs_release_path(path);
 978	if (ret == 0) {
 979		ret = btrfs_del_item(trans, tree_root, path);
 980		if (ret)
 981			goto out;
 982		btrfs_release_path(path);
 983	}
 984
 985	spin_lock(&fs_info->block_group_cache_lock);
 986	rb_erase(&block_group->cache_node,
 987		 &fs_info->block_group_cache_tree);
 988	RB_CLEAR_NODE(&block_group->cache_node);
 989
 990	if (fs_info->first_logical_byte == block_group->key.objectid)
 991		fs_info->first_logical_byte = (u64)-1;
 992	spin_unlock(&fs_info->block_group_cache_lock);
 993
 994	down_write(&block_group->space_info->groups_sem);
 995	/*
 996	 * we must use list_del_init so people can check to see if they
 997	 * are still on the list after taking the semaphore
 998	 */
 999	list_del_init(&block_group->list);
1000	if (list_empty(&block_group->space_info->block_groups[index])) {
1001		kobj = block_group->space_info->block_group_kobjs[index];
1002		block_group->space_info->block_group_kobjs[index] = NULL;
1003		clear_avail_alloc_bits(fs_info, block_group->flags);
1004	}
1005	up_write(&block_group->space_info->groups_sem);
1006	clear_incompat_bg_bits(fs_info, block_group->flags);
1007	if (kobj) {
1008		kobject_del(kobj);
1009		kobject_put(kobj);
1010	}
1011
1012	if (block_group->has_caching_ctl)
1013		caching_ctl = btrfs_get_caching_control(block_group);
1014	if (block_group->cached == BTRFS_CACHE_STARTED)
1015		btrfs_wait_block_group_cache_done(block_group);
1016	if (block_group->has_caching_ctl) {
1017		down_write(&fs_info->commit_root_sem);
1018		if (!caching_ctl) {
1019			struct btrfs_caching_control *ctl;
1020
1021			list_for_each_entry(ctl,
1022				    &fs_info->caching_block_groups, list)
1023				if (ctl->block_group == block_group) {
1024					caching_ctl = ctl;
1025					refcount_inc(&caching_ctl->count);
1026					break;
1027				}
1028		}
1029		if (caching_ctl)
1030			list_del_init(&caching_ctl->list);
1031		up_write(&fs_info->commit_root_sem);
1032		if (caching_ctl) {
1033			/* Once for the caching bgs list and once for us. */
1034			btrfs_put_caching_control(caching_ctl);
1035			btrfs_put_caching_control(caching_ctl);
1036		}
1037	}
1038
1039	spin_lock(&trans->transaction->dirty_bgs_lock);
1040	WARN_ON(!list_empty(&block_group->dirty_list));
1041	WARN_ON(!list_empty(&block_group->io_list));
1042	spin_unlock(&trans->transaction->dirty_bgs_lock);
1043
1044	btrfs_remove_free_space_cache(block_group);
1045
1046	spin_lock(&block_group->space_info->lock);
1047	list_del_init(&block_group->ro_list);
1048
1049	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1050		WARN_ON(block_group->space_info->total_bytes
1051			< block_group->key.offset);
1052		WARN_ON(block_group->space_info->bytes_readonly
1053			< block_group->key.offset);
1054		WARN_ON(block_group->space_info->disk_total
1055			< block_group->key.offset * factor);
1056	}
1057	block_group->space_info->total_bytes -= block_group->key.offset;
1058	block_group->space_info->bytes_readonly -= block_group->key.offset;
1059	block_group->space_info->disk_total -= block_group->key.offset * factor;
1060
1061	spin_unlock(&block_group->space_info->lock);
1062
1063	memcpy(&key, &block_group->key, sizeof(key));
1064
1065	mutex_lock(&fs_info->chunk_mutex);
1066	spin_lock(&block_group->lock);
1067	block_group->removed = 1;
1068	/*
1069	 * At this point trimming can't start on this block group, because we
1070	 * removed the block group from the tree fs_info->block_group_cache_tree
 1071	 * so no one can find it anymore, and even if someone already got this
1072	 * block group before we removed it from the rbtree, they have already
1073	 * incremented block_group->trimming - if they didn't, they won't find
1074	 * any free space entries because we already removed them all when we
1075	 * called btrfs_remove_free_space_cache().
1076	 *
1077	 * And we must not remove the extent map from the fs_info->mapping_tree
1078	 * to prevent the same logical address range and physical device space
1079	 * ranges from being reused for a new block group. This is because our
1080	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1081	 * completely transactionless, so while it is trimming a range the
1082	 * currently running transaction might finish and a new one start,
1083	 * allowing for new block groups to be created that can reuse the same
1084	 * physical device locations unless we take this special care.
1085	 *
1086	 * There may also be an implicit trim operation if the file system
1087	 * is mounted with -odiscard. The same protections must remain
1088	 * in place until the extents have been discarded completely when
1089	 * the transaction commit has completed.
1090	 */
1091	remove_em = (atomic_read(&block_group->trimming) == 0);
1092	spin_unlock(&block_group->lock);
1093
1094	mutex_unlock(&fs_info->chunk_mutex);
1095
1096	ret = remove_block_group_free_space(trans, block_group);
1097	if (ret)
1098		goto out;
1099
1100	btrfs_put_block_group(block_group);
1101	btrfs_put_block_group(block_group);
1102
1103	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1104	if (ret > 0)
1105		ret = -EIO;
1106	if (ret < 0)
1107		goto out;
1108
1109	ret = btrfs_del_item(trans, root, path);
1110	if (ret)
1111		goto out;
1112
1113	if (remove_em) {
1114		struct extent_map_tree *em_tree;
1115
1116		em_tree = &fs_info->mapping_tree;
1117		write_lock(&em_tree->lock);
1118		remove_extent_mapping(em_tree, em);
1119		write_unlock(&em_tree->lock);
1120		/* once for the tree */
1121		free_extent_map(em);
1122	}
1123out:
1124	if (remove_rsv)
1125		btrfs_delayed_refs_rsv_release(fs_info, 1);
1126	btrfs_free_path(path);
1127	return ret;
1128}
1129
1130struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1131		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1132{
1133	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1134	struct extent_map *em;
1135	struct map_lookup *map;
1136	unsigned int num_items;
1137
1138	read_lock(&em_tree->lock);
1139	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1140	read_unlock(&em_tree->lock);
1141	ASSERT(em && em->start == chunk_offset);
1142
1143	/*
1144	 * We need to reserve 3 + N units from the metadata space info in order
1145	 * to remove a block group (done at btrfs_remove_chunk() and at
1146	 * btrfs_remove_block_group()), which are used for:
1147	 *
1148	 * 1 unit for adding the free space inode's orphan (located in the tree
1149	 * of tree roots).
1150	 * 1 unit for deleting the block group item (located in the extent
1151	 * tree).
1152	 * 1 unit for deleting the free space item (located in tree of tree
1153	 * roots).
1154	 * N units for deleting N device extent items corresponding to each
1155	 * stripe (located in the device tree).
1156	 *
1157	 * In order to remove a block group we also need to reserve units in the
1158	 * system space info in order to update the chunk tree (update one or
1159	 * more device items and remove one chunk item), but this is done at
1160	 * btrfs_remove_chunk() through a call to check_system_chunk().
1161	 */
1162	map = em->map_lookup;
1163	num_items = 3 + map->num_stripes;
1164	free_extent_map(em);
1165
1166	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1167							   num_items, 1);
1168}
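/*
 * Numeric example for the reservation above (chunk layout assumed): removing
 * a block group whose chunk map has two stripes, e.g. RAID1 across two
 * devices, needs num_items = 3 + 2 = 5 metadata units: the free space inode's
 * orphan item, the block group item, the free space item and one device
 * extent item per stripe.
 */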
1169
1170/*
1171 * Mark block group @cache read-only, so later write won't happen to block
1172 * group @cache.
1173 *
1174 * If @force is not set, this function will only mark the block group readonly
1175 * if we have enough free space (1M) in other metadata/system block groups.
 1176 * If @force is set, this function will mark the block group readonly
1177 * without checking free space.
1178 *
1179 * NOTE: This function doesn't care if other block groups can contain all the
1180 * data in this block group. That check should be done by relocation routine,
1181 * not this function.
1182 */
1183static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
1184{
1185	struct btrfs_space_info *sinfo = cache->space_info;
1186	u64 num_bytes;
1187	u64 sinfo_used;
1188	u64 min_allocable_bytes;
1189	int ret = -ENOSPC;
1190
1191	/*
1192	 * We need some metadata space and system metadata space for
1193	 * allocating chunks in some corner cases until we force to set
1194	 * it to be readonly.
1195	 */
1196	if ((sinfo->flags &
1197	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
1198	    !force)
1199		min_allocable_bytes = SZ_1M;
1200	else
1201		min_allocable_bytes = 0;
1202
1203	spin_lock(&sinfo->lock);
1204	spin_lock(&cache->lock);
1205
1206	if (cache->ro) {
1207		cache->ro++;
1208		ret = 0;
1209		goto out;
1210	}
1211
1212	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
1213		    cache->bytes_super - btrfs_block_group_used(&cache->item);
1214	sinfo_used = btrfs_space_info_used(sinfo, true);
1215
1216	/*
 1217	 * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1218	 *
1219	 * Here we make sure if we mark this bg RO, we still have enough
1220	 * free space as buffer (if min_allocable_bytes is not 0).
1221	 */
1222	if (sinfo_used + num_bytes + min_allocable_bytes <=
1223	    sinfo->total_bytes) {
1224		sinfo->bytes_readonly += num_bytes;
1225		cache->ro++;
1226		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1227		ret = 0;
1228	}
1229out:
1230	spin_unlock(&cache->lock);
1231	spin_unlock(&sinfo->lock);
1232	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1233		btrfs_info(cache->fs_info,
1234			"unable to make block group %llu ro",
1235			cache->key.objectid);
1236		btrfs_info(cache->fs_info,
1237			"sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
1238			sinfo_used, num_bytes, min_allocable_bytes);
1239		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1240	}
1241	return ret;
1242}
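/*
 * Hedged numeric example for the read-only check above (sizes assumed): for
 * a 1 GiB metadata block group with 400 MiB used, nothing reserved or pinned
 * and 16 MiB of super stripes, num_bytes = 1024M - 16M - 400M = 608M.  The
 * group can be marked read-only only if sinfo_used + 608M + 1M still fits in
 * sinfo->total_bytes, i.e. the rest of the metadata space can absorb the
 * group's unused capacity and keep the 1 MiB cushion.
 */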
1243
1244/*
1245 * Process the unused_bgs list and remove any that don't have any allocated
1246 * space inside of them.
1247 */
1248void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1249{
1250	struct btrfs_block_group_cache *block_group;
1251	struct btrfs_space_info *space_info;
1252	struct btrfs_trans_handle *trans;
1253	int ret = 0;
1254
1255	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1256		return;
1257
1258	spin_lock(&fs_info->unused_bgs_lock);
1259	while (!list_empty(&fs_info->unused_bgs)) {
1260		u64 start, end;
1261		int trimming;
1262
1263		block_group = list_first_entry(&fs_info->unused_bgs,
1264					       struct btrfs_block_group_cache,
1265					       bg_list);
1266		list_del_init(&block_group->bg_list);
1267
1268		space_info = block_group->space_info;
1269
1270		if (ret || btrfs_mixed_space_info(space_info)) {
1271			btrfs_put_block_group(block_group);
1272			continue;
1273		}
1274		spin_unlock(&fs_info->unused_bgs_lock);
1275
1276		mutex_lock(&fs_info->delete_unused_bgs_mutex);
1277
1278		/* Don't want to race with allocators so take the groups_sem */
1279		down_write(&space_info->groups_sem);
1280		spin_lock(&block_group->lock);
1281		if (block_group->reserved || block_group->pinned ||
1282		    btrfs_block_group_used(&block_group->item) ||
1283		    block_group->ro ||
1284		    list_is_singular(&block_group->list)) {
1285			/*
1286			 * We want to bail if we made new allocations or have
1287			 * outstanding allocations in this block group.  We do
1288			 * the ro check in case balance is currently acting on
1289			 * this block group.
1290			 */
1291			trace_btrfs_skip_unused_block_group(block_group);
1292			spin_unlock(&block_group->lock);
1293			up_write(&space_info->groups_sem);
1294			goto next;
1295		}
1296		spin_unlock(&block_group->lock);
1297
1298		/* We don't want to force the issue, only flip if it's ok. */
1299		ret = inc_block_group_ro(block_group, 0);
1300		up_write(&space_info->groups_sem);
1301		if (ret < 0) {
1302			ret = 0;
1303			goto next;
1304		}
1305
1306		/*
1307		 * Want to do this before we do anything else so we can recover
1308		 * properly if we fail to join the transaction.
1309		 */
1310		trans = btrfs_start_trans_remove_block_group(fs_info,
1311						     block_group->key.objectid);
1312		if (IS_ERR(trans)) {
1313			btrfs_dec_block_group_ro(block_group);
1314			ret = PTR_ERR(trans);
1315			goto next;
1316		}
1317
1318		/*
1319		 * We could have pending pinned extents for this block group,
1320		 * just delete them, we don't care about them anymore.
1321		 */
1322		start = block_group->key.objectid;
1323		end = start + block_group->key.offset - 1;
1324		/*
1325		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1326		 * btrfs_finish_extent_commit(). If we are at transaction N,
1327		 * another task might be running finish_extent_commit() for the
1328		 * previous transaction N - 1, and have seen a range belonging
1329		 * to the block group in freed_extents[] before we were able to
1330		 * clear the whole block group range from freed_extents[]. This
1331		 * means that task can lookup for the block group after we
1332		 * unpinned it from freed_extents[] and removed it, leading to
1333		 * a BUG_ON() at btrfs_unpin_extent_range().
1334		 */
1335		mutex_lock(&fs_info->unused_bg_unpin_mutex);
1336		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1337				  EXTENT_DIRTY);
1338		if (ret) {
1339			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1340			btrfs_dec_block_group_ro(block_group);
1341			goto end_trans;
1342		}
1343		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1344				  EXTENT_DIRTY);
1345		if (ret) {
1346			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1347			btrfs_dec_block_group_ro(block_group);
1348			goto end_trans;
1349		}
1350		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1351
1352		/* Reset pinned so btrfs_put_block_group doesn't complain */
1353		spin_lock(&space_info->lock);
1354		spin_lock(&block_group->lock);
1355
1356		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1357						     -block_group->pinned);
1358		space_info->bytes_readonly += block_group->pinned;
1359		percpu_counter_add_batch(&space_info->total_bytes_pinned,
1360				   -block_group->pinned,
1361				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
1362		block_group->pinned = 0;
1363
1364		spin_unlock(&block_group->lock);
1365		spin_unlock(&space_info->lock);
1366
1367		/* DISCARD can flip during remount */
1368		trimming = btrfs_test_opt(fs_info, DISCARD);
1369
1370		/* Implicit trim during transaction commit. */
1371		if (trimming)
1372			btrfs_get_block_group_trimming(block_group);
1373
1374		/*
 1375		 * btrfs_remove_chunk() will abort the transaction if things go
1376		 * horribly wrong.
1377		 */
1378		ret = btrfs_remove_chunk(trans, block_group->key.objectid);
1379
1380		if (ret) {
1381			if (trimming)
1382				btrfs_put_block_group_trimming(block_group);
1383			goto end_trans;
1384		}
1385
1386		/*
1387		 * If we're not mounted with -odiscard, we can just forget
1388		 * about this block group. Otherwise we'll need to wait
1389		 * until transaction commit to do the actual discard.
1390		 */
1391		if (trimming) {
1392			spin_lock(&fs_info->unused_bgs_lock);
1393			/*
1394			 * A concurrent scrub might have added us to the list
1395			 * fs_info->unused_bgs, so use a list_move operation
1396			 * to add the block group to the deleted_bgs list.
1397			 */
1398			list_move(&block_group->bg_list,
1399				  &trans->transaction->deleted_bgs);
1400			spin_unlock(&fs_info->unused_bgs_lock);
1401			btrfs_get_block_group(block_group);
1402		}
1403end_trans:
1404		btrfs_end_transaction(trans);
1405next:
1406		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1407		btrfs_put_block_group(block_group);
1408		spin_lock(&fs_info->unused_bgs_lock);
1409	}
1410	spin_unlock(&fs_info->unused_bgs_lock);
1411}
1412
1413void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
1414{
1415	struct btrfs_fs_info *fs_info = bg->fs_info;
1416
1417	spin_lock(&fs_info->unused_bgs_lock);
1418	if (list_empty(&bg->bg_list)) {
1419		btrfs_get_block_group(bg);
1420		trace_btrfs_add_unused_block_group(bg);
1421		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1422	}
1423	spin_unlock(&fs_info->unused_bgs_lock);
1424}
1425
1426static int find_first_block_group(struct btrfs_fs_info *fs_info,
1427				  struct btrfs_path *path,
1428				  struct btrfs_key *key)
1429{
1430	struct btrfs_root *root = fs_info->extent_root;
1431	int ret = 0;
1432	struct btrfs_key found_key;
1433	struct extent_buffer *leaf;
1434	struct btrfs_block_group_item bg;
1435	u64 flags;
1436	int slot;
1437
1438	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1439	if (ret < 0)
1440		goto out;
1441
1442	while (1) {
1443		slot = path->slots[0];
1444		leaf = path->nodes[0];
1445		if (slot >= btrfs_header_nritems(leaf)) {
1446			ret = btrfs_next_leaf(root, path);
1447			if (ret == 0)
1448				continue;
1449			if (ret < 0)
1450				goto out;
1451			break;
1452		}
1453		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1454
1455		if (found_key.objectid >= key->objectid &&
1456		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1457			struct extent_map_tree *em_tree;
1458			struct extent_map *em;
1459
1460			em_tree = &root->fs_info->mapping_tree;
1461			read_lock(&em_tree->lock);
1462			em = lookup_extent_mapping(em_tree, found_key.objectid,
1463						   found_key.offset);
1464			read_unlock(&em_tree->lock);
1465			if (!em) {
1466				btrfs_err(fs_info,
1467			"logical %llu len %llu found bg but no related chunk",
1468					  found_key.objectid, found_key.offset);
1469				ret = -ENOENT;
1470			} else if (em->start != found_key.objectid ||
1471				   em->len != found_key.offset) {
1472				btrfs_err(fs_info,
1473		"block group %llu len %llu mismatch with chunk %llu len %llu",
1474					  found_key.objectid, found_key.offset,
1475					  em->start, em->len);
1476				ret = -EUCLEAN;
1477			} else {
1478				read_extent_buffer(leaf, &bg,
1479					btrfs_item_ptr_offset(leaf, slot),
1480					sizeof(bg));
1481				flags = btrfs_block_group_flags(&bg) &
1482					BTRFS_BLOCK_GROUP_TYPE_MASK;
1483
1484				if (flags != (em->map_lookup->type &
1485					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1486					btrfs_err(fs_info,
1487"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1488						found_key.objectid,
1489						found_key.offset, flags,
1490						(BTRFS_BLOCK_GROUP_TYPE_MASK &
1491						 em->map_lookup->type));
1492					ret = -EUCLEAN;
1493				} else {
1494					ret = 0;
1495				}
1496			}
1497			free_extent_map(em);
1498			goto out;
1499		}
1500		path->slots[0]++;
1501	}
1502out:
1503	return ret;
1504}
1505
1506static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1507{
1508	u64 extra_flags = chunk_to_extended(flags) &
1509				BTRFS_EXTENDED_PROFILE_MASK;
1510
1511	write_seqlock(&fs_info->profiles_lock);
1512	if (flags & BTRFS_BLOCK_GROUP_DATA)
1513		fs_info->avail_data_alloc_bits |= extra_flags;
1514	if (flags & BTRFS_BLOCK_GROUP_METADATA)
1515		fs_info->avail_metadata_alloc_bits |= extra_flags;
1516	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1517		fs_info->avail_system_alloc_bits |= extra_flags;
1518	write_sequnlock(&fs_info->profiles_lock);
1519}
1520
1521static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
1522{
1523	struct btrfs_fs_info *fs_info = cache->fs_info;
1524	u64 bytenr;
1525	u64 *logical;
1526	int stripe_len;
1527	int i, nr, ret;
1528
1529	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
1530		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
1531		cache->bytes_super += stripe_len;
1532		ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
1533						stripe_len);
1534		if (ret)
1535			return ret;
1536	}
1537
1538	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1539		bytenr = btrfs_sb_offset(i);
1540		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
1541				       bytenr, &logical, &nr, &stripe_len);
1542		if (ret)
1543			return ret;
1544
1545		while (nr--) {
1546			u64 start, len;
1547
1548			if (logical[nr] > cache->key.objectid +
1549			    cache->key.offset)
1550				continue;
1551
1552			if (logical[nr] + stripe_len <= cache->key.objectid)
1553				continue;
1554
1555			start = logical[nr];
1556			if (start < cache->key.objectid) {
1557				start = cache->key.objectid;
1558				len = (logical[nr] + stripe_len) - start;
1559			} else {
1560				len = min_t(u64, stripe_len,
1561					    cache->key.objectid +
1562					    cache->key.offset - start);
1563			}
1564
1565			cache->bytes_super += len;
1566			ret = btrfs_add_excluded_extent(fs_info, start, len);
1567			if (ret) {
1568				kfree(logical);
1569				return ret;
1570			}
1571		}
1572
1573		kfree(logical);
1574	}
1575	return 0;
1576}
1577
1578static void link_block_group(struct btrfs_block_group_cache *cache)
1579{
1580	struct btrfs_space_info *space_info = cache->space_info;
1581	int index = btrfs_bg_flags_to_raid_index(cache->flags);
1582	bool first = false;
1583
1584	down_write(&space_info->groups_sem);
1585	if (list_empty(&space_info->block_groups[index]))
1586		first = true;
1587	list_add_tail(&cache->list, &space_info->block_groups[index]);
1588	up_write(&space_info->groups_sem);
1589
1590	if (first)
1591		btrfs_sysfs_add_block_group_type(cache);
1592}
1593
1594static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
1595		struct btrfs_fs_info *fs_info, u64 start, u64 size)
1596{
1597	struct btrfs_block_group_cache *cache;
1598
1599	cache = kzalloc(sizeof(*cache), GFP_NOFS);
1600	if (!cache)
1601		return NULL;
1602
1603	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1604					GFP_NOFS);
1605	if (!cache->free_space_ctl) {
1606		kfree(cache);
1607		return NULL;
1608	}
1609
1610	cache->key.objectid = start;
1611	cache->key.offset = size;
1612	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1613
1614	cache->fs_info = fs_info;
1615	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1616	set_free_space_tree_thresholds(cache);
1617
1618	atomic_set(&cache->count, 1);
1619	spin_lock_init(&cache->lock);
1620	init_rwsem(&cache->data_rwsem);
1621	INIT_LIST_HEAD(&cache->list);
1622	INIT_LIST_HEAD(&cache->cluster_list);
1623	INIT_LIST_HEAD(&cache->bg_list);
1624	INIT_LIST_HEAD(&cache->ro_list);
1625	INIT_LIST_HEAD(&cache->dirty_list);
1626	INIT_LIST_HEAD(&cache->io_list);
1627	btrfs_init_free_space_ctl(cache);
1628	atomic_set(&cache->trimming, 0);
1629	mutex_init(&cache->free_space_lock);
1630	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1631
1632	return cache;
1633}
1634
1635/*
1636 * Iterate all chunks and verify that each of them has the corresponding block
1637 * group
1638 */
1639static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1640{
1641	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1642	struct extent_map *em;
1643	struct btrfs_block_group_cache *bg;
1644	u64 start = 0;
1645	int ret = 0;
1646
1647	while (1) {
1648		read_lock(&map_tree->lock);
1649		/*
1650		 * lookup_extent_mapping will return the first extent map
1651		 * intersecting the range, so setting @len to 1 is enough to
1652		 * get the first chunk.
1653		 */
1654		em = lookup_extent_mapping(map_tree, start, 1);
1655		read_unlock(&map_tree->lock);
1656		if (!em)
1657			break;
1658
1659		bg = btrfs_lookup_block_group(fs_info, em->start);
1660		if (!bg) {
1661			btrfs_err(fs_info,
1662	"chunk start=%llu len=%llu doesn't have corresponding block group",
1663				     em->start, em->len);
1664			ret = -EUCLEAN;
1665			free_extent_map(em);
1666			break;
1667		}
1668		if (bg->key.objectid != em->start ||
1669		    bg->key.offset != em->len ||
1670		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1671		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1672			btrfs_err(fs_info,
1673"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1674				em->start, em->len,
1675				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
1676				bg->key.objectid, bg->key.offset,
1677				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1678			ret = -EUCLEAN;
1679			free_extent_map(em);
1680			btrfs_put_block_group(bg);
1681			break;
1682		}
1683		start = em->start + em->len;
1684		free_extent_map(em);
1685		btrfs_put_block_group(bg);
1686	}
1687	return ret;
1688}
1689
1690int btrfs_read_block_groups(struct btrfs_fs_info *info)
1691{
1692	struct btrfs_path *path;
1693	int ret;
1694	struct btrfs_block_group_cache *cache;
1695	struct btrfs_space_info *space_info;
1696	struct btrfs_key key;
1697	struct btrfs_key found_key;
1698	struct extent_buffer *leaf;
1699	int need_clear = 0;
1700	u64 cache_gen;
1701	u64 feature;
1702	int mixed;
1703
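	/* Mixed block groups store data and metadata together in the same block groups. */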
1704	feature = btrfs_super_incompat_flags(info->super_copy);
1705	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
1706
1707	key.objectid = 0;
1708	key.offset = 0;
1709	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1710	path = btrfs_alloc_path();
1711	if (!path)
1712		return -ENOMEM;
1713	path->reada = READA_FORWARD;
1714
1715	cache_gen = btrfs_super_cache_generation(info->super_copy);
1716	if (btrfs_test_opt(info, SPACE_CACHE) &&
1717	    btrfs_super_generation(info->super_copy) != cache_gen)
1718		need_clear = 1;
1719	if (btrfs_test_opt(info, CLEAR_CACHE))
1720		need_clear = 1;
1721
1722	while (1) {
1723		ret = find_first_block_group(info, path, &key);
1724		if (ret > 0)
1725			break;
1726		if (ret != 0)
1727			goto error;
1728
1729		leaf = path->nodes[0];
1730		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1731
1732		cache = btrfs_create_block_group_cache(info, found_key.objectid,
1733						       found_key.offset);
1734		if (!cache) {
1735			ret = -ENOMEM;
1736			goto error;
1737		}
1738
1739		if (need_clear) {
1740			/*
1741			 * When we mount with old space cache, we need to
1742			 * set BTRFS_DC_CLEAR and set the dirty flag.
1743			 *
1744			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1745			 *    truncate the old free space cache inode and
1746			 *    set up a new one.
1747			 * b) Setting 'dirty flag' makes sure that we flush
1748			 *    the new space cache info onto disk.
1749			 */
1750			if (btrfs_test_opt(info, SPACE_CACHE))
1751				cache->disk_cache_state = BTRFS_DC_CLEAR;
1752		}
1753
1754		read_extent_buffer(leaf, &cache->item,
1755				   btrfs_item_ptr_offset(leaf, path->slots[0]),
1756				   sizeof(cache->item));
1757		cache->flags = btrfs_block_group_flags(&cache->item);
1758		if (!mixed &&
1759		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1760		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1761			btrfs_err(info,
1762"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1763				  cache->key.objectid);
1764			btrfs_put_block_group(cache);
1765			ret = -EINVAL;
1766			goto error;
1767		}
1768
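		/* Advance the search key past this block group for the next lookup. */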
1769		key.objectid = found_key.objectid + found_key.offset;
1770		btrfs_release_path(path);
1771
1772		/*
1773		 * We need to exclude the super stripes now so that the space
1774		 * info has super bytes accounted for, otherwise we'll think
1775		 * we have more space than we actually do.
1776		 */
1777		ret = exclude_super_stripes(cache);
1778		if (ret) {
1779			/*
1780			 * We may have excluded something, so call this just in
1781			 * case.
1782			 */
1783			btrfs_free_excluded_extents(cache);
1784			btrfs_put_block_group(cache);
1785			goto error;
1786		}
1787
1788		/*
1789		 * Check for two cases, either we are full, and therefore
1790		 * don't need to bother with the caching work since we won't
1791		 * find any space, or we are empty, and we can just add all
1792		 * the space in and be done with it.  This saves us _a_lot_ of
1793		 * time, particularly in the full case.
1794		 */
1795		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1796			cache->last_byte_to_unpin = (u64)-1;
1797			cache->cached = BTRFS_CACHE_FINISHED;
1798			btrfs_free_excluded_extents(cache);
1799		} else if (btrfs_block_group_used(&cache->item) == 0) {
1800			cache->last_byte_to_unpin = (u64)-1;
1801			cache->cached = BTRFS_CACHE_FINISHED;
1802			add_new_free_space(cache, found_key.objectid,
1803					   found_key.objectid +
1804					   found_key.offset);
1805			btrfs_free_excluded_extents(cache);
1806		}
1807
1808		ret = btrfs_add_block_group_cache(info, cache);
1809		if (ret) {
1810			btrfs_remove_free_space_cache(cache);
1811			btrfs_put_block_group(cache);
1812			goto error;
1813		}
1814
1815		trace_btrfs_add_block_group(info, cache, 0);
1816		btrfs_update_space_info(info, cache->flags, found_key.offset,
1817					btrfs_block_group_used(&cache->item),
1818					cache->bytes_super, &space_info);
1819
1820		cache->space_info = space_info;
1821
1822		link_block_group(cache);
1823
1824		set_avail_alloc_bits(info, cache->flags);
1825		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
1826			inc_block_group_ro(cache, 1);
1827		} else if (btrfs_block_group_used(&cache->item) == 0) {
1828			ASSERT(list_empty(&cache->bg_list));
1829			btrfs_mark_bg_unused(cache);
1830		}
1831	}
1832
1833	list_for_each_entry_rcu(space_info, &info->space_info, list) {
1834		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1835		      (BTRFS_BLOCK_GROUP_RAID10 |
1836		       BTRFS_BLOCK_GROUP_RAID1_MASK |
1837		       BTRFS_BLOCK_GROUP_RAID56_MASK |
1838		       BTRFS_BLOCK_GROUP_DUP)))
1839			continue;
1840		/*
1841		 * Avoid allocating from un-mirrored block group if there are
1842		 * mirrored block groups.
1843		 */
1844		list_for_each_entry(cache,
1845				&space_info->block_groups[BTRFS_RAID_RAID0],
1846				list)
1847			inc_block_group_ro(cache, 1);
1848		list_for_each_entry(cache,
1849				&space_info->block_groups[BTRFS_RAID_SINGLE],
1850				list)
1851			inc_block_group_ro(cache, 1);
1852	}
1853
1854	btrfs_init_global_block_rsv(info);
1855	ret = check_chunk_block_group_mappings(info);
1856error:
1857	btrfs_free_path(path);
1858	return ret;
1859}
1860
1861void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
1862{
1863	struct btrfs_fs_info *fs_info = trans->fs_info;
1864	struct btrfs_block_group_cache *block_group;
1865	struct btrfs_root *extent_root = fs_info->extent_root;
1866	struct btrfs_block_group_item item;
1867	struct btrfs_key key;
1868	int ret = 0;
1869
1870	if (!trans->can_flush_pending_bgs)
1871		return;
1872
1873	while (!list_empty(&trans->new_bgs)) {
1874		block_group = list_first_entry(&trans->new_bgs,
1875					       struct btrfs_block_group_cache,
1876					       bg_list);
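		/* If a previous iteration failed, just drain the remaining entries below. */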
1877		if (ret)
1878			goto next;
1879
1880		spin_lock(&block_group->lock);
1881		memcpy(&item, &block_group->item, sizeof(item));
1882		memcpy(&key, &block_group->key, sizeof(key));
1883		spin_unlock(&block_group->lock);
1884
1885		ret = btrfs_insert_item(trans, extent_root, &key, &item,
1886					sizeof(item));
1887		if (ret)
1888			btrfs_abort_transaction(trans, ret);
1889		ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
1890		if (ret)
1891			btrfs_abort_transaction(trans, ret);
1892		add_block_group_free_space(trans, block_group);
1893		/* Already aborted the transaction if it failed. */
1894next:
1895		btrfs_delayed_refs_rsv_release(fs_info, 1);
1896		list_del_init(&block_group->bg_list);
1897	}
1898	btrfs_trans_release_chunk_metadata(trans);
1899}
1900
1901int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
1902			   u64 type, u64 chunk_offset, u64 size)
1903{
1904	struct btrfs_fs_info *fs_info = trans->fs_info;
1905	struct btrfs_block_group_cache *cache;
1906	int ret;
1907
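	/* Force any tree-log sync in this transaction to fall back to a full commit. */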
1908	btrfs_set_log_full_commit(trans);
1909
1910	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
1911	if (!cache)
1912		return -ENOMEM;
1913
1914	btrfs_set_block_group_used(&cache->item, bytes_used);
1915	btrfs_set_block_group_chunk_objectid(&cache->item,
1916					     BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1917	btrfs_set_block_group_flags(&cache->item, type);
1918
1919	cache->flags = type;
1920	cache->last_byte_to_unpin = (u64)-1;
1921	cache->cached = BTRFS_CACHE_FINISHED;
1922	cache->needs_free_space = 1;
1923	ret = exclude_super_stripes(cache);
1924	if (ret) {
1925		/* We may have excluded something, so call this just in case */
1926		btrfs_free_excluded_extents(cache);
1927		btrfs_put_block_group(cache);
1928		return ret;
1929	}
1930
1931	add_new_free_space(cache, chunk_offset, chunk_offset + size);
1932
1933	btrfs_free_excluded_extents(cache);
1934
1935#ifdef CONFIG_BTRFS_DEBUG
1936	if (btrfs_should_fragment_free_space(cache)) {
1937		u64 new_bytes_used = size - bytes_used;
1938
1939		bytes_used += new_bytes_used >> 1;
1940		fragment_free_space(cache);
1941	}
1942#endif
1943	/*
1944	 * Ensure the corresponding space_info object is created and
1945	 * assigned to our block group. We want our bg to be added to the rbtree
1946	 * with its ->space_info set.
1947	 */
1948	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
1949	ASSERT(cache->space_info);
1950
1951	ret = btrfs_add_block_group_cache(fs_info, cache);
1952	if (ret) {
1953		btrfs_remove_free_space_cache(cache);
1954		btrfs_put_block_group(cache);
1955		return ret;
1956	}
1957
1958	/*
1959	 * Now that our block group has its ->space_info set and is inserted in
1960	 * the rbtree, update the space info's counters.
1961	 */
1962	trace_btrfs_add_block_group(fs_info, cache, 1);
1963	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
1964				cache->bytes_super, &cache->space_info);
1965	btrfs_update_global_block_rsv(fs_info);
1966
1967	link_block_group(cache);
1968
1969	list_add_tail(&cache->bg_list, &trans->new_bgs);
1970	trans->delayed_ref_updates++;
1971	btrfs_update_delayed_refs_rsv(trans);
1972
1973	set_avail_alloc_bits(fs_info, type);
1974	return 0;
1975}
1976
1977static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
1978{
1979	u64 num_devices;
1980	u64 stripped;
1981
1982	/*
1983	 * If restripe for this chunk_type is on, pick the target profile and
1984	 * return; otherwise do the usual balance.
1985	 */
1986	stripped = get_restripe_target(fs_info, flags);
1987	if (stripped)
1988		return extended_to_chunk(stripped);
1989
1990	num_devices = fs_info->fs_devices->rw_devices;
1991
1992	stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
1993		BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
1994
1995	if (num_devices == 1) {
1996		stripped |= BTRFS_BLOCK_GROUP_DUP;
1997		stripped = flags & ~stripped;
1998
1999		/* turn raid0 into single device chunks */
2000		if (flags & BTRFS_BLOCK_GROUP_RAID0)
2001			return stripped;
2002
2003		/* turn mirroring into duplication */
2004		if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2005			     BTRFS_BLOCK_GROUP_RAID10))
2006			return stripped | BTRFS_BLOCK_GROUP_DUP;
2007	} else {
2008		/* they already had raid on here, just return */
2009		if (flags & stripped)
2010			return flags;
2011
2012		stripped |= BTRFS_BLOCK_GROUP_DUP;
2013		stripped = flags & ~stripped;
2014
2015		/* switch duplicated blocks with raid1 */
2016		if (flags & BTRFS_BLOCK_GROUP_DUP)
2017			return stripped | BTRFS_BLOCK_GROUP_RAID1;
2018
2019		/* this is drive concat, leave it alone */
2020	}
2021
2022	return flags;
2023}
2024
2025int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
2026
2027{
2028	struct btrfs_fs_info *fs_info = cache->fs_info;
2029	struct btrfs_trans_handle *trans;
2030	u64 alloc_flags;
2031	int ret;
2032
2033again:
2034	trans = btrfs_join_transaction(fs_info->extent_root);
2035	if (IS_ERR(trans))
2036		return PTR_ERR(trans);
2037
2038	/*
2039	 * we're not allowed to set block groups readonly after the dirty
2040	 * block groups cache has started writing.  If it already started,
2041	 * back off and let this transaction commit
2042	 */
2043	mutex_lock(&fs_info->ro_block_group_mutex);
2044	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2045		u64 transid = trans->transid;
2046
2047		mutex_unlock(&fs_info->ro_block_group_mutex);
2048		btrfs_end_transaction(trans);
2049
2050		ret = btrfs_wait_for_commit(fs_info, transid);
2051		if (ret)
2052			return ret;
2053		goto again;
2054	}
2055
2056	/*
2057	 * if we are changing raid levels, try to allocate a corresponding
2058	 * block group with the new raid level.
2059	 */
2060	alloc_flags = update_block_group_flags(fs_info, cache->flags);
2061	if (alloc_flags != cache->flags) {
2062		ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2063		/*
2064		 * ENOSPC is allowed here, we may have enough space
2065		 * already allocated at the new raid level to
2066		 * carry on
2067		 */
2068		if (ret == -ENOSPC)
2069			ret = 0;
2070		if (ret < 0)
2071			goto out;
2072	}
2073
2074	ret = inc_block_group_ro(cache, 0);
2075	if (!ret)
2076		goto out;
2077	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2078	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2079	if (ret < 0)
2080		goto out;
2081	ret = inc_block_group_ro(cache, 0);
2082out:
2083	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2084		alloc_flags = update_block_group_flags(fs_info, cache->flags);
2085		mutex_lock(&fs_info->chunk_mutex);
2086		check_system_chunk(trans, alloc_flags);
2087		mutex_unlock(&fs_info->chunk_mutex);
2088	}
2089	mutex_unlock(&fs_info->ro_block_group_mutex);
2090
2091	btrfs_end_transaction(trans);
2092	return ret;
2093}
2094
2095void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
2096{
2097	struct btrfs_space_info *sinfo = cache->space_info;
2098	u64 num_bytes;
2099
2100	BUG_ON(!cache->ro);
2101
2102	spin_lock(&sinfo->lock);
2103	spin_lock(&cache->lock);
2104	if (!--cache->ro) {
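		/* Dropped the last RO reference: stop accounting the unused space as read-only. */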
2105		num_bytes = cache->key.offset - cache->reserved -
2106			    cache->pinned - cache->bytes_super -
2107			    btrfs_block_group_used(&cache->item);
2108		sinfo->bytes_readonly -= num_bytes;
2109		list_del_init(&cache->ro_list);
2110	}
2111	spin_unlock(&cache->lock);
2112	spin_unlock(&sinfo->lock);
2113}
2114
2115static int write_one_cache_group(struct btrfs_trans_handle *trans,
2116				 struct btrfs_path *path,
2117				 struct btrfs_block_group_cache *cache)
2118{
2119	struct btrfs_fs_info *fs_info = trans->fs_info;
2120	int ret;
2121	struct btrfs_root *extent_root = fs_info->extent_root;
2122	unsigned long bi;
2123	struct extent_buffer *leaf;
2124
2125	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
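	/* A return value greater than zero means the block group item was not found. */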
2126	if (ret) {
2127		if (ret > 0)
2128			ret = -ENOENT;
2129		goto fail;
2130	}
2131
2132	leaf = path->nodes[0];
2133	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2134	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2135	btrfs_mark_buffer_dirty(leaf);
2136fail:
2137	btrfs_release_path(path);
2138	return ret;
2139
2140}
2141
2142static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2143			    struct btrfs_trans_handle *trans,
2144			    struct btrfs_path *path)
2145{
2146	struct btrfs_fs_info *fs_info = block_group->fs_info;
2147	struct btrfs_root *root = fs_info->tree_root;
2148	struct inode *inode = NULL;
2149	struct extent_changeset *data_reserved = NULL;
2150	u64 alloc_hint = 0;
2151	int dcs = BTRFS_DC_ERROR;
2152	u64 num_pages = 0;
2153	int retries = 0;
2154	int ret = 0;
2155
2156	/*
2157	 * If this block group is smaller than 100 megs, don't bother caching the
2158	 * block group.
2159	 */
2160	if (block_group->key.offset < (100 * SZ_1M)) {
2161		spin_lock(&block_group->lock);
2162		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2163		spin_unlock(&block_group->lock);
2164		return 0;
2165	}
2166
2167	if (trans->aborted)
2168		return 0;
2169again:
2170	inode = lookup_free_space_inode(block_group, path);
2171	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2172		ret = PTR_ERR(inode);
2173		btrfs_release_path(path);
2174		goto out;
2175	}
2176
2177	if (IS_ERR(inode)) {
2178		BUG_ON(retries);
2179		retries++;
2180
2181		if (block_group->ro)
2182			goto out_free;
2183
2184		ret = create_free_space_inode(trans, block_group, path);
2185		if (ret)
2186			goto out_free;
2187		goto again;
2188	}
2189
2190	/*
2191	 * We want to set the generation to 0, that way if anything goes wrong
2192	 * from here on out we know not to trust this cache when we load up next
2193	 * time.
2194	 */
2195	BTRFS_I(inode)->generation = 0;
2196	ret = btrfs_update_inode(trans, root, inode);
2197	if (ret) {
2198		/*
2199		 * So theoretically we could recover from this, simply set the
2200		 * super cache generation to 0 so we know to invalidate the
2201		 * cache, but then we'd have to keep track of the block groups
2202		 * that fail this way so we know we _have_ to reset this cache
2203		 * before the next commit or risk reading stale cache.  So to
2204		 * limit our exposure to horrible edge cases, let's just abort the
2205		 * transaction, this only happens in really bad situations
2206		 * anyway.
2207		 */
2208		btrfs_abort_transaction(trans, ret);
2209		goto out_put;
2210	}
2211	WARN_ON(ret);
2212
2213	/* We've already setup this transaction, go ahead and exit */
2214	if (block_group->cache_generation == trans->transid &&
2215	    i_size_read(inode)) {
2216		dcs = BTRFS_DC_SETUP;
2217		goto out_put;
2218	}
2219
2220	if (i_size_read(inode) > 0) {
2221		ret = btrfs_check_trunc_cache_free_space(fs_info,
2222					&fs_info->global_block_rsv);
2223		if (ret)
2224			goto out_put;
2225
2226		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2227		if (ret)
2228			goto out_put;
2229	}
2230
2231	spin_lock(&block_group->lock);
2232	if (block_group->cached != BTRFS_CACHE_FINISHED ||
2233	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2234		/*
2235		 * don't bother trying to write stuff out _if_
2236		 * a) we're not cached,
2237		 * b) we're using the nospace_cache mount option,
2238		 * c) we're using the v2 space cache (FREE_SPACE_TREE).
2239		 */
2240		dcs = BTRFS_DC_WRITTEN;
2241		spin_unlock(&block_group->lock);
2242		goto out_put;
2243	}
2244	spin_unlock(&block_group->lock);
2245
2246	/*
2247	 * We hit an ENOSPC when setting up the cache in this transaction, just
2248	 * skip doing the setup, we've already cleared the cache so we're safe.
2249	 */
2250	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2251		ret = -ENOSPC;
2252		goto out_put;
2253	}
2254
2255	/*
2256	 * Try to preallocate enough space based on how big the block group is.
2257	 * Keep in mind this has to include any pinned space which could end up
2258	 * taking up quite a bit since it's not folded into the other space
2259	 * cache.
2260	 */
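	/* That is 16 pages worth of cache space for every 256MB of block group size. */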
2261	num_pages = div_u64(block_group->key.offset, SZ_256M);
2262	if (!num_pages)
2263		num_pages = 1;
2264
2265	num_pages *= 16;
2266	num_pages *= PAGE_SIZE;
2267
2268	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2269	if (ret)
2270		goto out_put;
2271
2272	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2273					      num_pages, num_pages,
2274					      &alloc_hint);
2275	/*
2276	 * Our cache requires contiguous chunks so that we don't modify a bunch
2277	 * of metadata or split extents when writing the cache out, which means
2278	 * we can enospc if we are heavily fragmented in addition to just normal
2279	 * out of space conditions.  So if we hit this just skip setting up any
2280	 * other block groups for this transaction, maybe we'll unpin enough
2281	 * space the next time around.
2282	 */
2283	if (!ret)
2284		dcs = BTRFS_DC_SETUP;
2285	else if (ret == -ENOSPC)
2286		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2287
2288out_put:
2289	iput(inode);
2290out_free:
2291	btrfs_release_path(path);
2292out:
2293	spin_lock(&block_group->lock);
2294	if (!ret && dcs == BTRFS_DC_SETUP)
2295		block_group->cache_generation = trans->transid;
2296	block_group->disk_cache_state = dcs;
2297	spin_unlock(&block_group->lock);
2298
2299	extent_changeset_free(data_reserved);
2300	return ret;
2301}
2302
2303int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2304{
2305	struct btrfs_fs_info *fs_info = trans->fs_info;
2306	struct btrfs_block_group_cache *cache, *tmp;
2307	struct btrfs_transaction *cur_trans = trans->transaction;
2308	struct btrfs_path *path;
2309
2310	if (list_empty(&cur_trans->dirty_bgs) ||
2311	    !btrfs_test_opt(fs_info, SPACE_CACHE))
2312		return 0;
2313
2314	path = btrfs_alloc_path();
2315	if (!path)
2316		return -ENOMEM;
2317
2318	/* Could add new block groups, use _safe just in case */
2319	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2320				 dirty_list) {
2321		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2322			cache_save_setup(cache, trans, path);
2323	}
2324
2325	btrfs_free_path(path);
2326	return 0;
2327}
2328
2329/*
2330 * Transaction commit does final block group cache writeback during a critical
2331 * section where nothing is allowed to change the FS.  This is required in
2332 * order for the cache to actually match the block group, but can introduce a
2333 * lot of latency into the commit.
2334 *
2335 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2336 * There's a chance we'll have to redo some of it if the block group changes
2337 * again during the commit, but it greatly reduces the commit latency by
2338 * getting rid of the easy block groups while we're still allowing others to
2339 * join the commit.
2340 */
2341int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2342{
2343	struct btrfs_fs_info *fs_info = trans->fs_info;
2344	struct btrfs_block_group_cache *cache;
2345	struct btrfs_transaction *cur_trans = trans->transaction;
2346	int ret = 0;
2347	int should_put;
2348	struct btrfs_path *path = NULL;
2349	LIST_HEAD(dirty);
2350	struct list_head *io = &cur_trans->io_bgs;
2351	int num_started = 0;
2352	int loops = 0;
2353
2354	spin_lock(&cur_trans->dirty_bgs_lock);
2355	if (list_empty(&cur_trans->dirty_bgs)) {
2356		spin_unlock(&cur_trans->dirty_bgs_lock);
2357		return 0;
2358	}
2359	list_splice_init(&cur_trans->dirty_bgs, &dirty);
2360	spin_unlock(&cur_trans->dirty_bgs_lock);
2361
2362again:
2363	/* Make sure all the block groups on our dirty list actually exist */
2364	btrfs_create_pending_block_groups(trans);
2365
2366	if (!path) {
2367		path = btrfs_alloc_path();
2368		if (!path)
2369			return -ENOMEM;
2370	}
2371
2372	/*
2373	 * cache_write_mutex is here only to save us from balance or automatic
2374	 * removal of empty block groups deleting this block group while we are
2375	 * writing out the cache
2376	 */
2377	mutex_lock(&trans->transaction->cache_write_mutex);
2378	while (!list_empty(&dirty)) {
2379		bool drop_reserve = true;
2380
2381		cache = list_first_entry(&dirty,
2382					 struct btrfs_block_group_cache,
2383					 dirty_list);
2384		/*
2385		 * This can happen if something re-dirties a block group that
2386		 * is already under IO.  Just wait for it to finish and then do
2387		 * it all again
2388		 */
2389		if (!list_empty(&cache->io_list)) {
2390			list_del_init(&cache->io_list);
2391			btrfs_wait_cache_io(trans, cache, path);
2392			btrfs_put_block_group(cache);
2393		}
2394
2395
2396		/*
2397		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2398		 * it should update the cache_state.  Don't delete until after
2399		 * we wait.
2400		 *
2401		 * Since we're not running in the commit critical section
2402		 * we need the dirty_bgs_lock to protect from update_block_group
2403		 */
2404		spin_lock(&cur_trans->dirty_bgs_lock);
2405		list_del_init(&cache->dirty_list);
2406		spin_unlock(&cur_trans->dirty_bgs_lock);
2407
2408		should_put = 1;
2409
2410		cache_save_setup(cache, trans, path);
2411
2412		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2413			cache->io_ctl.inode = NULL;
2414			ret = btrfs_write_out_cache(trans, cache, path);
2415			if (ret == 0 && cache->io_ctl.inode) {
2416				num_started++;
2417				should_put = 0;
2418
2419				/*
2420				 * The cache_write_mutex is protecting the
2421				 * io_list, also refer to the definition of
2422				 * btrfs_transaction::io_bgs for more details
2423				 */
2424				list_add_tail(&cache->io_list, io);
2425			} else {
2426				/*
2427				 * If we failed to write the cache, the
2428				 * generation will be bad and life goes on
2429				 */
2430				ret = 0;
2431			}
2432		}
2433		if (!ret) {
2434			ret = write_one_cache_group(trans, path, cache);
2435			/*
2436			 * Our block group might still be attached to the list
2437			 * of new block groups in the transaction handle of some
2438			 * other task (struct btrfs_trans_handle->new_bgs). This
2439			 * means its block group item isn't yet in the extent
2440			 * tree. If this happens ignore the error, as we will
2441			 * try again later in the critical section of the
2442			 * transaction commit.
2443			 */
2444			if (ret == -ENOENT) {
2445				ret = 0;
2446				spin_lock(&cur_trans->dirty_bgs_lock);
2447				if (list_empty(&cache->dirty_list)) {
2448					list_add_tail(&cache->dirty_list,
2449						      &cur_trans->dirty_bgs);
2450					btrfs_get_block_group(cache);
2451					drop_reserve = false;
2452				}
2453				spin_unlock(&cur_trans->dirty_bgs_lock);
2454			} else if (ret) {
2455				btrfs_abort_transaction(trans, ret);
2456			}
2457		}
2458
2459		/* If it's not on the io list, we need to put the block group */
2460		if (should_put)
2461			btrfs_put_block_group(cache);
2462		if (drop_reserve)
2463			btrfs_delayed_refs_rsv_release(fs_info, 1);
2464
2465		if (ret)
2466			break;
2467
2468		/*
2469		 * Avoid blocking other tasks for too long. It might even save
2470		 * us from writing caches for block groups that are going to be
2471		 * removed.
2472		 */
2473		mutex_unlock(&trans->transaction->cache_write_mutex);
2474		mutex_lock(&trans->transaction->cache_write_mutex);
2475	}
2476	mutex_unlock(&trans->transaction->cache_write_mutex);
2477
2478	/*
2479	 * Go through delayed refs for all the stuff we've just kicked off
2480	 * and then loop back (just once)
2481	 */
2482	ret = btrfs_run_delayed_refs(trans, 0);
2483	if (!ret && loops == 0) {
2484		loops++;
2485		spin_lock(&cur_trans->dirty_bgs_lock);
2486		list_splice_init(&cur_trans->dirty_bgs, &dirty);
2487		/*
2488		 * dirty_bgs_lock protects us from concurrent block group
2489		 * deletes too (not just cache_write_mutex).
2490		 */
2491		if (!list_empty(&dirty)) {
2492			spin_unlock(&cur_trans->dirty_bgs_lock);
2493			goto again;
2494		}
2495		spin_unlock(&cur_trans->dirty_bgs_lock);
2496	} else if (ret < 0) {
2497		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2498	}
2499
2500	btrfs_free_path(path);
2501	return ret;
2502}
2503
2504int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2505{
2506	struct btrfs_fs_info *fs_info = trans->fs_info;
2507	struct btrfs_block_group_cache *cache;
2508	struct btrfs_transaction *cur_trans = trans->transaction;
2509	int ret = 0;
2510	int should_put;
2511	struct btrfs_path *path;
2512	struct list_head *io = &cur_trans->io_bgs;
2513	int num_started = 0;
2514
2515	path = btrfs_alloc_path();
2516	if (!path)
2517		return -ENOMEM;
2518
2519	/*
2520	 * Even though we are in the critical section of the transaction commit,
2521	 * we can still have concurrent tasks adding elements to this
2522	 * transaction's list of dirty block groups. These tasks correspond to
2523	 * endio free space workers started when writeback finishes for a
2524	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2525	 * allocate new block groups as a result of COWing nodes of the root
2526	 * tree when updating the free space inode. The writeback for the space
2527	 * caches is triggered by an earlier call to
2528	 * btrfs_start_dirty_block_groups() and iterations of the following
2529	 * loop.
2530	 * Also we want to do the cache_save_setup first and then run the
2531	 * delayed refs to make sure we have the best chance at doing this all
2532	 * in one shot.
2533	 */
2534	spin_lock(&cur_trans->dirty_bgs_lock);
2535	while (!list_empty(&cur_trans->dirty_bgs)) {
2536		cache = list_first_entry(&cur_trans->dirty_bgs,
2537					 struct btrfs_block_group_cache,
2538					 dirty_list);
2539
2540		/*
2541		 * This can happen if cache_save_setup re-dirties a block group
2542		 * that is already under IO.  Just wait for it to finish and
2543		 * then do it all again
2544		 */
2545		if (!list_empty(&cache->io_list)) {
2546			spin_unlock(&cur_trans->dirty_bgs_lock);
2547			list_del_init(&cache->io_list);
2548			btrfs_wait_cache_io(trans, cache, path);
2549			btrfs_put_block_group(cache);
2550			spin_lock(&cur_trans->dirty_bgs_lock);
2551		}
2552
2553		/*
2554		 * Don't remove from the dirty list until after we've waited on
2555		 * any pending IO
2556		 */
2557		list_del_init(&cache->dirty_list);
2558		spin_unlock(&cur_trans->dirty_bgs_lock);
2559		should_put = 1;
2560
2561		cache_save_setup(cache, trans, path);
2562
2563		if (!ret)
2564			ret = btrfs_run_delayed_refs(trans,
2565						     (unsigned long) -1);
2566
2567		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2568			cache->io_ctl.inode = NULL;
2569			ret = btrfs_write_out_cache(trans, cache, path);
2570			if (ret == 0 && cache->io_ctl.inode) {
2571				num_started++;
2572				should_put = 0;
2573				list_add_tail(&cache->io_list, io);
2574			} else {
2575				/*
2576				 * If we failed to write the cache, the
2577				 * generation will be bad and life goes on
2578				 */
2579				ret = 0;
2580			}
2581		}
2582		if (!ret) {
2583			ret = write_one_cache_group(trans, path, cache);
2584			/*
2585			 * One of the free space endio workers might have
2586			 * created a new block group while updating a free space
2587			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
2588			 * and hasn't released its transaction handle yet, in
2589			 * which case the new block group is still attached to
2590			 * its transaction handle and its creation has not
2591			 * finished yet (no block group item in the extent tree
2592			 * yet, etc). If this is the case, wait for all free
2593			 * space endio workers to finish and retry. This is a
2594			 * very rare case, so no need for a more efficient and
2595			 * complex approach.
2596			 */
2597			if (ret == -ENOENT) {
2598				wait_event(cur_trans->writer_wait,
2599				   atomic_read(&cur_trans->num_writers) == 1);
2600				ret = write_one_cache_group(trans, path, cache);
2601			}
2602			if (ret)
2603				btrfs_abort_transaction(trans, ret);
2604		}
2605
2606		/* If it's not on the io list, we need to put the block group */
2607		if (should_put)
2608			btrfs_put_block_group(cache);
2609		btrfs_delayed_refs_rsv_release(fs_info, 1);
2610		spin_lock(&cur_trans->dirty_bgs_lock);
2611	}
2612	spin_unlock(&cur_trans->dirty_bgs_lock);
2613
2614	/*
2615	 * Refer to the definition of the io_bgs member for details on why it's safe
2616	 * to use it without any locking
2617	 */
2618	while (!list_empty(io)) {
2619		cache = list_first_entry(io, struct btrfs_block_group_cache,
2620					 io_list);
2621		list_del_init(&cache->io_list);
2622		btrfs_wait_cache_io(trans, cache, path);
2623		btrfs_put_block_group(cache);
2624	}
2625
2626	btrfs_free_path(path);
2627	return ret;
2628}
2629
2630int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2631			     u64 bytenr, u64 num_bytes, int alloc)
2632{
2633	struct btrfs_fs_info *info = trans->fs_info;
2634	struct btrfs_block_group_cache *cache = NULL;
2635	u64 total = num_bytes;
2636	u64 old_val;
2637	u64 byte_in_group;
2638	int factor;
2639	int ret = 0;
2640
2641	/* Block accounting for super block */
2642	spin_lock(&info->delalloc_root_lock);
2643	old_val = btrfs_super_bytes_used(info->super_copy);
2644	if (alloc)
2645		old_val += num_bytes;
2646	else
2647		old_val -= num_bytes;
2648	btrfs_set_super_bytes_used(info->super_copy, old_val);
2649	spin_unlock(&info->delalloc_root_lock);
2650
2651	while (total) {
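		/* The range may span several block groups; handle one block group per iteration. */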
2652		cache = btrfs_lookup_block_group(info, bytenr);
2653		if (!cache) {
2654			ret = -ENOENT;
2655			break;
2656		}
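		/* Mirrored profiles (DUP/RAID1/RAID10) consume twice the raw disk space. */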
2657		factor = btrfs_bg_type_to_factor(cache->flags);
2658
2659		/*
2660		 * If this block group has free space cache written out, we
2661		 * need to make sure to load it if we are removing space.  This
2662		 * is because we need the unpinning stage to actually add the
2663		 * space back to the block group, otherwise we will leak space.
2664		 */
2665		if (!alloc && cache->cached == BTRFS_CACHE_NO)
2666			btrfs_cache_block_group(cache, 1);
2667
2668		byte_in_group = bytenr - cache->key.objectid;
2669		WARN_ON(byte_in_group > cache->key.offset);
2670
2671		spin_lock(&cache->space_info->lock);
2672		spin_lock(&cache->lock);
2673
2674		if (btrfs_test_opt(info, SPACE_CACHE) &&
2675		    cache->disk_cache_state < BTRFS_DC_CLEAR)
2676			cache->disk_cache_state = BTRFS_DC_CLEAR;
2677
2678		old_val = btrfs_block_group_used(&cache->item);
2679		num_bytes = min(total, cache->key.offset - byte_in_group);
2680		if (alloc) {
2681			old_val += num_bytes;
2682			btrfs_set_block_group_used(&cache->item, old_val);
2683			cache->reserved -= num_bytes;
2684			cache->space_info->bytes_reserved -= num_bytes;
2685			cache->space_info->bytes_used += num_bytes;
2686			cache->space_info->disk_used += num_bytes * factor;
2687			spin_unlock(&cache->lock);
2688			spin_unlock(&cache->space_info->lock);
2689		} else {
2690			old_val -= num_bytes;
2691			btrfs_set_block_group_used(&cache->item, old_val);
2692			cache->pinned += num_bytes;
2693			btrfs_space_info_update_bytes_pinned(info,
2694					cache->space_info, num_bytes);
2695			cache->space_info->bytes_used -= num_bytes;
2696			cache->space_info->disk_used -= num_bytes * factor;
2697			spin_unlock(&cache->lock);
2698			spin_unlock(&cache->space_info->lock);
2699
2700			percpu_counter_add_batch(
2701					&cache->space_info->total_bytes_pinned,
2702					num_bytes,
2703					BTRFS_TOTAL_BYTES_PINNED_BATCH);
2704			set_extent_dirty(info->pinned_extents,
2705					 bytenr, bytenr + num_bytes - 1,
2706					 GFP_NOFS | __GFP_NOFAIL);
2707		}
2708
2709		spin_lock(&trans->transaction->dirty_bgs_lock);
2710		if (list_empty(&cache->dirty_list)) {
2711			list_add_tail(&cache->dirty_list,
2712				      &trans->transaction->dirty_bgs);
2713			trans->delayed_ref_updates++;
2714			btrfs_get_block_group(cache);
2715		}
2716		spin_unlock(&trans->transaction->dirty_bgs_lock);
2717
2718		/*
2719		 * No longer have used bytes in this block group, queue it for
2720		 * deletion. We do this after adding the block group to the
2721		 * dirty list to avoid races between cleaner kthread and space
2722		 * cache writeout.
2723		 */
2724		if (!alloc && old_val == 0)
2725			btrfs_mark_bg_unused(cache);
2726
2727		btrfs_put_block_group(cache);
2728		total -= num_bytes;
2729		bytenr += num_bytes;
2730	}
2731
2732	/* Modified block groups are accounted for in the delayed_refs_rsv. */
2733	btrfs_update_delayed_refs_rsv(trans);
2734	return ret;
2735}
2736
2737/**
2738 * btrfs_add_reserved_bytes - update the block_group and space info counters
2739 * @cache:	The cache we are manipulating
2740 * @ram_bytes:  The number of bytes of file content; this is the same as
2741 *              @num_bytes except on the compression path.
2742 * @num_bytes:	The number of bytes in question
2743 * @delalloc:   The blocks are allocated for the delalloc write
2744 *
2745 * This is called by the allocator when it reserves space. If this is a
2746 * reservation and the block group has become read only we cannot make the
2747 * reservation and return -EAGAIN, otherwise this function always succeeds.
2748 */
2749int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
2750			     u64 ram_bytes, u64 num_bytes, int delalloc)
2751{
2752	struct btrfs_space_info *space_info = cache->space_info;
2753	int ret = 0;
2754
2755	spin_lock(&space_info->lock);
2756	spin_lock(&cache->lock);
2757	if (cache->ro) {
2758		ret = -EAGAIN;
2759	} else {
2760		cache->reserved += num_bytes;
2761		space_info->bytes_reserved += num_bytes;
2762		trace_btrfs_space_reservation(cache->fs_info, "space_info",
2763					      space_info->flags, num_bytes, 1);
2764		btrfs_space_info_update_bytes_may_use(cache->fs_info,
2765						      space_info, -ram_bytes);
2766		if (delalloc)
2767			cache->delalloc_bytes += num_bytes;
2768	}
2769	spin_unlock(&cache->lock);
2770	spin_unlock(&space_info->lock);
2771	return ret;
2772}
2773
2774/**
2775 * btrfs_free_reserved_bytes - update the block_group and space info counters
2776 * @cache:      The cache we are manipulating
2777 * @num_bytes:  The number of bytes in question
2778 * @delalloc:   The blocks are allocated for the delalloc write
2779 *
2780 * This is called by somebody who is freeing space that was never actually used
2781 * on disk.  For example, if you reserve space for a new leaf in transaction A
2782 * and free that leaf before transaction A commits, you call this to clear the
2783 * reservation.
2784 */
2785void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
2786			       u64 num_bytes, int delalloc)
2787{
2788	struct btrfs_space_info *space_info = cache->space_info;
2789
2790	spin_lock(&space_info->lock);
2791	spin_lock(&cache->lock);
2792	if (cache->ro)
2793		space_info->bytes_readonly += num_bytes;
2794	cache->reserved -= num_bytes;
2795	space_info->bytes_reserved -= num_bytes;
2796	space_info->max_extent_size = 0;
2797
2798	if (delalloc)
2799		cache->delalloc_bytes -= num_bytes;
2800	spin_unlock(&cache->lock);
2801	spin_unlock(&space_info->lock);
2802}
2803
2804static void force_metadata_allocation(struct btrfs_fs_info *info)
2805{
2806	struct list_head *head = &info->space_info;
2807	struct btrfs_space_info *found;
2808
2809	rcu_read_lock();
2810	list_for_each_entry_rcu(found, head, list) {
2811		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2812			found->force_alloc = CHUNK_ALLOC_FORCE;
2813	}
2814	rcu_read_unlock();
2815}
2816
2817static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2818			      struct btrfs_space_info *sinfo, int force)
2819{
2820	u64 bytes_used = btrfs_space_info_used(sinfo, false);
2821	u64 thresh;
2822
2823	if (force == CHUNK_ALLOC_FORCE)
2824		return 1;
2825
2826	/*
2827	 * in limited mode, we want to have some free space up to
2828	 * about 1% of the FS size.
2829	 */
2830	if (force == CHUNK_ALLOC_LIMITED) {
2831		thresh = btrfs_super_total_bytes(fs_info->super_copy);
2832		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
2833
2834		if (sinfo->total_bytes - bytes_used < thresh)
2835			return 1;
2836	}
2837
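	/* Otherwise only allocate a new chunk once the space info is about 80% used. */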
2838	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
2839		return 0;
2840	return 1;
2841}
2842
2843int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
2844{
2845	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
2846
2847	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2848}
2849
2850/*
2851 * If force is CHUNK_ALLOC_FORCE:
2852 *    - return 1 if it successfully allocates a chunk,
2853 *    - return errors including -ENOSPC otherwise.
2854 * If force is NOT CHUNK_ALLOC_FORCE:
2855 *    - return 0 if it doesn't need to allocate a new chunk,
2856 *    - return 1 if it successfully allocates a chunk,
2857 *    - return errors including -ENOSPC otherwise.
2858 */
2859int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
2860		      enum btrfs_chunk_alloc_enum force)
2861{
2862	struct btrfs_fs_info *fs_info = trans->fs_info;
2863	struct btrfs_space_info *space_info;
2864	bool wait_for_alloc = false;
2865	bool should_alloc = false;
2866	int ret = 0;
2867
2868	/* Don't re-enter if we're already allocating a chunk */
2869	if (trans->allocating_chunk)
2870		return -ENOSPC;
2871
2872	space_info = btrfs_find_space_info(fs_info, flags);
2873	ASSERT(space_info);
2874
2875	do {
2876		spin_lock(&space_info->lock);
2877		if (force < space_info->force_alloc)
2878			force = space_info->force_alloc;
2879		should_alloc = should_alloc_chunk(fs_info, space_info, force);
2880		if (space_info->full) {
2881			/* No more free physical space */
2882			if (should_alloc)
2883				ret = -ENOSPC;
2884			else
2885				ret = 0;
2886			spin_unlock(&space_info->lock);
2887			return ret;
2888		} else if (!should_alloc) {
2889			spin_unlock(&space_info->lock);
2890			return 0;
2891		} else if (space_info->chunk_alloc) {
2892			/*
2893			 * Someone is already allocating, so we need to block
2894			 * until this someone is finished and then loop to
2895			 * recheck if we should continue with our allocation
2896			 * attempt.
2897			 */
2898			wait_for_alloc = true;
2899			spin_unlock(&space_info->lock);
2900			mutex_lock(&fs_info->chunk_mutex);
2901			mutex_unlock(&fs_info->chunk_mutex);
2902		} else {
2903			/* Proceed with allocation */
2904			space_info->chunk_alloc = 1;
2905			wait_for_alloc = false;
2906			spin_unlock(&space_info->lock);
2907		}
2908
2909		cond_resched();
2910	} while (wait_for_alloc);
2911
2912	mutex_lock(&fs_info->chunk_mutex);
2913	trans->allocating_chunk = true;
2914
2915	/*
2916	 * If we have mixed data/metadata chunks we want to make sure we keep
2917	 * allocating mixed chunks instead of individual chunks.
2918	 */
2919	if (btrfs_mixed_space_info(space_info))
2920		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
2921
2922	/*
2923	 * if we're doing a data chunk, go ahead and make sure that
2924	 * we keep a reasonable number of metadata chunks allocated in the
2925	 * FS as well.
2926	 */
2927	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
2928		fs_info->data_chunk_allocations++;
2929		if (!(fs_info->data_chunk_allocations %
2930		      fs_info->metadata_ratio))
2931			force_metadata_allocation(fs_info);
2932	}
2933
2934	/*
2935	 * Check if we have enough space in SYSTEM chunk because we may need
2936	 * to update devices.
2937	 */
2938	check_system_chunk(trans, flags);
2939
2940	ret = btrfs_alloc_chunk(trans, flags);
2941	trans->allocating_chunk = false;
2942
2943	spin_lock(&space_info->lock);
2944	if (ret < 0) {
2945		if (ret == -ENOSPC)
2946			space_info->full = 1;
2947		else
2948			goto out;
2949	} else {
2950		ret = 1;
2951		space_info->max_extent_size = 0;
2952	}
2953
2954	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
2955out:
2956	space_info->chunk_alloc = 0;
2957	spin_unlock(&space_info->lock);
2958	mutex_unlock(&fs_info->chunk_mutex);
2959	/*
2960	 * When we allocate a new chunk we reserve space in the chunk block
2961	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
2962	 * add new nodes/leafs to it if we end up needing to do it when
2963	 * inserting the chunk item and updating device items as part of the
2964	 * second phase of chunk allocation, performed by
2965	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
2966	 * large number of new block groups to create in our transaction
2967	 * handle's new_bgs list to avoid exhausting the chunk block reserve
2968	 * in extreme cases - like having a single transaction create many new
2969	 * block groups when starting to write out the free space caches of all
2970	 * the block groups that were made dirty during the lifetime of the
2971	 * transaction.
2972	 */
2973	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
2974		btrfs_create_pending_block_groups(trans);
2975
2976	return ret;
2977}
2978
2979static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
2980{
2981	u64 num_dev;
2982
2983	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
2984	if (!num_dev)
2985		num_dev = fs_info->fs_devices->rw_devices;
2986
2987	return num_dev;
2988}
2989
2990/*
2991 * Reserve space in the system space info for the metadata updates needed when
2992 * allocating or removing a chunk: device item updates for each device plus the
2993 * chunk item itself.
2994 */
2995void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
2996{
2997	struct btrfs_fs_info *fs_info = trans->fs_info;
2998	struct btrfs_space_info *info;
2999	u64 left;
3000	u64 thresh;
3001	int ret = 0;
3002	u64 num_devs;
3003
3004	/*
3005	 * Needed because we can end up allocating a system chunk and we need an
3006	 * atomic and race-free space reservation in the chunk block reserve.
3007	 */
3008	lockdep_assert_held(&fs_info->chunk_mutex);
3009
3010	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3011	spin_lock(&info->lock);
3012	left = info->total_bytes - btrfs_space_info_used(info, true);
3013	spin_unlock(&info->lock);
3014
3015	num_devs = get_profile_num_devs(fs_info, type);
3016
3017	/* num_devs device items to update and 1 chunk item to add or remove */
3018	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3019		btrfs_calc_insert_metadata_size(fs_info, 1);
3020
3021	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3022		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3023			   left, thresh, type);
3024		btrfs_dump_space_info(fs_info, info, 0, 0);
3025	}
3026
3027	if (left < thresh) {
3028		u64 flags = btrfs_system_alloc_profile(fs_info);
3029
3030		/*
3031		 * Ignore failure to create system chunk. We might end up not
3032		 * needing it, as we might not need to COW all nodes/leafs from
3033		 * the paths we visit in the chunk tree (they were already COWed
3034		 * or created in the current transaction for example).
3035		 */
3036		ret = btrfs_alloc_chunk(trans, flags);
3037	}
3038
3039	if (!ret) {
3040		ret = btrfs_block_rsv_add(fs_info->chunk_root,
3041					  &fs_info->chunk_block_rsv,
3042					  thresh, BTRFS_RESERVE_NO_FLUSH);
3043		if (!ret)
3044			trans->chunk_bytes_reserved += thresh;
3045	}
3046}
3047
3048void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3049{
3050	struct btrfs_block_group_cache *block_group;
3051	u64 last = 0;
3052
3053	while (1) {
3054		struct inode *inode;
3055
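		/* Find the next block group still holding a reference to its free space inode. */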
3056		block_group = btrfs_lookup_first_block_group(info, last);
3057		while (block_group) {
3058			btrfs_wait_block_group_cache_done(block_group);
3059			spin_lock(&block_group->lock);
3060			if (block_group->iref)
3061				break;
3062			spin_unlock(&block_group->lock);
3063			block_group = btrfs_next_block_group(block_group);
3064		}
3065		if (!block_group) {
3066			if (last == 0)
3067				break;
3068			last = 0;
3069			continue;
3070		}
3071
3072		inode = block_group->inode;
3073		block_group->iref = 0;
3074		block_group->inode = NULL;
3075		spin_unlock(&block_group->lock);
3076		ASSERT(block_group->io_ctl.inode == NULL);
3077		iput(inode);
3078		last = block_group->key.objectid + block_group->key.offset;
3079		btrfs_put_block_group(block_group);
3080	}
3081}
3082
3083/*
3084 * Must be called only after stopping all workers, since we could have block
3085 * group caching kthreads running, and therefore they could race with us if we
3086 * freed the block groups before stopping them.
3087 */
3088int btrfs_free_block_groups(struct btrfs_fs_info *info)
3089{
3090	struct btrfs_block_group_cache *block_group;
3091	struct btrfs_space_info *space_info;
3092	struct btrfs_caching_control *caching_ctl;
3093	struct rb_node *n;
3094
3095	down_write(&info->commit_root_sem);
3096	while (!list_empty(&info->caching_block_groups)) {
3097		caching_ctl = list_entry(info->caching_block_groups.next,
3098					 struct btrfs_caching_control, list);
3099		list_del(&caching_ctl->list);
3100		btrfs_put_caching_control(caching_ctl);
3101	}
3102	up_write(&info->commit_root_sem);
3103
3104	spin_lock(&info->unused_bgs_lock);
3105	while (!list_empty(&info->unused_bgs)) {
3106		block_group = list_first_entry(&info->unused_bgs,
3107					       struct btrfs_block_group_cache,
3108					       bg_list);
3109		list_del_init(&block_group->bg_list);
3110		btrfs_put_block_group(block_group);
3111	}
3112	spin_unlock(&info->unused_bgs_lock);
3113
3114	spin_lock(&info->block_group_cache_lock);
3115	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3116		block_group = rb_entry(n, struct btrfs_block_group_cache,
3117				       cache_node);
3118		rb_erase(&block_group->cache_node,
3119			 &info->block_group_cache_tree);
3120		RB_CLEAR_NODE(&block_group->cache_node);
3121		spin_unlock(&info->block_group_cache_lock);
3122
3123		down_write(&block_group->space_info->groups_sem);
3124		list_del(&block_group->list);
3125		up_write(&block_group->space_info->groups_sem);
3126
3127		/*
3128		 * We haven't cached this block group, which means we could
3129		 * possibly have excluded extents on this block group.
3130		 */
3131		if (block_group->cached == BTRFS_CACHE_NO ||
3132		    block_group->cached == BTRFS_CACHE_ERROR)
3133			btrfs_free_excluded_extents(block_group);
3134
3135		btrfs_remove_free_space_cache(block_group);
3136		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3137		ASSERT(list_empty(&block_group->dirty_list));
3138		ASSERT(list_empty(&block_group->io_list));
3139		ASSERT(list_empty(&block_group->bg_list));
3140		ASSERT(atomic_read(&block_group->count) == 1);
3141		btrfs_put_block_group(block_group);
3142
3143		spin_lock(&info->block_group_cache_lock);
3144	}
3145	spin_unlock(&info->block_group_cache_lock);
3146
3147	/*
3148	 * Now that all the block groups are freed, go through and free all the
3149	 * space_info structs.  This is only called during the final stages of
3150	 * unmount, and so we know nobody is using them.  We call
3151	 * synchronize_rcu() once before we start, just to be on the safe side.
3152	 */
3153	synchronize_rcu();
3154
3155	btrfs_release_global_block_rsv(info);
3156
3157	while (!list_empty(&info->space_info)) {
3158		space_info = list_entry(info->space_info.next,
3159					struct btrfs_space_info,
3160					list);
3161
3162		/*
3163		 * Do not hide this behind enospc_debug, this is actually
3164		 * important and indicates a real bug if this happens.
3165		 */
3166		if (WARN_ON(space_info->bytes_pinned > 0 ||
3167			    space_info->bytes_reserved > 0 ||
3168			    space_info->bytes_may_use > 0))
3169			btrfs_dump_space_info(info, space_info, 0, 0);
3170		list_del(&space_info->list);
3171		btrfs_sysfs_remove_space_info(space_info);
3172	}
3173	return 0;
3174}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/list_sort.h>
   4#include "misc.h"
   5#include "ctree.h"
   6#include "block-group.h"
   7#include "space-info.h"
   8#include "disk-io.h"
   9#include "free-space-cache.h"
  10#include "free-space-tree.h"
  11#include "volumes.h"
  12#include "transaction.h"
  13#include "ref-verify.h"
  14#include "sysfs.h"
  15#include "tree-log.h"
  16#include "delalloc-space.h"
  17#include "discard.h"
  18#include "raid56.h"
  19#include "zoned.h"
  20#include "fs.h"
  21#include "accessors.h"
  22#include "extent-tree.h"
  23
  24#ifdef CONFIG_BTRFS_DEBUG
  25int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
  26{
  27	struct btrfs_fs_info *fs_info = block_group->fs_info;
  28
  29	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
  30		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
  31	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
  32		block_group->flags &  BTRFS_BLOCK_GROUP_DATA);
  33}
  34#endif
  35
  36/*
  37 * Return target flags in extended format or 0 if restripe for this chunk_type
  38 * is not in progress
  39 *
  40 * Should be called with balance_lock held
  41 */
  42static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  43{
  44	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  45	u64 target = 0;
  46
  47	if (!bctl)
  48		return 0;
  49
  50	if (flags & BTRFS_BLOCK_GROUP_DATA &&
  51	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  52		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
  53	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
  54		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  55		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
  56	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
  57		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  58		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
  59	}
  60
  61	return target;
  62}
  63
  64/*
  65 * @flags: available profiles in extended format (see ctree.h)
  66 *
  67 * Return reduced profile in chunk format.  If profile changing is in progress
  68 * (either running or paused) picks the target profile (if it's already
  69 * available), otherwise falls back to plain reducing.
  70 */
  71static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
  72{
  73	u64 num_devices = fs_info->fs_devices->rw_devices;
  74	u64 target;
  75	u64 raid_type;
  76	u64 allowed = 0;
  77
  78	/*
  79	 * See if restripe for this chunk_type is in progress, if so try to
  80	 * reduce to the target profile
  81	 */
  82	spin_lock(&fs_info->balance_lock);
  83	target = get_restripe_target(fs_info, flags);
  84	if (target) {
  85		spin_unlock(&fs_info->balance_lock);
  86		return extended_to_chunk(target);
  87	}
  88	spin_unlock(&fs_info->balance_lock);
  89
  90	/* First, mask out the RAID levels which aren't possible */
  91	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
  92		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
  93			allowed |= btrfs_raid_array[raid_type].bg_flag;
  94	}
  95	allowed &= flags;
  96
  97	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
  98		allowed = BTRFS_BLOCK_GROUP_RAID6;
  99	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
 100		allowed = BTRFS_BLOCK_GROUP_RAID5;
 101	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
 102		allowed = BTRFS_BLOCK_GROUP_RAID10;
 103	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
 104		allowed = BTRFS_BLOCK_GROUP_RAID1;
 105	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
 106		allowed = BTRFS_BLOCK_GROUP_RAID0;
 107
 108	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
 109
 110	return extended_to_chunk(flags | allowed);
 111}
 112
 113u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 114{
 115	unsigned seq;
 116	u64 flags;
 117
 118	do {
 119		flags = orig_flags;
 120		seq = read_seqbegin(&fs_info->profiles_lock);
 121
 122		if (flags & BTRFS_BLOCK_GROUP_DATA)
 123			flags |= fs_info->avail_data_alloc_bits;
 124		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 125			flags |= fs_info->avail_system_alloc_bits;
 126		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
 127			flags |= fs_info->avail_metadata_alloc_bits;
 128	} while (read_seqretry(&fs_info->profiles_lock, seq));
 129
 130	return btrfs_reduce_alloc_profile(fs_info, flags);
 131}
 132
 133void btrfs_get_block_group(struct btrfs_block_group *cache)
 134{
 135	refcount_inc(&cache->refs);
 136}
 137
 138void btrfs_put_block_group(struct btrfs_block_group *cache)
 139{
 140	if (refcount_dec_and_test(&cache->refs)) {
 141		WARN_ON(cache->pinned > 0);
 142		/*
 143		 * If there was a failure to cleanup a log tree, very likely due
 144		 * to an IO failure on a writeback attempt of one or more of its
 145		 * extent buffers, we could not do proper (and cheap) unaccounting
 146		 * of their reserved space, so don't warn on reserved > 0 in that
 147		 * case.
 148		 */
 149		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
 150		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
 151			WARN_ON(cache->reserved > 0);
 152
 153		/*
 154		 * A block_group shouldn't be on the discard_list anymore.
 155		 * Remove the block_group from the discard_list to prevent us
 156		 * from causing a panic due to NULL pointer dereference.
 157		 */
 158		if (WARN_ON(!list_empty(&cache->discard_list)))
 159			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
 160						  cache);
 161
 162		/*
 163		 * If not empty, someone is still holding mutex of
 164		 * full_stripe_lock, which can only be released by caller.
 165		 * And it will definitely cause use-after-free when caller
 166		 * tries to release full stripe lock.
 167		 *
 168		 * No better way to resolve, but only to warn.
 169		 */
 170		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
 171		kfree(cache->free_space_ctl);
 172		kfree(cache->physical_map);
 173		kfree(cache);
 174	}
 175}
 176
 177/*
 178 * This adds the block group to the fs_info rb tree for the block group cache
 179 */
 180static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 181				       struct btrfs_block_group *block_group)
 182{
 183	struct rb_node **p;
 184	struct rb_node *parent = NULL;
 185	struct btrfs_block_group *cache;
 186	bool leftmost = true;
 187
 188	ASSERT(block_group->length != 0);
 189
 190	write_lock(&info->block_group_cache_lock);
 191	p = &info->block_group_cache_tree.rb_root.rb_node;
 192
 193	while (*p) {
 194		parent = *p;
 195		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
 196		if (block_group->start < cache->start) {
 197			p = &(*p)->rb_left;
 198		} else if (block_group->start > cache->start) {
 199			p = &(*p)->rb_right;
 200			leftmost = false;
 201		} else {
 202			write_unlock(&info->block_group_cache_lock);
 203			return -EEXIST;
 204		}
 205	}
 206
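	/*
	 * Link the new node and keep the rbtree's cached-leftmost hint up to
	 * date so lookups of the first block group stay cheap.
	 */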
 207	rb_link_node(&block_group->cache_node, parent, p);
 208	rb_insert_color_cached(&block_group->cache_node,
 209			       &info->block_group_cache_tree, leftmost);
 210
 211	write_unlock(&info->block_group_cache_lock);
 212
 213	return 0;
 214}
 215
 216/*
 217 * This will return the block group at or after bytenr if contains is 0, else
 218 * it will return the block group that contains the bytenr
 219 */
 220static struct btrfs_block_group *block_group_cache_tree_search(
 221		struct btrfs_fs_info *info, u64 bytenr, int contains)
 222{
 223	struct btrfs_block_group *cache, *ret = NULL;
 224	struct rb_node *n;
 225	u64 end, start;
 226
 227	read_lock(&info->block_group_cache_lock);
 228	n = info->block_group_cache_tree.rb_root.rb_node;
 229
 230	while (n) {
 231		cache = rb_entry(n, struct btrfs_block_group, cache_node);
 232		end = cache->start + cache->length - 1;
 233		start = cache->start;
 234
 235		if (bytenr < start) {
 236			if (!contains && (!ret || start < ret->start))
 237				ret = cache;
 238			n = n->rb_left;
 239		} else if (bytenr > start) {
 240			if (contains && bytenr <= end) {
 241				ret = cache;
 242				break;
 243			}
 244			n = n->rb_right;
 245		} else {
 246			ret = cache;
 247			break;
 248		}
 249	}
 250	if (ret)
 251		btrfs_get_block_group(ret);
 252	read_unlock(&info->block_group_cache_lock);
 253
 254	return ret;
 255}
 256
 257/*
 258 * Return the block group that starts at or after bytenr
 259 */
 260struct btrfs_block_group *btrfs_lookup_first_block_group(
 261		struct btrfs_fs_info *info, u64 bytenr)
 262{
 263	return block_group_cache_tree_search(info, bytenr, 0);
 264}
 265
 266/*
 267 * Return the block group that contains the given bytenr
 268 */
 269struct btrfs_block_group *btrfs_lookup_block_group(
 270		struct btrfs_fs_info *info, u64 bytenr)
 271{
 272	return block_group_cache_tree_search(info, bytenr, 1);
 273}
 274
 275struct btrfs_block_group *btrfs_next_block_group(
 276		struct btrfs_block_group *cache)
 277{
 278	struct btrfs_fs_info *fs_info = cache->fs_info;
 279	struct rb_node *node;
 280
 281	read_lock(&fs_info->block_group_cache_lock);
 282
 283	/* If our block group was removed, we need a full search. */
 284	if (RB_EMPTY_NODE(&cache->cache_node)) {
 285		const u64 next_bytenr = cache->start + cache->length;
 286
 287		read_unlock(&fs_info->block_group_cache_lock);
 288		btrfs_put_block_group(cache);
 289		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
 290	}
 291	node = rb_next(&cache->cache_node);
 292	btrfs_put_block_group(cache);
 293	if (node) {
 294		cache = rb_entry(node, struct btrfs_block_group, cache_node);
 295		btrfs_get_block_group(cache);
 296	} else
 297		cache = NULL;
 298	read_unlock(&fs_info->block_group_cache_lock);
 299	return cache;
 300}
 301
 302/*
 303 * Check if we can do a NOCOW write for a given extent.
 304 *
 305 * @fs_info:       The filesystem information object.
 306 * @bytenr:        Logical start address of the extent.
 307 *
 308 * Check if we can do a NOCOW write for the given extent and, if so, increment the
 309 * number of NOCOW writers in the block group that contains the extent, as long
 310 * as the block group exists and it's currently not in read-only mode.
 311 *
 312 * Returns: A non-NULL block group pointer if we can do a NOCOW write; the caller
 313 *          is responsible for calling btrfs_dec_nocow_writers() later.
 314 *
 315 *          Or NULL if we cannot do a NOCOW write.
 316 */
 317struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
 318						  u64 bytenr)
 319{
 320	struct btrfs_block_group *bg;
 321	bool can_nocow = true;
 322
 323	bg = btrfs_lookup_block_group(fs_info, bytenr);
 324	if (!bg)
 325		return NULL;
 326
 327	spin_lock(&bg->lock);
 328	if (bg->ro)
 329		can_nocow = false;
 330	else
 331		atomic_inc(&bg->nocow_writers);
 332	spin_unlock(&bg->lock);
 333
 334	if (!can_nocow) {
 335		btrfs_put_block_group(bg);
 336		return NULL;
 337	}
 338
 339	/* No put on block group, done by btrfs_dec_nocow_writers(). */
 340	return bg;
 341}
 342
 343/*
 344 * Decrement the number of NOCOW writers in a block group.
 345 *
 346 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 347 * and on the block group returned by that call. Typically this is called after
 348 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 349 * relocation.
 350 *
 351 * After this call, the caller should not use the block group anymore. If it wants
 352 * to use it, then it should get a reference on it before calling this function.
 353 */
 354void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
 355{
 356	if (atomic_dec_and_test(&bg->nocow_writers))
 357		wake_up_var(&bg->nocow_writers);
 358
 359	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
 360	btrfs_put_block_group(bg);
 361}
 362
 363void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
 364{
 365	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
 366}
 367
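/*
 * Drop one reservation count on the block group that starts at @start and
 * wake up anyone waiting for the count to reach zero.
 */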
 368void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 369					const u64 start)
 370{
 371	struct btrfs_block_group *bg;
 372
 373	bg = btrfs_lookup_block_group(fs_info, start);
 374	ASSERT(bg);
 375	if (atomic_dec_and_test(&bg->reservations))
 376		wake_up_var(&bg->reservations);
 377	btrfs_put_block_group(bg);
 378}
 379
 380void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
 381{
 382	struct btrfs_space_info *space_info = bg->space_info;
 383
 384	ASSERT(bg->ro);
 385
 386	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
 387		return;
 388
 389	/*
 390	 * Our block group is read only but before we set it to read only,
 391	 * some task might have allocated an extent from it already, but it
 392	 * has not yet created a respective ordered extent (and added it to a
 393	 * root's list of ordered extents).
 394	 * Therefore wait for any task currently allocating extents, since the
 395	 * block group's reservations counter is incremented while a read lock
 396	 * on the groups' semaphore is held and decremented after releasing
 397	 * the read access on that semaphore and creating the ordered extent.
 398	 */
 399	down_write(&space_info->groups_sem);
 400	up_write(&space_info->groups_sem);
 401
 402	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
 403}
 404
 405struct btrfs_caching_control *btrfs_get_caching_control(
 406		struct btrfs_block_group *cache)
 407{
 408	struct btrfs_caching_control *ctl;
 409
 410	spin_lock(&cache->lock);
 411	if (!cache->caching_ctl) {
 412		spin_unlock(&cache->lock);
 413		return NULL;
 414	}
 415
 416	ctl = cache->caching_ctl;
 417	refcount_inc(&ctl->count);
 418	spin_unlock(&cache->lock);
 419	return ctl;
 420}
 421
 422void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
 423{
 424	if (refcount_dec_and_test(&ctl->count))
 425		kfree(ctl);
 426}
 427
 428/*
 429 * When we wait for progress in the block group caching, it's because our
 430 * allocation attempt failed at least once.  So, we must sleep and let some
 431 * progress happen before we try again.
 432 *
 433 * This function will sleep at least once waiting for new free space to show
 434 * up, and then it will check the block group free space numbers for our min
 435 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 436 * a free extent of a given size, but this is a good start.
 437 *
 438 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 439 * any of the information in this block group.
 440 */
 441void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
 442					   u64 num_bytes)
 443{
 444	struct btrfs_caching_control *caching_ctl;
 445
 446	caching_ctl = btrfs_get_caching_control(cache);
 447	if (!caching_ctl)
 448		return;
 449
 450	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
 451		   (cache->free_space_ctl->free_space >= num_bytes));
 452
 453	btrfs_put_caching_control(caching_ctl);
 454}
 455
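/*
 * Wait until caching of the block group has finished and report whether it
 * ended in error.
 */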
 456static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
 457				       struct btrfs_caching_control *caching_ctl)
 458{
 459	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
 460	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
 461}
 462
 463static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
 464{
 465	struct btrfs_caching_control *caching_ctl;
 466	int ret;
 467
 468	caching_ctl = btrfs_get_caching_control(cache);
 469	if (!caching_ctl)
 470		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
 471	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
 472	btrfs_put_caching_control(caching_ctl);
 473	return ret;
 474}
 475
 476#ifdef CONFIG_BTRFS_DEBUG
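/*
 * Debug helper: remove every other chunk of the block group's free space so
 * that allocations are forced to spread out when free space fragmentation
 * testing is enabled.
 */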
 477static void fragment_free_space(struct btrfs_block_group *block_group)
 478{
 479	struct btrfs_fs_info *fs_info = block_group->fs_info;
 480	u64 start = block_group->start;
 481	u64 len = block_group->length;
 482	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
 483		fs_info->nodesize : fs_info->sectorsize;
 484	u64 step = chunk << 1;
 485
 486	while (len > chunk) {
 487		btrfs_remove_free_space(block_group, start, chunk);
 488		start += step;
 489		if (len < step)
 490			len = 0;
 491		else
 492			len -= step;
 493	}
 494}
 495#endif
 496
 497/*
 498 * This is only called by btrfs_cache_block_group.  Since we could have freed
 499 * extents, we need to check the pinned_extents for any extents that can't be
 500 * used yet, as their free space will only be released when the transaction
 501 * commits.
 502 */
 503u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
 504{
 505	struct btrfs_fs_info *info = block_group->fs_info;
 506	u64 extent_start, extent_end, size, total_added = 0;
 507	int ret;
 508
 509	while (start < end) {
 510		ret = find_first_extent_bit(&info->excluded_extents, start,
 511					    &extent_start, &extent_end,
 512					    EXTENT_DIRTY | EXTENT_UPTODATE,
 513					    NULL);
 514		if (ret)
 515			break;
 516
 517		if (extent_start <= start) {
 518			start = extent_end + 1;
 519		} else if (extent_start > start && extent_start < end) {
 520			size = extent_start - start;
 521			total_added += size;
 522			ret = btrfs_add_free_space_async_trimmed(block_group,
 523								 start, size);
 524			BUG_ON(ret); /* -ENOMEM or logic error */
 525			start = extent_end + 1;
 526		} else {
 527			break;
 528		}
 529	}
 530
 531	if (start < end) {
 532		size = end - start;
 533		total_added += size;
 534		ret = btrfs_add_free_space_async_trimmed(block_group, start,
 535							 size);
 536		BUG_ON(ret); /* -ENOMEM or logic error */
 537	}
 538
 539	return total_added;
 540}
 541
 542static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 543{
 544	struct btrfs_block_group *block_group = caching_ctl->block_group;
 545	struct btrfs_fs_info *fs_info = block_group->fs_info;
 546	struct btrfs_root *extent_root;
 547	struct btrfs_path *path;
 548	struct extent_buffer *leaf;
 549	struct btrfs_key key;
 550	u64 total_found = 0;
 551	u64 last = 0;
 552	u32 nritems;
 553	int ret;
 554	bool wakeup = true;
 555
 556	path = btrfs_alloc_path();
 557	if (!path)
 558		return -ENOMEM;
 559
 560	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
 561	extent_root = btrfs_extent_root(fs_info, last);
 562
 563#ifdef CONFIG_BTRFS_DEBUG
 564	/*
 565	 * If we're fragmenting we don't want to make anybody think we can
 566	 * allocate from this block group until we've had a chance to fragment
 567	 * the free space.
 568	 */
 569	if (btrfs_should_fragment_free_space(block_group))
 570		wakeup = false;
 571#endif
 572	/*
 573	 * We don't want to deadlock with somebody trying to allocate a new
 574	 * extent for the extent root while also trying to search the extent
 575	 * root to add free space.  So we skip locking and search the commit
 576	 * root, since it's read-only.
 577	 */
 578	path->skip_locking = 1;
 579	path->search_commit_root = 1;
 580	path->reada = READA_FORWARD;
 581
 582	key.objectid = last;
 583	key.offset = 0;
 584	key.type = BTRFS_EXTENT_ITEM_KEY;
 585
 586next:
 587	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
 588	if (ret < 0)
 589		goto out;
 590
 591	leaf = path->nodes[0];
 592	nritems = btrfs_header_nritems(leaf);
 593
 594	while (1) {
 595		if (btrfs_fs_closing(fs_info) > 1) {
 596			last = (u64)-1;
 597			break;
 598		}
 599
 600		if (path->slots[0] < nritems) {
 601			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 602		} else {
 603			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
 604			if (ret)
 605				break;
 606
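			/*
			 * Be nice to others: drop the path, the commit_root_sem
			 * and the caching mutex before rescheduling, then
			 * restart the search from the last processed key.
			 */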
 607			if (need_resched() ||
 608			    rwsem_is_contended(&fs_info->commit_root_sem)) {
 609				btrfs_release_path(path);
 610				up_read(&fs_info->commit_root_sem);
 611				mutex_unlock(&caching_ctl->mutex);
 612				cond_resched();
 613				mutex_lock(&caching_ctl->mutex);
 614				down_read(&fs_info->commit_root_sem);
 615				goto next;
 616			}
 617
 618			ret = btrfs_next_leaf(extent_root, path);
 619			if (ret < 0)
 620				goto out;
 621			if (ret)
 622				break;
 623			leaf = path->nodes[0];
 624			nritems = btrfs_header_nritems(leaf);
 625			continue;
 626		}
 627
 628		if (key.objectid < last) {
 629			key.objectid = last;
 630			key.offset = 0;
 631			key.type = BTRFS_EXTENT_ITEM_KEY;
 632			btrfs_release_path(path);
 633			goto next;
 634		}
 635
 636		if (key.objectid < block_group->start) {
 637			path->slots[0]++;
 638			continue;
 639		}
 640
 641		if (key.objectid >= block_group->start + block_group->length)
 642			break;
 643
 644		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
 645		    key.type == BTRFS_METADATA_ITEM_KEY) {
 646			total_found += add_new_free_space(block_group, last,
 647							  key.objectid);
 648			if (key.type == BTRFS_METADATA_ITEM_KEY)
 649				last = key.objectid +
 650					fs_info->nodesize;
 651			else
 652				last = key.objectid + key.offset;
 653
 654			if (total_found > CACHING_CTL_WAKE_UP) {
 655				total_found = 0;
 656				if (wakeup)
 657					wake_up(&caching_ctl->wait);
 658			}
 659		}
 660		path->slots[0]++;
 661	}
 662	ret = 0;
 663
 664	total_found += add_new_free_space(block_group, last,
 665				block_group->start + block_group->length);
 666
 667out:
 668	btrfs_free_path(path);
 669	return ret;
 670}
 671
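/*
 * Background worker that fills in a block group's free space information,
 * either from the on-disk free space cache/tree or by scanning the extent
 * tree, and then marks the block group as cached (or as errored on failure).
 */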
 672static noinline void caching_thread(struct btrfs_work *work)
 673{
 674	struct btrfs_block_group *block_group;
 675	struct btrfs_fs_info *fs_info;
 676	struct btrfs_caching_control *caching_ctl;
 677	int ret;
 678
 679	caching_ctl = container_of(work, struct btrfs_caching_control, work);
 680	block_group = caching_ctl->block_group;
 681	fs_info = block_group->fs_info;
 682
 683	mutex_lock(&caching_ctl->mutex);
 684	down_read(&fs_info->commit_root_sem);
 685
 686	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
 687		ret = load_free_space_cache(block_group);
 688		if (ret == 1) {
 689			ret = 0;
 690			goto done;
 691		}
 692
 693		/*
 694		 * We failed to load the space cache, set ourselves to
 695		 * CACHE_STARTED and carry on.
 696		 */
 697		spin_lock(&block_group->lock);
 698		block_group->cached = BTRFS_CACHE_STARTED;
 699		spin_unlock(&block_group->lock);
 700		wake_up(&caching_ctl->wait);
 701	}
 702
 703	/*
 704	 * If we are in the transaction that populated the free space tree we
 705	 * can't actually cache from the free space tree as our commit root and
 706	 * real root are the same, so we could change the contents of the blocks
 707	 * while caching.  Instead do the slow caching in this case, and after
 708	 * the transaction has committed we will be safe.
 709	 */
 710	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
 711	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
 712		ret = load_free_space_tree(caching_ctl);
 713	else
 714		ret = load_extent_tree_free(caching_ctl);
 715done:
 716	spin_lock(&block_group->lock);
 717	block_group->caching_ctl = NULL;
 718	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
 719	spin_unlock(&block_group->lock);
 720
 721#ifdef CONFIG_BTRFS_DEBUG
 722	if (btrfs_should_fragment_free_space(block_group)) {
 723		u64 bytes_used;
 724
 725		spin_lock(&block_group->space_info->lock);
 726		spin_lock(&block_group->lock);
 727		bytes_used = block_group->length - block_group->used;
 728		block_group->space_info->bytes_used += bytes_used >> 1;
 729		spin_unlock(&block_group->lock);
 730		spin_unlock(&block_group->space_info->lock);
 731		fragment_free_space(block_group);
 732	}
 733#endif
 734
 735	up_read(&fs_info->commit_root_sem);
 736	btrfs_free_excluded_extents(block_group);
 737	mutex_unlock(&caching_ctl->mutex);
 738
 739	wake_up(&caching_ctl->wait);
 740
 741	btrfs_put_caching_control(caching_ctl);
 742	btrfs_put_block_group(block_group);
 743}
 744
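/*
 * Kick off caching of a block group's free space and, if @wait is set, wait
 * for it to complete.  On zoned filesystems the allocator does not use the
 * cache, so this is effectively a no-op there.
 */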
 745int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 746{
 747	struct btrfs_fs_info *fs_info = cache->fs_info;
 748	struct btrfs_caching_control *caching_ctl = NULL;
 749	int ret = 0;
 750
 751	/* Allocator for zoned filesystems does not use the cache at all */
 752	if (btrfs_is_zoned(fs_info))
 753		return 0;
 754
 755	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
 756	if (!caching_ctl)
 757		return -ENOMEM;
 758
 759	INIT_LIST_HEAD(&caching_ctl->list);
 760	mutex_init(&caching_ctl->mutex);
 761	init_waitqueue_head(&caching_ctl->wait);
 762	caching_ctl->block_group = cache;
 763	refcount_set(&caching_ctl->count, 2);
 764	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 765
 766	spin_lock(&cache->lock);
 767	if (cache->cached != BTRFS_CACHE_NO) {
 768		kfree(caching_ctl);
 769
 770		caching_ctl = cache->caching_ctl;
 771		if (caching_ctl)
 772			refcount_inc(&caching_ctl->count);
 773		spin_unlock(&cache->lock);
 774		goto out;
 775	}
 776	WARN_ON(cache->caching_ctl);
 777	cache->caching_ctl = caching_ctl;
 778	cache->cached = BTRFS_CACHE_STARTED;
 779	spin_unlock(&cache->lock);
 780
 781	write_lock(&fs_info->block_group_cache_lock);
 782	refcount_inc(&caching_ctl->count);
 783	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 784	write_unlock(&fs_info->block_group_cache_lock);
 785
 786	btrfs_get_block_group(cache);
 787
 788	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 789out:
 790	if (wait && caching_ctl)
 791		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
 792	if (caching_ctl)
 793		btrfs_put_caching_control(caching_ctl);
 794
 795	return ret;
 796}
 797
 798static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 799{
 800	u64 extra_flags = chunk_to_extended(flags) &
 801				BTRFS_EXTENDED_PROFILE_MASK;
 802
 803	write_seqlock(&fs_info->profiles_lock);
 804	if (flags & BTRFS_BLOCK_GROUP_DATA)
 805		fs_info->avail_data_alloc_bits &= ~extra_flags;
 806	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 807		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
 808	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 809		fs_info->avail_system_alloc_bits &= ~extra_flags;
 810	write_sequnlock(&fs_info->profiles_lock);
 811}
 812
 813/*
 814 * Clear incompat bits for the following feature(s):
 815 *
 816 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 817 *            in the whole filesystem
 818 *
 819 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 820 */
 821static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
 822{
 823	bool found_raid56 = false;
 824	bool found_raid1c34 = false;
 825
 826	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
 827	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
 828	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
 829		struct list_head *head = &fs_info->space_info;
 830		struct btrfs_space_info *sinfo;
 831
 832		list_for_each_entry_rcu(sinfo, head, list) {
 833			down_read(&sinfo->groups_sem);
 834			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
 835				found_raid56 = true;
 836			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
 837				found_raid56 = true;
 838			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
 839				found_raid1c34 = true;
 840			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
 841				found_raid1c34 = true;
 842			up_read(&sinfo->groups_sem);
 843		}
 844		if (!found_raid56)
 845			btrfs_clear_fs_incompat(fs_info, RAID56);
 846		if (!found_raid1c34)
 847			btrfs_clear_fs_incompat(fs_info, RAID1C34);
 848	}
 849}
 850
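/*
 * Delete the on-disk block group item for @block_group, keyed by its start
 * offset and length.
 */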
 851static int remove_block_group_item(struct btrfs_trans_handle *trans,
 852				   struct btrfs_path *path,
 853				   struct btrfs_block_group *block_group)
 854{
 855	struct btrfs_fs_info *fs_info = trans->fs_info;
 856	struct btrfs_root *root;
 857	struct btrfs_key key;
 858	int ret;
 859
 860	root = btrfs_block_group_root(fs_info);
 861	key.objectid = block_group->start;
 862	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
 863	key.offset = block_group->length;
 864
 865	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 866	if (ret > 0)
 867		ret = -ENOENT;
 868	if (ret < 0)
 869		return ret;
 870
 871	ret = btrfs_del_item(trans, root, path);
 872	return ret;
 873}
 874
 875int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 876			     u64 group_start, struct extent_map *em)
 877{
 878	struct btrfs_fs_info *fs_info = trans->fs_info;
 879	struct btrfs_path *path;
 880	struct btrfs_block_group *block_group;
 881	struct btrfs_free_cluster *cluster;
 882	struct inode *inode;
 883	struct kobject *kobj = NULL;
 884	int ret;
 885	int index;
 886	int factor;
 887	struct btrfs_caching_control *caching_ctl = NULL;
 888	bool remove_em;
 889	bool remove_rsv = false;
 890
 891	block_group = btrfs_lookup_block_group(fs_info, group_start);
 892	BUG_ON(!block_group);
 893	BUG_ON(!block_group->ro);
 894
 895	trace_btrfs_remove_block_group(block_group);
 896	/*
 897	 * Free the reserved super bytes from this block group before
 898	 * removing it.
 899	 */
 900	btrfs_free_excluded_extents(block_group);
 901	btrfs_free_ref_tree_range(fs_info, block_group->start,
 902				  block_group->length);
 903
 904	index = btrfs_bg_flags_to_raid_index(block_group->flags);
 905	factor = btrfs_bg_type_to_factor(block_group->flags);
 906
 907	/* make sure this block group isn't part of an allocation cluster */
 908	cluster = &fs_info->data_alloc_cluster;
 909	spin_lock(&cluster->refill_lock);
 910	btrfs_return_cluster_to_free_space(block_group, cluster);
 911	spin_unlock(&cluster->refill_lock);
 912
 913	/*
 914	 * make sure this block group isn't part of a metadata
 915	 * allocation cluster
 916	 */
 917	cluster = &fs_info->meta_alloc_cluster;
 918	spin_lock(&cluster->refill_lock);
 919	btrfs_return_cluster_to_free_space(block_group, cluster);
 920	spin_unlock(&cluster->refill_lock);
 921
 922	btrfs_clear_treelog_bg(block_group);
 923	btrfs_clear_data_reloc_bg(block_group);
 924
 925	path = btrfs_alloc_path();
 926	if (!path) {
 927		ret = -ENOMEM;
 928		goto out;
 929	}
 930
 931	/*
 932	 * get the inode first so any iput calls done for the io_list
 933	 * aren't the final iput (no unlinks allowed now)
 934	 */
 935	inode = lookup_free_space_inode(block_group, path);
 936
 937	mutex_lock(&trans->transaction->cache_write_mutex);
 938	/*
 939	 * Make sure our free space cache IO is done before removing the
 940	 * free space inode
 941	 */
 942	spin_lock(&trans->transaction->dirty_bgs_lock);
 943	if (!list_empty(&block_group->io_list)) {
 944		list_del_init(&block_group->io_list);
 945
 946		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
 947
 948		spin_unlock(&trans->transaction->dirty_bgs_lock);
 949		btrfs_wait_cache_io(trans, block_group, path);
 950		btrfs_put_block_group(block_group);
 951		spin_lock(&trans->transaction->dirty_bgs_lock);
 952	}
 953
 954	if (!list_empty(&block_group->dirty_list)) {
 955		list_del_init(&block_group->dirty_list);
 956		remove_rsv = true;
 957		btrfs_put_block_group(block_group);
 958	}
 959	spin_unlock(&trans->transaction->dirty_bgs_lock);
 960	mutex_unlock(&trans->transaction->cache_write_mutex);
 961
 962	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
 963	if (ret)
 964		goto out;
 965
 966	write_lock(&fs_info->block_group_cache_lock);
 967	rb_erase_cached(&block_group->cache_node,
 968			&fs_info->block_group_cache_tree);
 969	RB_CLEAR_NODE(&block_group->cache_node);
 970
 971	/* Once for the block groups rbtree */
 972	btrfs_put_block_group(block_group);
 973
 974	write_unlock(&fs_info->block_group_cache_lock);
 975
 976	down_write(&block_group->space_info->groups_sem);
 977	/*
 978	 * we must use list_del_init so people can check to see if they
 979	 * are still on the list after taking the semaphore
 980	 */
 981	list_del_init(&block_group->list);
 982	if (list_empty(&block_group->space_info->block_groups[index])) {
 983		kobj = block_group->space_info->block_group_kobjs[index];
 984		block_group->space_info->block_group_kobjs[index] = NULL;
 985		clear_avail_alloc_bits(fs_info, block_group->flags);
 986	}
 987	up_write(&block_group->space_info->groups_sem);
 988	clear_incompat_bg_bits(fs_info, block_group->flags);
 989	if (kobj) {
 990		kobject_del(kobj);
 991		kobject_put(kobj);
 992	}
 993
 994	if (block_group->cached == BTRFS_CACHE_STARTED)
 995		btrfs_wait_block_group_cache_done(block_group);
 996
 997	write_lock(&fs_info->block_group_cache_lock);
 998	caching_ctl = btrfs_get_caching_control(block_group);
 999	if (!caching_ctl) {
1000		struct btrfs_caching_control *ctl;
1001
1002		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
1003			if (ctl->block_group == block_group) {
1004				caching_ctl = ctl;
1005				refcount_inc(&caching_ctl->count);
1006				break;
1007			}
1008		}
1009	}
1010	if (caching_ctl)
1011		list_del_init(&caching_ctl->list);
1012	write_unlock(&fs_info->block_group_cache_lock);
1013
1014	if (caching_ctl) {
1015		/* Once for the caching bgs list and once for us. */
1016		btrfs_put_caching_control(caching_ctl);
1017		btrfs_put_caching_control(caching_ctl);
1018	}
1019
1020	spin_lock(&trans->transaction->dirty_bgs_lock);
1021	WARN_ON(!list_empty(&block_group->dirty_list));
1022	WARN_ON(!list_empty(&block_group->io_list));
1023	spin_unlock(&trans->transaction->dirty_bgs_lock);
1024
1025	btrfs_remove_free_space_cache(block_group);
1026
1027	spin_lock(&block_group->space_info->lock);
1028	list_del_init(&block_group->ro_list);
1029
1030	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1031		WARN_ON(block_group->space_info->total_bytes
1032			< block_group->length);
1033		WARN_ON(block_group->space_info->bytes_readonly
1034			< block_group->length - block_group->zone_unusable);
1035		WARN_ON(block_group->space_info->bytes_zone_unusable
1036			< block_group->zone_unusable);
1037		WARN_ON(block_group->space_info->disk_total
1038			< block_group->length * factor);
1039		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
1040				 &block_group->runtime_flags) &&
1041			block_group->space_info->active_total_bytes
1042			< block_group->length);
1043	}
1044	block_group->space_info->total_bytes -= block_group->length;
1045	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1046		block_group->space_info->active_total_bytes -= block_group->length;
1047	block_group->space_info->bytes_readonly -=
1048		(block_group->length - block_group->zone_unusable);
1049	block_group->space_info->bytes_zone_unusable -=
1050		block_group->zone_unusable;
1051	block_group->space_info->disk_total -= block_group->length * factor;
1052
1053	spin_unlock(&block_group->space_info->lock);
1054
1055	/*
1056	 * Remove the free space for the block group from the free space tree
1057	 * and the block group's item from the extent tree before marking the
1058	 * block group as removed. This is to prevent races with tasks that
1059	 * freeze and unfreeze a block group, this task and another task
1060	 * allocating a new block group - the unfreeze task ends up removing
1061	 * the block group's extent map before the task calling this function
1062	 * deletes the block group item from the extent tree, allowing for
1063	 * another task to attempt to create another block group with the same
1064	 * item key (and failing with -EEXIST and a transaction abort).
1065	 */
1066	ret = remove_block_group_free_space(trans, block_group);
1067	if (ret)
1068		goto out;
1069
1070	ret = remove_block_group_item(trans, path, block_group);
1071	if (ret < 0)
1072		goto out;
1073
1074	spin_lock(&block_group->lock);
1075	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
1076
1077	/*
1078	 * At this point trimming or scrub can't start on this block group,
1079	 * because we removed the block group from the rbtree
1080	 * fs_info->block_group_cache_tree so no one can find it anymore and
1081	 * even if someone already got this block group before we removed it
1082	 * from the rbtree, they have already incremented block_group->frozen -
1083	 * if they didn't, for the trimming case they won't find any free space
1084	 * entries because we already removed them all when we called
1085	 * btrfs_remove_free_space_cache().
1086	 *
1087	 * And we must not remove the extent map from the fs_info->mapping_tree
1088	 * to prevent the same logical address range and physical device space
1089	 * ranges from being reused for a new block group. This is needed to
1090	 * avoid races with trimming and scrub.
1091	 *
1092	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1093	 * completely transactionless, so while it is trimming a range the
1094	 * currently running transaction might finish and a new one start,
1095	 * allowing for new block groups to be created that can reuse the same
1096	 * physical device locations unless we take this special care.
1097	 *
1098	 * There may also be an implicit trim operation if the file system
1099	 * is mounted with -odiscard. The same protections must remain
1100	 * in place until the extents have been discarded completely when
1101	 * the transaction commit has completed.
1102	 */
1103	remove_em = (atomic_read(&block_group->frozen) == 0);
1104	spin_unlock(&block_group->lock);
1105
1106	if (remove_em) {
1107		struct extent_map_tree *em_tree;
1108
1109		em_tree = &fs_info->mapping_tree;
1110		write_lock(&em_tree->lock);
1111		remove_extent_mapping(em_tree, em);
1112		write_unlock(&em_tree->lock);
1113		/* once for the tree */
1114		free_extent_map(em);
1115	}
1116
1117out:
1118	/* Once for the lookup reference */
1119	btrfs_put_block_group(block_group);
1120	if (remove_rsv)
1121		btrfs_delayed_refs_rsv_release(fs_info, 1);
1122	btrfs_free_path(path);
1123	return ret;
1124}
1125
1126struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1127		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1128{
1129	struct btrfs_root *root = btrfs_block_group_root(fs_info);
1130	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1131	struct extent_map *em;
1132	struct map_lookup *map;
1133	unsigned int num_items;
1134
1135	read_lock(&em_tree->lock);
1136	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1137	read_unlock(&em_tree->lock);
1138	ASSERT(em && em->start == chunk_offset);
1139
1140	/*
1141	 * We need to reserve 3 + N units from the metadata space info in order
1142	 * to remove a block group (done at btrfs_remove_chunk() and at
1143	 * btrfs_remove_block_group()), which are used for:
1144	 *
1145	 * 1 unit for adding the free space inode's orphan (located in the tree
1146	 * of tree roots).
1147	 * 1 unit for deleting the block group item (located in the extent
1148	 * tree).
1149	 * 1 unit for deleting the free space item (located in tree of tree
1150	 * roots).
1151	 * N units for deleting N device extent items corresponding to each
1152	 * stripe (located in the device tree).
1153	 *
1154	 * In order to remove a block group we also need to reserve units in the
1155	 * system space info in order to update the chunk tree (update one or
1156	 * more device items and remove one chunk item), but this is done at
1157	 * btrfs_remove_chunk() through a call to check_system_chunk().
1158	 */
1159	map = em->map_lookup;
1160	num_items = 3 + map->num_stripes;
1161	free_extent_map(em);
1162
1163	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
1164}
1165
1166/*
1167 * Mark block group @cache read-only, so later write won't happen to block
1168 * group @cache.
1169 *
1170 * If @force is not set, this function will only mark the block group readonly
1171 * if we have enough free space (1M) in other metadata/system block groups.
1172 * If @force is set, this function will mark the block group readonly
1173 * without checking free space.
1174 *
1175 * NOTE: This function doesn't care if other block groups can contain all the
1176 * data in this block group. That check should be done by relocation routine,
1177 * not this function.
1178 */
1179static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1180{
1181	struct btrfs_space_info *sinfo = cache->space_info;
1182	u64 num_bytes;
1183	int ret = -ENOSPC;
1184
1185	spin_lock(&sinfo->lock);
1186	spin_lock(&cache->lock);
1187
1188	if (cache->swap_extents) {
1189		ret = -ETXTBSY;
1190		goto out;
1191	}
1192
1193	if (cache->ro) {
1194		cache->ro++;
1195		ret = 0;
1196		goto out;
1197	}
1198
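	/*
	 * This is the currently unallocated space in the block group, i.e. the
	 * space that would no longer be available for new allocations once the
	 * group is read-only.
	 */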
1199	num_bytes = cache->length - cache->reserved - cache->pinned -
1200		    cache->bytes_super - cache->zone_unusable - cache->used;
1201
1202	/*
1203	 * Data never overcommits, even in mixed mode, so do just the straight
1204	 * check of left over space in how much we have allocated.
1205	 */
1206	if (force) {
1207		ret = 0;
1208	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1209		u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1210
1211		/*
1212		 * Here we make sure if we mark this bg RO, we still have enough
1213		 * free space as buffer.
1214		 */
1215		if (sinfo_used + num_bytes <= sinfo->total_bytes)
1216			ret = 0;
1217	} else {
1218		/*
1219		 * We overcommit metadata, so we need to do the
1220		 * btrfs_can_overcommit check here, and we need to pass in
1221		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1222		 * leeway to allow us to mark this block group as read only.
1223		 */
1224		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1225					 BTRFS_RESERVE_NO_FLUSH))
1226			ret = 0;
1227	}
1228
1229	if (!ret) {
1230		sinfo->bytes_readonly += num_bytes;
1231		if (btrfs_is_zoned(cache->fs_info)) {
1232			/* Migrate zone_unusable bytes to readonly */
1233			sinfo->bytes_readonly += cache->zone_unusable;
1234			sinfo->bytes_zone_unusable -= cache->zone_unusable;
1235			cache->zone_unusable = 0;
1236		}
1237		cache->ro++;
1238		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1239	}
1240out:
1241	spin_unlock(&cache->lock);
1242	spin_unlock(&sinfo->lock);
1243	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1244		btrfs_info(cache->fs_info,
1245			"unable to make block group %llu ro", cache->start);
1246		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1247	}
1248	return ret;
1249}
1250
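/*
 * Clear any pinned extent ranges of the block group from both the current and
 * the previous transaction, so nothing is left to unpin at commit time.
 * Returns true on success.
 */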
1251static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
1252				 struct btrfs_block_group *bg)
1253{
1254	struct btrfs_fs_info *fs_info = bg->fs_info;
1255	struct btrfs_transaction *prev_trans = NULL;
1256	const u64 start = bg->start;
1257	const u64 end = start + bg->length - 1;
1258	int ret;
1259
1260	spin_lock(&fs_info->trans_lock);
1261	if (trans->transaction->list.prev != &fs_info->trans_list) {
1262		prev_trans = list_last_entry(&trans->transaction->list,
1263					     struct btrfs_transaction, list);
1264		refcount_inc(&prev_trans->use_count);
1265	}
1266	spin_unlock(&fs_info->trans_lock);
1267
1268	/*
1269	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1270	 * btrfs_finish_extent_commit(). If we are at transaction N, another
1271	 * task might be running finish_extent_commit() for the previous
1272	 * transaction N - 1, and have seen a range belonging to the block
1273	 * group in pinned_extents before we were able to clear the whole block
1274	 * group range from pinned_extents. This means that task can lookup for
1275	 * the block group after we unpinned it from pinned_extents and removed
1276	 * it, leading to a BUG_ON() at unpin_extent_range().
1277	 */
1278	mutex_lock(&fs_info->unused_bg_unpin_mutex);
1279	if (prev_trans) {
1280		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
1281					EXTENT_DIRTY);
1282		if (ret)
1283			goto out;
1284	}
1285
1286	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
1287				EXTENT_DIRTY);
1288out:
1289	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1290	if (prev_trans)
1291		btrfs_put_transaction(prev_trans);
1292
1293	return ret == 0;
1294}
1295
1296/*
1297 * Process the unused_bgs list and remove any that don't have any allocated
1298 * space inside of them.
1299 */
1300void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1301{
1302	struct btrfs_block_group *block_group;
1303	struct btrfs_space_info *space_info;
1304	struct btrfs_trans_handle *trans;
1305	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
1306	int ret = 0;
1307
1308	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1309		return;
1310
1311	if (btrfs_fs_closing(fs_info))
1312		return;
1313
1314	/*
1315	 * Long running balances can keep us blocked here for eternity, so
1316	 * simply skip deletion if we're unable to get the mutex.
1317	 */
1318	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1319		return;
1320
1321	spin_lock(&fs_info->unused_bgs_lock);
1322	while (!list_empty(&fs_info->unused_bgs)) {
1323		int trimming;
1324
1325		block_group = list_first_entry(&fs_info->unused_bgs,
1326					       struct btrfs_block_group,
1327					       bg_list);
1328		list_del_init(&block_group->bg_list);
1329
1330		space_info = block_group->space_info;
1331
1332		if (ret || btrfs_mixed_space_info(space_info)) {
1333			btrfs_put_block_group(block_group);
1334			continue;
1335		}
1336		spin_unlock(&fs_info->unused_bgs_lock);
1337
1338		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1339
1340		/* Don't want to race with allocators so take the groups_sem */
1341		down_write(&space_info->groups_sem);
1342
1343		/*
1344		 * Async discard moves the final block group discard to be prior
1345		 * to the unused_bgs code path.  Therefore, if it's not fully
1346		 * trimmed, punt it back to the async discard lists.
1347		 */
1348		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
1349		    !btrfs_is_free_space_trimmed(block_group)) {
1350			trace_btrfs_skip_unused_block_group(block_group);
1351			up_write(&space_info->groups_sem);
1352			/* Requeue if we failed because of async discard */
1353			btrfs_discard_queue_work(&fs_info->discard_ctl,
1354						 block_group);
1355			goto next;
1356		}
1357
1358		spin_lock(&block_group->lock);
1359		if (block_group->reserved || block_group->pinned ||
1360		    block_group->used || block_group->ro ||
1361		    list_is_singular(&block_group->list)) {
1362			/*
1363			 * We want to bail if we made new allocations or have
1364			 * outstanding allocations in this block group.  We do
1365			 * the ro check in case balance is currently acting on
1366			 * this block group.
1367			 */
1368			trace_btrfs_skip_unused_block_group(block_group);
1369			spin_unlock(&block_group->lock);
1370			up_write(&space_info->groups_sem);
1371			goto next;
1372		}
1373		spin_unlock(&block_group->lock);
1374
1375		/* We don't want to force the issue, only flip if it's ok. */
1376		ret = inc_block_group_ro(block_group, 0);
1377		up_write(&space_info->groups_sem);
1378		if (ret < 0) {
1379			ret = 0;
1380			goto next;
1381		}
1382
1383		ret = btrfs_zone_finish(block_group);
1384		if (ret < 0) {
1385			btrfs_dec_block_group_ro(block_group);
1386			if (ret == -EAGAIN)
1387				ret = 0;
1388			goto next;
1389		}
1390
1391		/*
1392		 * Want to do this before we do anything else so we can recover
1393		 * properly if we fail to join the transaction.
1394		 */
1395		trans = btrfs_start_trans_remove_block_group(fs_info,
1396						     block_group->start);
1397		if (IS_ERR(trans)) {
1398			btrfs_dec_block_group_ro(block_group);
1399			ret = PTR_ERR(trans);
1400			goto next;
1401		}
1402
1403		/*
1404		 * We could have pending pinned extents for this block group,
1405		 * just delete them, we don't care about them anymore.
1406		 */
1407		if (!clean_pinned_extents(trans, block_group)) {
1408			btrfs_dec_block_group_ro(block_group);
1409			goto end_trans;
1410		}
1411
1412		/*
1413		 * At this point, the block_group is read only and should fail
1414		 * new allocations.  However, btrfs_finish_extent_commit() can
1415		 * cause this block_group to be placed back on the discard
1416		 * lists because now the block_group isn't fully discarded.
1417		 * Bail here and try again later after discarding everything.
1418		 */
1419		spin_lock(&fs_info->discard_ctl.lock);
1420		if (!list_empty(&block_group->discard_list)) {
1421			spin_unlock(&fs_info->discard_ctl.lock);
1422			btrfs_dec_block_group_ro(block_group);
1423			btrfs_discard_queue_work(&fs_info->discard_ctl,
1424						 block_group);
1425			goto end_trans;
1426		}
1427		spin_unlock(&fs_info->discard_ctl.lock);
1428
1429		/* Reset pinned so btrfs_put_block_group doesn't complain */
1430		spin_lock(&space_info->lock);
1431		spin_lock(&block_group->lock);
1432
1433		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1434						     -block_group->pinned);
1435		space_info->bytes_readonly += block_group->pinned;
1436		block_group->pinned = 0;
1437
1438		spin_unlock(&block_group->lock);
1439		spin_unlock(&space_info->lock);
1440
1441		/*
1442		 * The normal path here is an unused block group is passed here,
1443		 * then trimming is handled in the transaction commit path.
1444		 * Async discard interposes before this to do the trimming
1445		 * before coming down the unused block group path as trimming
1446		 * will no longer be done later in the transaction commit path.
1447		 */
1448		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
1449			goto flip_async;
1450
1451		/*
1452		 * DISCARD can flip during remount. On zoned filesystems, we
1453		 * need to reset sequential-required zones.
1454		 */
1455		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
1456				btrfs_is_zoned(fs_info);
1457
1458		/* Implicit trim during transaction commit. */
1459		if (trimming)
1460			btrfs_freeze_block_group(block_group);
1461
1462		/*
1463	 * btrfs_remove_chunk() will abort the transaction if things go
1464		 * horribly wrong.
1465		 */
1466		ret = btrfs_remove_chunk(trans, block_group->start);
1467
1468		if (ret) {
1469			if (trimming)
1470				btrfs_unfreeze_block_group(block_group);
1471			goto end_trans;
1472		}
1473
1474		/*
1475		 * If we're not mounted with -odiscard, we can just forget
1476		 * about this block group. Otherwise we'll need to wait
1477		 * until transaction commit to do the actual discard.
1478		 */
1479		if (trimming) {
1480			spin_lock(&fs_info->unused_bgs_lock);
1481			/*
1482			 * A concurrent scrub might have added us to the list
1483			 * fs_info->unused_bgs, so use a list_move operation
1484			 * to add the block group to the deleted_bgs list.
1485			 */
1486			list_move(&block_group->bg_list,
1487				  &trans->transaction->deleted_bgs);
1488			spin_unlock(&fs_info->unused_bgs_lock);
1489			btrfs_get_block_group(block_group);
1490		}
1491end_trans:
1492		btrfs_end_transaction(trans);
1493next:
1494		btrfs_put_block_group(block_group);
1495		spin_lock(&fs_info->unused_bgs_lock);
1496	}
1497	spin_unlock(&fs_info->unused_bgs_lock);
1498	mutex_unlock(&fs_info->reclaim_bgs_lock);
1499	return;
1500
1501flip_async:
1502	btrfs_end_transaction(trans);
1503	mutex_unlock(&fs_info->reclaim_bgs_lock);
1504	btrfs_put_block_group(block_group);
1505	btrfs_discard_punt_unused_bgs_list(fs_info);
1506}
1507
1508void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1509{
1510	struct btrfs_fs_info *fs_info = bg->fs_info;
1511
1512	spin_lock(&fs_info->unused_bgs_lock);
1513	if (list_empty(&bg->bg_list)) {
1514		btrfs_get_block_group(bg);
1515		trace_btrfs_add_unused_block_group(bg);
1516		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1517	}
1518	spin_unlock(&fs_info->unused_bgs_lock);
1519}
1520
1521/*
1522 * We want block groups with a low number of used bytes to be in the beginning
1523 * of the list, so they will get reclaimed first.
1524 */
1525static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
1526			   const struct list_head *b)
1527{
1528	const struct btrfs_block_group *bg1, *bg2;
1529
1530	bg1 = list_entry(a, struct btrfs_block_group, bg_list);
1531	bg2 = list_entry(b, struct btrfs_block_group, bg_list);
1532
1533	return bg1->used > bg2->used;
1534}
1535
1536static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
1537{
1538	if (btrfs_is_zoned(fs_info))
1539		return btrfs_zoned_should_reclaim(fs_info);
1540	return true;
1541}
1542
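/*
 * Decide whether freeing @bytes_freed bytes dropped the block group below the
 * space_info's reclaim threshold: usage must have been at or above the
 * threshold before the free and strictly below it afterwards.  A threshold of
 * zero disables reclaim entirely.
 */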
1543static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
1544{
1545	const struct btrfs_space_info *space_info = bg->space_info;
1546	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
1547	const u64 new_val = bg->used;
1548	const u64 old_val = new_val + bytes_freed;
1549	u64 thresh;
1550
1551	if (reclaim_thresh == 0)
1552		return false;
1553
1554	thresh = mult_perc(bg->length, reclaim_thresh);
1555
1556	/*
1557	 * If we were below the threshold before don't reclaim, we are likely a
1558	 * brand new block group and we don't want to relocate new block groups.
1559	 */
1560	if (old_val < thresh)
1561		return false;
1562	if (new_val >= thresh)
1563		return false;
1564	return true;
1565}
1566
1567void btrfs_reclaim_bgs_work(struct work_struct *work)
1568{
1569	struct btrfs_fs_info *fs_info =
1570		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
1571	struct btrfs_block_group *bg;
1572	struct btrfs_space_info *space_info;
1573
1574	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1575		return;
1576
1577	if (btrfs_fs_closing(fs_info))
1578		return;
1579
1580	if (!btrfs_should_reclaim(fs_info))
1581		return;
1582
1583	sb_start_write(fs_info->sb);
1584
1585	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
1586		sb_end_write(fs_info->sb);
1587		return;
1588	}
1589
1590	/*
1591	 * Long running balances can keep us blocked here for eternity, so
1592	 * simply skip reclaim if we're unable to get the mutex.
1593	 */
1594	if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
1595		btrfs_exclop_finish(fs_info);
1596		sb_end_write(fs_info->sb);
1597		return;
1598	}
1599
1600	spin_lock(&fs_info->unused_bgs_lock);
1601	/*
1602	 * Sort happens under lock because we can't simply splice it and sort.
1603	 * The block groups might still be in use and reachable via bg_list,
1604	 * and their presence in the reclaim_bgs list must be preserved.
1605	 */
1606	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
1607	while (!list_empty(&fs_info->reclaim_bgs)) {
1608		u64 zone_unusable;
1609		int ret = 0;
1610
1611		bg = list_first_entry(&fs_info->reclaim_bgs,
1612				      struct btrfs_block_group,
1613				      bg_list);
1614		list_del_init(&bg->bg_list);
1615
1616		space_info = bg->space_info;
1617		spin_unlock(&fs_info->unused_bgs_lock);
1618
1619		/* Don't race with allocators so take the groups_sem */
1620		down_write(&space_info->groups_sem);
1621
1622		spin_lock(&bg->lock);
1623		if (bg->reserved || bg->pinned || bg->ro) {
1624			/*
1625			 * We want to bail if we made new allocations or have
1626			 * outstanding allocations in this block group.  We do
1627			 * the ro check in case balance is currently acting on
1628			 * this block group.
1629			 */
1630			spin_unlock(&bg->lock);
1631			up_write(&space_info->groups_sem);
1632			goto next;
1633		}
1634		if (bg->used == 0) {
1635			/*
1636			 * It is possible that we trigger relocation on a block
1637			 * group as its extents are deleted and it first goes
1638			 * below the threshold, then shortly after goes empty.
1639			 *
1640			 * In this case, relocating it does delete it, but has
1641			 * some overhead in relocation specific metadata, looking
1642			 * for the non-existent extents and running some extra
1643			 * transactions, which we can avoid by using one of the
1644			 * other mechanisms for dealing with empty block groups.
1645			 */
1646			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
1647				btrfs_mark_bg_unused(bg);
1648			spin_unlock(&bg->lock);
1649			up_write(&space_info->groups_sem);
1650			goto next;
1651
1652		}
1653		/*
1654		 * The block group might no longer meet the reclaim condition by
1655		 * the time we get around to reclaiming it, so to avoid
1656		 * reclaiming overly full block_groups, skip reclaiming them.
1657		 *
1658		 * Since the decision making process also depends on the amount
1659		 * being freed, pass in a fake giant value to skip that extra
1660		 * check, which is more meaningful when adding to the list in
1661		 * the first place.
1662		 */
1663		if (!should_reclaim_block_group(bg, bg->length)) {
1664			spin_unlock(&bg->lock);
1665			up_write(&space_info->groups_sem);
1666			goto next;
1667		}
1668		spin_unlock(&bg->lock);
1669
1670		/* Get out fast, in case we're unmounting the filesystem */
1671		if (btrfs_fs_closing(fs_info)) {
1672			up_write(&space_info->groups_sem);
1673			goto next;
1674		}
1675
1676		/*
1677		 * Cache the zone_unusable value before turning the block group
1678	 * to read only. As soon as the block group is read only its
1679		 * zone_unusable value gets moved to the block group's read-only
1680		 * bytes and isn't available for calculations anymore.
1681		 */
1682		zone_unusable = bg->zone_unusable;
1683		ret = inc_block_group_ro(bg, 0);
1684		up_write(&space_info->groups_sem);
1685		if (ret < 0)
1686			goto next;
1687
1688		btrfs_info(fs_info,
1689			"reclaiming chunk %llu with %llu%% used %llu%% unusable",
1690				bg->start, div_u64(bg->used * 100, bg->length),
1691				div64_u64(zone_unusable * 100, bg->length));
1692		trace_btrfs_reclaim_block_group(bg);
1693		ret = btrfs_relocate_chunk(fs_info, bg->start);
1694		if (ret) {
1695			btrfs_dec_block_group_ro(bg);
1696			btrfs_err(fs_info, "error relocating chunk %llu",
1697				  bg->start);
1698		}
1699
1700next:
1701		btrfs_put_block_group(bg);
1702		spin_lock(&fs_info->unused_bgs_lock);
1703	}
1704	spin_unlock(&fs_info->unused_bgs_lock);
1705	mutex_unlock(&fs_info->reclaim_bgs_lock);
1706	btrfs_exclop_finish(fs_info);
1707	sb_end_write(fs_info->sb);
1708}
1709
1710void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
1711{
1712	spin_lock(&fs_info->unused_bgs_lock);
1713	if (!list_empty(&fs_info->reclaim_bgs))
1714		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
1715	spin_unlock(&fs_info->unused_bgs_lock);
1716}
1717
1718void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
1719{
1720	struct btrfs_fs_info *fs_info = bg->fs_info;
1721
1722	spin_lock(&fs_info->unused_bgs_lock);
1723	if (list_empty(&bg->bg_list)) {
1724		btrfs_get_block_group(bg);
1725		trace_btrfs_add_reclaim_block_group(bg);
1726		list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
1727	}
1728	spin_unlock(&fs_info->unused_bgs_lock);
1729}
1730
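/*
 * Validate a block group item against its chunk mapping: the logical start,
 * length and type flags must all match, otherwise return an error.
 */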
1731static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
1732			   struct btrfs_path *path)
1733{
1734	struct extent_map_tree *em_tree;
1735	struct extent_map *em;
1736	struct btrfs_block_group_item bg;
1737	struct extent_buffer *leaf;
1738	int slot;
1739	u64 flags;
1740	int ret = 0;
1741
1742	slot = path->slots[0];
1743	leaf = path->nodes[0];
1744
1745	em_tree = &fs_info->mapping_tree;
1746	read_lock(&em_tree->lock);
1747	em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
1748	read_unlock(&em_tree->lock);
1749	if (!em) {
1750		btrfs_err(fs_info,
1751			  "logical %llu len %llu found bg but no related chunk",
1752			  key->objectid, key->offset);
1753		return -ENOENT;
1754	}
1755
1756	if (em->start != key->objectid || em->len != key->offset) {
1757		btrfs_err(fs_info,
1758			"block group %llu len %llu mismatch with chunk %llu len %llu",
1759			key->objectid, key->offset, em->start, em->len);
1760		ret = -EUCLEAN;
1761		goto out_free_em;
1762	}
1763
1764	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
1765			   sizeof(bg));
1766	flags = btrfs_stack_block_group_flags(&bg) &
1767		BTRFS_BLOCK_GROUP_TYPE_MASK;
1768
1769	if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1770		btrfs_err(fs_info,
1771"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1772			  key->objectid, key->offset, flags,
1773			  (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
1774		ret = -EUCLEAN;
1775	}
1776
1777out_free_em:
1778	free_extent_map(em);
1779	return ret;
1780}
1781
1782static int find_first_block_group(struct btrfs_fs_info *fs_info,
1783				  struct btrfs_path *path,
1784				  struct btrfs_key *key)
1785{
1786	struct btrfs_root *root = btrfs_block_group_root(fs_info);
1787	int ret;
1788	struct btrfs_key found_key;
1789
1790	btrfs_for_each_slot(root, key, &found_key, path, ret) {
1791		if (found_key.objectid >= key->objectid &&
1792		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1793			return read_bg_from_eb(fs_info, &found_key, path);
1794		}
1795	}
1796	return ret;
1797}
1798
1799static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1800{
1801	u64 extra_flags = chunk_to_extended(flags) &
1802				BTRFS_EXTENDED_PROFILE_MASK;
1803
1804	write_seqlock(&fs_info->profiles_lock);
1805	if (flags & BTRFS_BLOCK_GROUP_DATA)
1806		fs_info->avail_data_alloc_bits |= extra_flags;
1807	if (flags & BTRFS_BLOCK_GROUP_METADATA)
1808		fs_info->avail_metadata_alloc_bits |= extra_flags;
1809	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1810		fs_info->avail_system_alloc_bits |= extra_flags;
1811	write_sequnlock(&fs_info->profiles_lock);
1812}
1813
1814/*
1815 * Map a physical disk address to a list of logical addresses.
1816 *
1817 * @fs_info:       the filesystem
1818 * @chunk_start:   logical address of block group
1819 * @bdev:	   physical device to resolve, can be NULL to indicate any device
1820 * @physical:	   physical address to map to logical addresses
1821 * @logical:	   return array of logical addresses which map to @physical
1822 * @naddrs:	   length of @logical
1823 * @stripe_len:    size of IO stripe for the given block group
1824 *
1825 * Maps a particular @physical disk address to a list of @logical addresses.
1826 * Used primarily to exclude those portions of a block group that contain super
1827 * block copies.
1828 */
1829int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
1830		     struct block_device *bdev, u64 physical, u64 **logical,
1831		     int *naddrs, int *stripe_len)
1832{
1833	struct extent_map *em;
1834	struct map_lookup *map;
1835	u64 *buf;
1836	u64 bytenr;
1837	u64 data_stripe_length;
1838	u64 io_stripe_size;
1839	int i, nr = 0;
1840	int ret = 0;
1841
1842	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
1843	if (IS_ERR(em))
1844		return -EIO;
1845
1846	map = em->map_lookup;
1847	data_stripe_length = em->orig_block_len;
1848	io_stripe_size = map->stripe_len;
1849	chunk_start = em->start;
1850
1851	/* For RAID5/6 adjust to a full IO stripe length */
1852	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
1853		io_stripe_size = map->stripe_len * nr_data_stripes(map);
1854
1855	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
1856	if (!buf) {
1857		ret = -ENOMEM;
1858		goto out;
1859	}
1860
1861	for (i = 0; i < map->num_stripes; i++) {
1862		bool already_inserted = false;
1863		u64 stripe_nr;
1864		u64 offset;
1865		int j;
1866
1867		if (!in_range(physical, map->stripes[i].physical,
1868			      data_stripe_length))
1869			continue;
1870
1871		if (bdev && map->stripes[i].dev->bdev != bdev)
1872			continue;
1873
1874		stripe_nr = physical - map->stripes[i].physical;
1875		stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
1876
1877		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
1878				 BTRFS_BLOCK_GROUP_RAID10)) {
1879			stripe_nr = stripe_nr * map->num_stripes + i;
1880			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
1881		}
1882		/*
1883		 * The remaining case would be for RAID56, multiply by
1884		 * nr_data_stripes().  Alternatively, just use rmap_len below
1885		 * instead of map->stripe_len
1886		 */
1887
1888		bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
1889
1890		/* Ensure we don't add duplicate addresses */
1891		for (j = 0; j < nr; j++) {
1892			if (buf[j] == bytenr) {
1893				already_inserted = true;
1894				break;
1895			}
1896		}
1897
1898		if (!already_inserted)
1899			buf[nr++] = bytenr;
1900	}
1901
1902	*logical = buf;
1903	*naddrs = nr;
1904	*stripe_len = io_stripe_size;
1905out:
1906	free_extent_map(em);
1907	return ret;
1908}
1909
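/*
 * Worked example for btrfs_rmap_block() (hypothetical RAID0 layout, numbers
 * chosen only for illustration): a chunk starting at logical 1 GiB with
 * num_stripes = 2, sub_stripes = 1 and stripe_len = 64 KiB.  If @physical
 * falls 196 KiB into stripe 1, then
 *
 *   stripe_nr = 196 KiB / 64 KiB = 3, offset = 4 KiB
 *   stripe_nr = (3 * num_stripes + 1) / sub_stripes = 7
 *   bytenr    = 1 GiB + 7 * 64 KiB + 4 KiB
 *
 * so a single logical address is returned.  With @bdev == NULL the same
 * physical offset can fall inside more than one stripe of a mirrored profile
 * and yield the same logical address, which is why duplicates are filtered
 * before filling @logical.
 */
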
1910static int exclude_super_stripes(struct btrfs_block_group *cache)
1911{
1912	struct btrfs_fs_info *fs_info = cache->fs_info;
1913	const bool zoned = btrfs_is_zoned(fs_info);
1914	u64 bytenr;
1915	u64 *logical;
1916	int stripe_len;
1917	int i, nr, ret;
1918
1919	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1920		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
1921		cache->bytes_super += stripe_len;
1922		ret = btrfs_add_excluded_extent(fs_info, cache->start,
1923						stripe_len);
1924		if (ret)
1925			return ret;
1926	}
1927
1928	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1929		bytenr = btrfs_sb_offset(i);
1930		ret = btrfs_rmap_block(fs_info, cache->start, NULL,
1931				       bytenr, &logical, &nr, &stripe_len);
1932		if (ret)
1933			return ret;
1934
1935		/* Shouldn't have super stripes in sequential zones */
1936		if (zoned && nr) {
1937			btrfs_err(fs_info,
1938			"zoned: block group %llu must not contain super block",
1939				  cache->start);
1940			return -EUCLEAN;
1941		}
1942
1943		while (nr--) {
1944			u64 len = min_t(u64, stripe_len,
1945				cache->start + cache->length - logical[nr]);
1946
1947			cache->bytes_super += len;
1948			ret = btrfs_add_excluded_extent(fs_info, logical[nr],
1949							len);
1950			if (ret) {
1951				kfree(logical);
1952				return ret;
1953			}
1954		}
1955
1956		kfree(logical);
1957	}
1958	return 0;
1959}
1960
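/*
 * For reference: the primary super block lives at physical offset 64 KiB
 * (BTRFS_SUPER_INFO_OFFSET) and its mirrors at 64 MiB and 256 GiB on every
 * device.  exclude_super_stripes() maps those physical offsets back to
 * logical addresses inside @cache with btrfs_rmap_block(), accounts for them
 * in cache->bytes_super and marks them excluded via
 * btrfs_add_excluded_extent(), so the free space caching code never hands
 * them out.  A hypothetical block group whose device extent covers physical
 * 64 MiB therefore loses up to one IO stripe length of otherwise free space
 * around that copy.
 */
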
1961static struct btrfs_block_group *btrfs_create_block_group_cache(
1962		struct btrfs_fs_info *fs_info, u64 start)
1963{
1964	struct btrfs_block_group *cache;
1965
1966	cache = kzalloc(sizeof(*cache), GFP_NOFS);
1967	if (!cache)
1968		return NULL;
1969
1970	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1971					GFP_NOFS);
1972	if (!cache->free_space_ctl) {
1973		kfree(cache);
1974		return NULL;
1975	}
1976
1977	cache->start = start;
1978
1979	cache->fs_info = fs_info;
1980	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1981
1982	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
1983
1984	refcount_set(&cache->refs, 1);
1985	spin_lock_init(&cache->lock);
1986	init_rwsem(&cache->data_rwsem);
1987	INIT_LIST_HEAD(&cache->list);
1988	INIT_LIST_HEAD(&cache->cluster_list);
1989	INIT_LIST_HEAD(&cache->bg_list);
1990	INIT_LIST_HEAD(&cache->ro_list);
1991	INIT_LIST_HEAD(&cache->discard_list);
1992	INIT_LIST_HEAD(&cache->dirty_list);
1993	INIT_LIST_HEAD(&cache->io_list);
1994	INIT_LIST_HEAD(&cache->active_bg_list);
1995	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
1996	atomic_set(&cache->frozen, 0);
1997	mutex_init(&cache->free_space_lock);
1998	cache->full_stripe_locks_root.root = RB_ROOT;
1999	mutex_init(&cache->full_stripe_locks_root.lock);
2000
2001	return cache;
2002}
2003
2004/*
2005 * Iterate all chunks and verify that each of them has the corresponding block
2006 * group
2007 */
2008static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
2009{
2010	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2011	struct extent_map *em;
2012	struct btrfs_block_group *bg;
2013	u64 start = 0;
2014	int ret = 0;
2015
2016	while (1) {
2017		read_lock(&map_tree->lock);
2018		/*
2019		 * lookup_extent_mapping will return the first extent map
2020		 * intersecting the range, so setting @len to 1 is enough to
2021		 * get the first chunk.
2022		 */
2023		em = lookup_extent_mapping(map_tree, start, 1);
2024		read_unlock(&map_tree->lock);
2025		if (!em)
2026			break;
2027
2028		bg = btrfs_lookup_block_group(fs_info, em->start);
2029		if (!bg) {
2030			btrfs_err(fs_info,
2031	"chunk start=%llu len=%llu doesn't have corresponding block group",
2032				     em->start, em->len);
2033			ret = -EUCLEAN;
2034			free_extent_map(em);
2035			break;
2036		}
2037		if (bg->start != em->start || bg->length != em->len ||
2038		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2039		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2040			btrfs_err(fs_info,
2041"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
2042				em->start, em->len,
2043				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
2044				bg->start, bg->length,
2045				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2046			ret = -EUCLEAN;
2047			free_extent_map(em);
2048			btrfs_put_block_group(bg);
2049			break;
2050		}
2051		start = em->start + em->len;
2052		free_extent_map(em);
2053		btrfs_put_block_group(bg);
2054	}
2055	return ret;
2056}
2057
2058static int read_one_block_group(struct btrfs_fs_info *info,
2059				struct btrfs_block_group_item *bgi,
2060				const struct btrfs_key *key,
2061				int need_clear)
2062{
2063	struct btrfs_block_group *cache;
2064	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
2065	int ret;
2066
2067	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
2068
2069	cache = btrfs_create_block_group_cache(info, key->objectid);
2070	if (!cache)
2071		return -ENOMEM;
2072
2073	cache->length = key->offset;
2074	cache->used = btrfs_stack_block_group_used(bgi);
2075	cache->commit_used = cache->used;
2076	cache->flags = btrfs_stack_block_group_flags(bgi);
2077	cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
2078
2079	set_free_space_tree_thresholds(cache);
2080
2081	if (need_clear) {
2082		/*
2083		 * When we mount with old space cache, we need to
2084		 * set BTRFS_DC_CLEAR and set dirty flag.
2085		 *
2086		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
2087		 *    truncate the old free space cache inode and
2088		 *    setup a new one.
2089		 * b) Setting 'dirty flag' makes sure that we flush
2090		 *    the new space cache info onto disk.
2091		 */
2092		if (btrfs_test_opt(info, SPACE_CACHE))
2093			cache->disk_cache_state = BTRFS_DC_CLEAR;
2094	}
2095	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2096	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2097		btrfs_err(info,
2098"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
2099			  cache->start);
2100		ret = -EINVAL;
2101		goto error;
2102	}
2103
2104	ret = btrfs_load_block_group_zone_info(cache, false);
2105	if (ret) {
2106		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
2107			  cache->start);
2108		goto error;
2109	}
2110
2111	/*
2112	 * We need to exclude the super stripes now so that the space info has
2113	 * super bytes accounted for, otherwise we'll think we have more space
2114	 * than we actually do.
2115	 */
2116	ret = exclude_super_stripes(cache);
2117	if (ret) {
2118		/* We may have excluded something, so call this just in case. */
2119		btrfs_free_excluded_extents(cache);
2120		goto error;
2121	}
2122
2123	/*
2124	 * For a zoned filesystem, space after the allocation offset is the only
2125	 * free space for a block group. So, we don't need any caching work.
2126	 * btrfs_calc_zone_unusable() will set the amount of free space and
2127	 * zone_unusable space.
2128	 *
2129	 * For a regular filesystem, check for two cases: either we are full, and
2130	 * therefore don't need to bother with the caching work since we won't
2131	 * find any space, or we are empty, and we can just add all the space
2132	 * in and be done with it.  This saves us _a_lot_ of time, particularly
2133	 * in the full case.
2134	 */
2135	if (btrfs_is_zoned(info)) {
2136		btrfs_calc_zone_unusable(cache);
2137		/* Should not have any excluded extents. Just in case, though. */
2138		btrfs_free_excluded_extents(cache);
2139	} else if (cache->length == cache->used) {
2140		cache->cached = BTRFS_CACHE_FINISHED;
2141		btrfs_free_excluded_extents(cache);
2142	} else if (cache->used == 0) {
2143		cache->cached = BTRFS_CACHE_FINISHED;
2144		add_new_free_space(cache, cache->start,
2145				   cache->start + cache->length);
2146		btrfs_free_excluded_extents(cache);
2147	}
2148
2149	ret = btrfs_add_block_group_cache(info, cache);
2150	if (ret) {
2151		btrfs_remove_free_space_cache(cache);
2152		goto error;
2153	}
2154	trace_btrfs_add_block_group(info, cache, 0);
2155	btrfs_add_bg_to_space_info(info, cache);
2156
2157	set_avail_alloc_bits(info, cache->flags);
2158	if (btrfs_chunk_writeable(info, cache->start)) {
2159		if (cache->used == 0) {
2160			ASSERT(list_empty(&cache->bg_list));
2161			if (btrfs_test_opt(info, DISCARD_ASYNC))
2162				btrfs_discard_queue_work(&info->discard_ctl, cache);
2163			else
2164				btrfs_mark_bg_unused(cache);
2165		}
2166	} else {
2167		inc_block_group_ro(cache, 1);
2168	}
2169
2170	return 0;
2171error:
2172	btrfs_put_block_group(cache);
2173	return ret;
2174}
2175
2176static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
2177{
2178	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
2179	struct rb_node *node;
2180	int ret = 0;
2181
2182	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
2183		struct extent_map *em;
2184		struct map_lookup *map;
2185		struct btrfs_block_group *bg;
2186
2187		em = rb_entry(node, struct extent_map, rb_node);
2188		map = em->map_lookup;
2189		bg = btrfs_create_block_group_cache(fs_info, em->start);
2190		if (!bg) {
2191			ret = -ENOMEM;
2192			break;
2193		}
2194
2195		/* Fill dummy cache as FULL */
2196		bg->length = em->len;
2197		bg->flags = map->type;
2198		bg->cached = BTRFS_CACHE_FINISHED;
2199		bg->used = em->len;
2201		ret = btrfs_add_block_group_cache(fs_info, bg);
2202		/*
2203		 * We may have some valid block group cache added already, in
2204		 * that case we skip to the next one.
2205		 */
2206		if (ret == -EEXIST) {
2207			ret = 0;
2208			btrfs_put_block_group(bg);
2209			continue;
2210		}
2211
2212		if (ret) {
2213			btrfs_remove_free_space_cache(bg);
2214			btrfs_put_block_group(bg);
2215			break;
2216		}
2217
2218		btrfs_add_bg_to_space_info(fs_info, bg);
2219
2220		set_avail_alloc_bits(fs_info, bg->flags);
2221	}
2222	if (!ret)
2223		btrfs_init_global_block_rsv(fs_info);
2224	return ret;
2225}
2226
2227int btrfs_read_block_groups(struct btrfs_fs_info *info)
2228{
2229	struct btrfs_root *root = btrfs_block_group_root(info);
2230	struct btrfs_path *path;
2231	int ret;
2232	struct btrfs_block_group *cache;
2233	struct btrfs_space_info *space_info;
2234	struct btrfs_key key;
2235	int need_clear = 0;
2236	u64 cache_gen;
2237
2238	/*
2239	 * Either no extent root (with ibadroots rescue option) or we have
2240	 * unsupported RO options. The fs can never be mounted read-write, so no
2241	 * need to waste time searching block group items.
2242	 *
2243	 * This also allows new extent tree related changes to be RO compat,
2244	 * no need for a full incompat flag.
2245	 */
2246	if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
2247		      ~BTRFS_FEATURE_COMPAT_RO_SUPP))
2248		return fill_dummy_bgs(info);
2249
2250	key.objectid = 0;
2251	key.offset = 0;
2252	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2253	path = btrfs_alloc_path();
2254	if (!path)
2255		return -ENOMEM;
2256
2257	cache_gen = btrfs_super_cache_generation(info->super_copy);
2258	if (btrfs_test_opt(info, SPACE_CACHE) &&
2259	    btrfs_super_generation(info->super_copy) != cache_gen)
2260		need_clear = 1;
2261	if (btrfs_test_opt(info, CLEAR_CACHE))
2262		need_clear = 1;
2263
2264	while (1) {
2265		struct btrfs_block_group_item bgi;
2266		struct extent_buffer *leaf;
2267		int slot;
2268
2269		ret = find_first_block_group(info, path, &key);
2270		if (ret > 0)
2271			break;
2272		if (ret != 0)
2273			goto error;
2274
2275		leaf = path->nodes[0];
2276		slot = path->slots[0];
2277
2278		read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
2279				   sizeof(bgi));
2280
2281		btrfs_item_key_to_cpu(leaf, &key, slot);
2282		btrfs_release_path(path);
2283		ret = read_one_block_group(info, &bgi, &key, need_clear);
2284		if (ret < 0)
2285			goto error;
2286		key.objectid += key.offset;
2287		key.offset = 0;
2288	}
2289	btrfs_release_path(path);
2290
2291	list_for_each_entry(space_info, &info->space_info, list) {
2292		int i;
2293
2294		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2295			if (list_empty(&space_info->block_groups[i]))
2296				continue;
2297			cache = list_first_entry(&space_info->block_groups[i],
2298						 struct btrfs_block_group,
2299						 list);
2300			btrfs_sysfs_add_block_group_type(cache);
2301		}
2302
2303		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
2304		      (BTRFS_BLOCK_GROUP_RAID10 |
2305		       BTRFS_BLOCK_GROUP_RAID1_MASK |
2306		       BTRFS_BLOCK_GROUP_RAID56_MASK |
2307		       BTRFS_BLOCK_GROUP_DUP)))
2308			continue;
2309		/*
2310		 * Avoid allocating from un-mirrored block group if there are
2311		 * mirrored block groups.
2312		 */
2313		list_for_each_entry(cache,
2314				&space_info->block_groups[BTRFS_RAID_RAID0],
2315				list)
2316			inc_block_group_ro(cache, 1);
2317		list_for_each_entry(cache,
2318				&space_info->block_groups[BTRFS_RAID_SINGLE],
2319				list)
2320			inc_block_group_ro(cache, 1);
2321	}
2322
2323	btrfs_init_global_block_rsv(info);
2324	ret = check_chunk_block_group_mappings(info);
2325error:
2326	btrfs_free_path(path);
2327	/*
2328	 * We've hit an error while reading the extent tree and have the
2329	 * rescue=ibadroots mount option set.
2330	 * Try to fill the tree using dummy block groups so that the user can
2331	 * continue to mount and grab their data.
2332	 */
2333	if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
2334		ret = fill_dummy_bgs(info);
2335	return ret;
2336}
2337
2338/*
2339 * This function, insert_block_group_item(), belongs to phase 2 of chunk
2340 * allocation.
2341 *
2342 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2343 * phases.
2344 */
2345static int insert_block_group_item(struct btrfs_trans_handle *trans,
2346				   struct btrfs_block_group *block_group)
2347{
2348	struct btrfs_fs_info *fs_info = trans->fs_info;
2349	struct btrfs_block_group_item bgi;
2350	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2351	struct btrfs_key key;
2352
2353	spin_lock(&block_group->lock);
2354	btrfs_set_stack_block_group_used(&bgi, block_group->used);
2355	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2356						   block_group->global_root_id);
2357	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
2358	key.objectid = block_group->start;
2359	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2360	key.offset = block_group->length;
2361	spin_unlock(&block_group->lock);
2362
2363	return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
2364}
2365
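/*
 * Illustration of the resulting item (hypothetical values): a block group
 * starting at logical 1 GiB with a length of 256 MiB is keyed as
 *
 *   (objectid = 1073741824, type = BTRFS_BLOCK_GROUP_ITEM_KEY,
 *    offset = 268435456)
 *
 * and its btrfs_block_group_item body carries the used byte count, the
 * chunk_objectid field (reused as the global root id, see
 * calculate_global_root_id() below) and the type/profile flags.
 */
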
2366static int insert_dev_extent(struct btrfs_trans_handle *trans,
2367			    struct btrfs_device *device, u64 chunk_offset,
2368			    u64 start, u64 num_bytes)
2369{
2370	struct btrfs_fs_info *fs_info = device->fs_info;
2371	struct btrfs_root *root = fs_info->dev_root;
2372	struct btrfs_path *path;
2373	struct btrfs_dev_extent *extent;
2374	struct extent_buffer *leaf;
2375	struct btrfs_key key;
2376	int ret;
2377
2378	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
2379	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
2380	path = btrfs_alloc_path();
2381	if (!path)
2382		return -ENOMEM;
2383
2384	key.objectid = device->devid;
2385	key.type = BTRFS_DEV_EXTENT_KEY;
2386	key.offset = start;
2387	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2388	if (ret)
2389		goto out;
2390
2391	leaf = path->nodes[0];
2392	extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
2393	btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
2394	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
2395					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2396	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
2397
2398	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
2399	btrfs_mark_buffer_dirty(leaf);
2400out:
2401	btrfs_free_path(path);
2402	return ret;
2403}
2404
2405/*
2406 * This function belongs to phase 2.
2407 *
2408 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2409 * phases.
2410 */
2411static int insert_dev_extents(struct btrfs_trans_handle *trans,
2412				   u64 chunk_offset, u64 chunk_size)
2413{
2414	struct btrfs_fs_info *fs_info = trans->fs_info;
2415	struct btrfs_device *device;
2416	struct extent_map *em;
2417	struct map_lookup *map;
2418	u64 dev_offset;
2419	u64 stripe_size;
2420	int i;
2421	int ret = 0;
2422
2423	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
2424	if (IS_ERR(em))
2425		return PTR_ERR(em);
2426
2427	map = em->map_lookup;
2428	stripe_size = em->orig_block_len;
2429
2430	/*
2431	 * Take the device list mutex to prevent races with the final phase of
2432	 * a device replace operation that replaces the device object associated
2433	 * with the map's stripes, because the device object's id can change
2434	 * at any time during that final phase of the device replace operation
2435	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
2436	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
2437	 * resulting in persisting a device extent item with such ID.
2438	 */
2439	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2440	for (i = 0; i < map->num_stripes; i++) {
2441		device = map->stripes[i].dev;
2442		dev_offset = map->stripes[i].physical;
2443
2444		ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
2445				       stripe_size);
2446		if (ret)
2447			break;
2448	}
2449	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2450
2451	free_extent_map(em);
2452	return ret;
2453}
2454
2455/*
2456 * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of
2457 * chunk allocation.
2458 *
2459 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2460 * phases.
2461 */
2462void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2463{
2464	struct btrfs_fs_info *fs_info = trans->fs_info;
2465	struct btrfs_block_group *block_group;
2466	int ret = 0;
2467
2468	while (!list_empty(&trans->new_bgs)) {
2469		int index;
2470
2471		block_group = list_first_entry(&trans->new_bgs,
2472					       struct btrfs_block_group,
2473					       bg_list);
2474		if (ret)
2475			goto next;
2476
2477		index = btrfs_bg_flags_to_raid_index(block_group->flags);
2478
2479		ret = insert_block_group_item(trans, block_group);
2480		if (ret)
2481			btrfs_abort_transaction(trans, ret);
2482		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
2483			      &block_group->runtime_flags)) {
2484			mutex_lock(&fs_info->chunk_mutex);
2485			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
2486			mutex_unlock(&fs_info->chunk_mutex);
2487			if (ret)
2488				btrfs_abort_transaction(trans, ret);
2489		}
2490		ret = insert_dev_extents(trans, block_group->start,
2491					 block_group->length);
2492		if (ret)
2493			btrfs_abort_transaction(trans, ret);
2494		add_block_group_free_space(trans, block_group);
2495
2496		/*
2497		 * If we restriped during balance, we may have added a new raid
2498		 * type, so now add the sysfs entries when it is safe to do so.
2499		 * We don't have to worry about locking here as it's handled in
2500		 * btrfs_sysfs_add_block_group_type.
2501		 */
2502		if (block_group->space_info->block_group_kobjs[index] == NULL)
2503			btrfs_sysfs_add_block_group_type(block_group);
2504
2505		/* Already aborted the transaction if it failed. */
2506next:
2507		btrfs_delayed_refs_rsv_release(fs_info, 1);
2508		list_del_init(&block_group->bg_list);
2509	}
2510	btrfs_trans_release_chunk_metadata(trans);
2511}
2512
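/*
 * In short, for each block group queued on trans->new_bgs the loop above
 * performs the phase 2 work: insert the block group item, insert the chunk
 * item (under chunk_mutex) if it was not already inserted earlier in the
 * transaction, insert one dev extent item per stripe, add the free space
 * tree entries, and finally release the delayed refs reservation taken when
 * the block group was queued.
 */
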
2513/*
2514 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2515 * global root id.  For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
2516 */
2517static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
2518{
2519	u64 div = SZ_1G;
2520	u64 index;
2521
2522	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2523		return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2524
2525	/* If we have a smaller filesystem, index based on 128MiB. */
2526	if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
2527		div = SZ_128M;
2528
2529	offset = div64_u64(offset, div);
2530	div64_u64_rem(offset, fs_info->nr_global_roots, &index);
2531	return index;
2532}
2533
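/*
 * Worked example (hypothetical sizes, EXTENT_TREE_V2 only): with
 * nr_global_roots = 4 on a 500 GiB filesystem, div stays at 1 GiB, so a
 * block group at offset 10 GiB selects index = (10 GiB / 1 GiB) % 4 = 2,
 * i.e. global root 2.  A filesystem of at most 10 GiB uses div = 128 MiB
 * instead, so neighbouring block groups still spread across the roots.
 */
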
2534struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
2535						 u64 bytes_used, u64 type,
2536						 u64 chunk_offset, u64 size)
2537{
2538	struct btrfs_fs_info *fs_info = trans->fs_info;
2539	struct btrfs_block_group *cache;
2540	int ret;
2541
2542	btrfs_set_log_full_commit(trans);
2543
2544	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2545	if (!cache)
2546		return ERR_PTR(-ENOMEM);
2547
2548	cache->length = size;
2549	set_free_space_tree_thresholds(cache);
2550	cache->used = bytes_used;
2551	cache->flags = type;
2552	cache->cached = BTRFS_CACHE_FINISHED;
2553	cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2554
2555	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
2556		set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2557
2558	ret = btrfs_load_block_group_zone_info(cache, true);
2559	if (ret) {
2560		btrfs_put_block_group(cache);
2561		return ERR_PTR(ret);
2562	}
2563
2564	ret = exclude_super_stripes(cache);
2565	if (ret) {
2566		/* We may have excluded something, so call this just in case */
2567		btrfs_free_excluded_extents(cache);
2568		btrfs_put_block_group(cache);
2569		return ERR_PTR(ret);
2570	}
2571
2572	add_new_free_space(cache, chunk_offset, chunk_offset + size);
2573
2574	btrfs_free_excluded_extents(cache);
2575
2576	/*
2577	 * Ensure the corresponding space_info object is created and
2578	 * assigned to our block group. We want our bg to be added to the rbtree
2579	 * with its ->space_info set.
2580	 */
2581	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2582	ASSERT(cache->space_info);
2583
2584	ret = btrfs_add_block_group_cache(fs_info, cache);
2585	if (ret) {
2586		btrfs_remove_free_space_cache(cache);
2587		btrfs_put_block_group(cache);
2588		return ERR_PTR(ret);
2589	}
2590
2591	/*
2592	 * Now that our block group has its ->space_info set and is inserted in
2593	 * the rbtree, update the space info's counters.
2594	 */
2595	trace_btrfs_add_block_group(fs_info, cache, 1);
2596	btrfs_add_bg_to_space_info(fs_info, cache);
2597	btrfs_update_global_block_rsv(fs_info);
2598
2599#ifdef CONFIG_BTRFS_DEBUG
2600	if (btrfs_should_fragment_free_space(cache)) {
2601		u64 new_bytes_used = size - bytes_used;
2602
2603		cache->space_info->bytes_used += new_bytes_used >> 1;
2604		fragment_free_space(cache);
2605	}
2606#endif
2607
2608	list_add_tail(&cache->bg_list, &trans->new_bgs);
2609	trans->delayed_ref_updates++;
2610	btrfs_update_delayed_refs_rsv(trans);
2611
2612	set_avail_alloc_bits(fs_info, type);
2613	return cache;
2614}
2615
2616/*
2617 * Mark one block group RO, can be called several times for the same block
2618 * group.
2619 *
2620 * @cache:		the destination block group
2621 * @do_chunk_alloc:	whether we need to do chunk pre-allocation, this is to
2622 * 			ensure we still have some free space after marking this
2623 * 			block group RO.
2624 */
2625int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2626			     bool do_chunk_alloc)
2627{
2628	struct btrfs_fs_info *fs_info = cache->fs_info;
2629	struct btrfs_trans_handle *trans;
2630	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2631	u64 alloc_flags;
2632	int ret;
2633	bool dirty_bg_running;
2634
2635	/*
2636	 * This can only happen when we are doing a read-only scrub on a
2637	 * read-only mount.
2638	 * In that case we should not start a new transaction on a read-only fs.
2639	 * Thus here we skip all chunk allocations.
2640	 */
2641	if (sb_rdonly(fs_info->sb)) {
2642		mutex_lock(&fs_info->ro_block_group_mutex);
2643		ret = inc_block_group_ro(cache, 0);
2644		mutex_unlock(&fs_info->ro_block_group_mutex);
2645		return ret;
2646	}
2647
2648	do {
2649		trans = btrfs_join_transaction(root);
2650		if (IS_ERR(trans))
2651			return PTR_ERR(trans);
2652
2653		dirty_bg_running = false;
2654
2655		/*
2656		 * We're not allowed to set block groups readonly after the dirty
2657		 * block group cache has started writing.  If it already started,
2658		 * back off and let this transaction commit.
2659		 */
2660		mutex_lock(&fs_info->ro_block_group_mutex);
2661		if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2662			u64 transid = trans->transid;
2663
2664			mutex_unlock(&fs_info->ro_block_group_mutex);
2665			btrfs_end_transaction(trans);
2666
2667			ret = btrfs_wait_for_commit(fs_info, transid);
2668			if (ret)
2669				return ret;
2670			dirty_bg_running = true;
2671		}
2672	} while (dirty_bg_running);
2673
2674	if (do_chunk_alloc) {
2675		/*
2676		 * If we are changing raid levels, try to allocate a
2677		 * corresponding block group with the new raid level.
2678		 */
2679		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2680		if (alloc_flags != cache->flags) {
2681			ret = btrfs_chunk_alloc(trans, alloc_flags,
2682						CHUNK_ALLOC_FORCE);
2683			/*
2684			 * ENOSPC is allowed here, we may have enough space
2685			 * already allocated at the new raid level to carry on
2686			 */
2687			if (ret == -ENOSPC)
2688				ret = 0;
2689			if (ret < 0)
2690				goto out;
2691		}
2692	}
2693
2694	ret = inc_block_group_ro(cache, 0);
2695	if (!do_chunk_alloc || ret == -ETXTBSY)
2696		goto unlock_out;
2697	if (!ret)
2698		goto out;
2699	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2700	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2701	if (ret < 0)
2702		goto out;
2703	/*
2704	 * We have allocated a new chunk. We also need to activate that chunk to
2705	 * grant metadata tickets for zoned filesystem.
2706	 */
2707	ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
2708	if (ret < 0)
2709		goto out;
2710
2711	ret = inc_block_group_ro(cache, 0);
2712	if (ret == -ETXTBSY)
2713		goto unlock_out;
2714out:
2715	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2716		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2717		mutex_lock(&fs_info->chunk_mutex);
2718		check_system_chunk(trans, alloc_flags);
2719		mutex_unlock(&fs_info->chunk_mutex);
2720	}
2721unlock_out:
2722	mutex_unlock(&fs_info->ro_block_group_mutex);
2723
2724	btrfs_end_transaction(trans);
2725	return ret;
2726}
2727
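/*
 * Typical usage sketch (callers simplified, not verbatim): scrub marks a
 * block group read-only with do_chunk_alloc == false since it only needs the
 * group to stay stable while it is checked, whereas relocation/balance
 * passes do_chunk_alloc == true so that a replacement chunk can be allocated
 * before writes into this group are blocked.  Every successful call is
 * paired with btrfs_dec_block_group_ro() once the caller is done.
 */
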
2728void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2729{
2730	struct btrfs_space_info *sinfo = cache->space_info;
2731	u64 num_bytes;
2732
2733	BUG_ON(!cache->ro);
2734
2735	spin_lock(&sinfo->lock);
2736	spin_lock(&cache->lock);
2737	if (!--cache->ro) {
2738		if (btrfs_is_zoned(cache->fs_info)) {
2739			/* Migrate zone_unusable bytes back */
2740			cache->zone_unusable =
2741				(cache->alloc_offset - cache->used) +
2742				(cache->length - cache->zone_capacity);
2743			sinfo->bytes_zone_unusable += cache->zone_unusable;
2744			sinfo->bytes_readonly -= cache->zone_unusable;
2745		}
2746		num_bytes = cache->length - cache->reserved -
2747			    cache->pinned - cache->bytes_super -
2748			    cache->zone_unusable - cache->used;
2749		sinfo->bytes_readonly -= num_bytes;
2750		list_del_init(&cache->ro_list);
2751	}
2752	spin_unlock(&cache->lock);
2753	spin_unlock(&sinfo->lock);
2754}
2755
2756static int update_block_group_item(struct btrfs_trans_handle *trans,
2757				   struct btrfs_path *path,
2758				   struct btrfs_block_group *cache)
2759{
2760	struct btrfs_fs_info *fs_info = trans->fs_info;
2761	int ret;
2762	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2763	unsigned long bi;
2764	struct extent_buffer *leaf;
2765	struct btrfs_block_group_item bgi;
2766	struct btrfs_key key;
2767	u64 old_commit_used;
2768	u64 used;
2769
2770	/*
2771	 * Block group item updates can be triggered outside of the commit
2772	 * transaction critical section, thus we need a consistent view of used bytes.
2773	 * We cannot use cache->used directly outside of the spin lock, as it
2774	 * may be changed.
2775	 */
2776	spin_lock(&cache->lock);
2777	old_commit_used = cache->commit_used;
2778	used = cache->used;
2779	/* No change in used bytes, can safely skip it. */
2780	if (cache->commit_used == used) {
2781		spin_unlock(&cache->lock);
2782		return 0;
2783	}
2784	cache->commit_used = used;
2785	spin_unlock(&cache->lock);
2786
2787	key.objectid = cache->start;
2788	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2789	key.offset = cache->length;
2790
2791	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2792	if (ret) {
2793		if (ret > 0)
2794			ret = -ENOENT;
2795		goto fail;
2796	}
2797
2798	leaf = path->nodes[0];
2799	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2800	btrfs_set_stack_block_group_used(&bgi, used);
2801	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2802						   cache->global_root_id);
2803	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
2804	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
2805	btrfs_mark_buffer_dirty(leaf);
2806fail:
2807	btrfs_release_path(path);
2808	/* We didn't update the block group item, need to revert @commit_used. */
2809	if (ret < 0) {
2810		spin_lock(&cache->lock);
2811		cache->commit_used = old_commit_used;
2812		spin_unlock(&cache->lock);
2813	}
2814	return ret;
2816}
2817
2818static int cache_save_setup(struct btrfs_block_group *block_group,
2819			    struct btrfs_trans_handle *trans,
2820			    struct btrfs_path *path)
2821{
2822	struct btrfs_fs_info *fs_info = block_group->fs_info;
2823	struct btrfs_root *root = fs_info->tree_root;
2824	struct inode *inode = NULL;
2825	struct extent_changeset *data_reserved = NULL;
2826	u64 alloc_hint = 0;
2827	int dcs = BTRFS_DC_ERROR;
2828	u64 cache_size = 0;
2829	int retries = 0;
2830	int ret = 0;
2831
2832	if (!btrfs_test_opt(fs_info, SPACE_CACHE))
2833		return 0;
2834
2835	/*
2836	 * If this block group is smaller than 100 megs don't bother caching the
2837	 * block group.
2838	 */
2839	if (block_group->length < (100 * SZ_1M)) {
2840		spin_lock(&block_group->lock);
2841		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2842		spin_unlock(&block_group->lock);
2843		return 0;
2844	}
2845
2846	if (TRANS_ABORTED(trans))
2847		return 0;
2848again:
2849	inode = lookup_free_space_inode(block_group, path);
2850	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2851		ret = PTR_ERR(inode);
2852		btrfs_release_path(path);
2853		goto out;
2854	}
2855
2856	if (IS_ERR(inode)) {
2857		BUG_ON(retries);
2858		retries++;
2859
2860		if (block_group->ro)
2861			goto out_free;
2862
2863		ret = create_free_space_inode(trans, block_group, path);
2864		if (ret)
2865			goto out_free;
2866		goto again;
2867	}
2868
2869	/*
2870	 * We want to set the generation to 0, that way if anything goes wrong
2871	 * from here on out we know not to trust this cache when we load up next
2872	 * time.
2873	 */
2874	BTRFS_I(inode)->generation = 0;
2875	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2876	if (ret) {
2877		/*
2878		 * So theoretically we could recover from this, simply set the
2879		 * super cache generation to 0 so we know to invalidate the
2880		 * cache, but then we'd have to keep track of the block groups
2881		 * that fail this way so we know we _have_ to reset this cache
2882		 * before the next commit or risk reading stale cache.  So to
2883		 * limit our exposure to horrible edge cases lets just abort the
2884		 * transaction, this only happens in really bad situations
2885		 * anyway.
2886		 */
2887		btrfs_abort_transaction(trans, ret);
2888		goto out_put;
2889	}
2890	WARN_ON(ret);
2891
2892	/* We've already set up this transaction, go ahead and exit */
2893	if (block_group->cache_generation == trans->transid &&
2894	    i_size_read(inode)) {
2895		dcs = BTRFS_DC_SETUP;
2896		goto out_put;
2897	}
2898
2899	if (i_size_read(inode) > 0) {
2900		ret = btrfs_check_trunc_cache_free_space(fs_info,
2901					&fs_info->global_block_rsv);
2902		if (ret)
2903			goto out_put;
2904
2905		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2906		if (ret)
2907			goto out_put;
2908	}
2909
2910	spin_lock(&block_group->lock);
2911	if (block_group->cached != BTRFS_CACHE_FINISHED ||
2912	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2913		/*
2914		 * don't bother trying to write stuff out _if_
2915		 * a) we're not cached,
2916		 * b) we're with nospace_cache mount option,
2917		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
2918		 */
2919		dcs = BTRFS_DC_WRITTEN;
2920		spin_unlock(&block_group->lock);
2921		goto out_put;
2922	}
2923	spin_unlock(&block_group->lock);
2924
2925	/*
2926	 * We hit an ENOSPC when setting up the cache in this transaction, so just
2927	 * skip doing the setup; we've already cleared the cache so we're safe.
2928	 */
2929	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2930		ret = -ENOSPC;
2931		goto out_put;
2932	}
2933
2934	/*
2935	 * Try to preallocate enough space based on how big the block group is.
2936	 * Keep in mind this has to include any pinned space which could end up
2937	 * taking up quite a bit since it's not folded into the other space
2938	 * cache.
2939	 */
2940	cache_size = div_u64(block_group->length, SZ_256M);
2941	if (!cache_size)
2942		cache_size = 1;
2943
2944	cache_size *= 16;
2945	cache_size *= fs_info->sectorsize;
2946
2947	ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
2948					  cache_size, false);
2949	if (ret)
2950		goto out_put;
2951
2952	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
2953					      cache_size, cache_size,
2954					      &alloc_hint);
2955	/*
2956	 * Our cache requires contiguous chunks so that we don't modify a bunch
2957	 * of metadata or split extents when writing the cache out, which means
2958	 * we can enospc if we are heavily fragmented in addition to just normal
2959	 * out of space conditions.  So if we hit this just skip setting up any
2960	 * other block groups for this transaction, maybe we'll unpin enough
2961	 * space the next time around.
2962	 */
2963	if (!ret)
2964		dcs = BTRFS_DC_SETUP;
2965	else if (ret == -ENOSPC)
2966		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2967
2968out_put:
2969	iput(inode);
2970out_free:
2971	btrfs_release_path(path);
2972out:
2973	spin_lock(&block_group->lock);
2974	if (!ret && dcs == BTRFS_DC_SETUP)
2975		block_group->cache_generation = trans->transid;
2976	block_group->disk_cache_state = dcs;
2977	spin_unlock(&block_group->lock);
2978
2979	extent_changeset_free(data_reserved);
2980	return ret;
2981}
2982
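/*
 * Size example for the preallocation above (hypothetical block group, v1
 * space cache, 4 KiB sectors): a 1 GiB block group gives
 *
 *   cache_size = (1 GiB / 256 MiB) * 16 * 4096 = 256 KiB
 *
 * reserved up front for the free space cache inode, i.e. roughly 16 sectors
 * of cache per 256 MiB of block group.
 */
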
2983int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2984{
2985	struct btrfs_fs_info *fs_info = trans->fs_info;
2986	struct btrfs_block_group *cache, *tmp;
2987	struct btrfs_transaction *cur_trans = trans->transaction;
2988	struct btrfs_path *path;
2989
2990	if (list_empty(&cur_trans->dirty_bgs) ||
2991	    !btrfs_test_opt(fs_info, SPACE_CACHE))
2992		return 0;
2993
2994	path = btrfs_alloc_path();
2995	if (!path)
2996		return -ENOMEM;
2997
2998	/* Could add new block groups, use _safe just in case */
2999	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3000				 dirty_list) {
3001		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3002			cache_save_setup(cache, trans, path);
3003	}
3004
3005	btrfs_free_path(path);
3006	return 0;
3007}
3008
3009/*
3010 * Transaction commit does final block group cache writeback during a critical
3011 * section where nothing is allowed to change the FS.  This is required in
3012 * order for the cache to actually match the block group, but can introduce a
3013 * lot of latency into the commit.
3014 *
3015 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3016 * There's a chance we'll have to redo some of it if the block group changes
3017 * again during the commit, but it greatly reduces the commit latency by
3018 * getting rid of the easy block groups while we're still allowing others to
3019 * join the commit.
3020 */
3021int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3022{
3023	struct btrfs_fs_info *fs_info = trans->fs_info;
3024	struct btrfs_block_group *cache;
3025	struct btrfs_transaction *cur_trans = trans->transaction;
3026	int ret = 0;
3027	int should_put;
3028	struct btrfs_path *path = NULL;
3029	LIST_HEAD(dirty);
3030	struct list_head *io = &cur_trans->io_bgs;
3031	int loops = 0;
3032
3033	spin_lock(&cur_trans->dirty_bgs_lock);
3034	if (list_empty(&cur_trans->dirty_bgs)) {
3035		spin_unlock(&cur_trans->dirty_bgs_lock);
3036		return 0;
3037	}
3038	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3039	spin_unlock(&cur_trans->dirty_bgs_lock);
3040
3041again:
3042	/* Make sure all the block groups on our dirty list actually exist */
3043	btrfs_create_pending_block_groups(trans);
3044
3045	if (!path) {
3046		path = btrfs_alloc_path();
3047		if (!path) {
3048			ret = -ENOMEM;
3049			goto out;
3050		}
3051	}
3052
3053	/*
3054	 * cache_write_mutex is here only to save us from balance or automatic
3055	 * removal of empty block groups deleting this block group while we are
3056	 * writing out the cache
3057	 */
3058	mutex_lock(&trans->transaction->cache_write_mutex);
3059	while (!list_empty(&dirty)) {
3060		bool drop_reserve = true;
3061
3062		cache = list_first_entry(&dirty, struct btrfs_block_group,
3063					 dirty_list);
3064		/*
3065		 * This can happen if something re-dirties a block group that
3066		 * is already under IO.  Just wait for it to finish and then do
3067		 * it all again
3068		 */
3069		if (!list_empty(&cache->io_list)) {
3070			list_del_init(&cache->io_list);
3071			btrfs_wait_cache_io(trans, cache, path);
3072			btrfs_put_block_group(cache);
3073		}
3074
3076		/*
3077		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3078		 * it should update the cache_state.  Don't delete until after
3079		 * we wait.
3080		 *
3081		 * Since we're not running in the commit critical section
3082		 * we need the dirty_bgs_lock to protect from update_block_group
3083		 */
3084		spin_lock(&cur_trans->dirty_bgs_lock);
3085		list_del_init(&cache->dirty_list);
3086		spin_unlock(&cur_trans->dirty_bgs_lock);
3087
3088		should_put = 1;
3089
3090		cache_save_setup(cache, trans, path);
3091
3092		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3093			cache->io_ctl.inode = NULL;
3094			ret = btrfs_write_out_cache(trans, cache, path);
3095			if (ret == 0 && cache->io_ctl.inode) {
3096				should_put = 0;
3097
3098				/*
3099				 * The cache_write_mutex is protecting the
3100				 * io_list, also refer to the definition of
3101				 * btrfs_transaction::io_bgs for more details
3102				 */
3103				list_add_tail(&cache->io_list, io);
3104			} else {
3105				/*
3106				 * If we failed to write the cache, the
3107				 * generation will be bad and life goes on
3108				 */
3109				ret = 0;
3110			}
3111		}
3112		if (!ret) {
3113			ret = update_block_group_item(trans, path, cache);
3114			/*
3115			 * Our block group might still be attached to the list
3116			 * of new block groups in the transaction handle of some
3117			 * other task (struct btrfs_trans_handle->new_bgs). This
3118			 * means its block group item isn't yet in the extent
3119			 * tree. If this happens ignore the error, as we will
3120			 * try again later in the critical section of the
3121			 * transaction commit.
3122			 */
3123			if (ret == -ENOENT) {
3124				ret = 0;
3125				spin_lock(&cur_trans->dirty_bgs_lock);
3126				if (list_empty(&cache->dirty_list)) {
3127					list_add_tail(&cache->dirty_list,
3128						      &cur_trans->dirty_bgs);
3129					btrfs_get_block_group(cache);
3130					drop_reserve = false;
3131				}
3132				spin_unlock(&cur_trans->dirty_bgs_lock);
3133			} else if (ret) {
3134				btrfs_abort_transaction(trans, ret);
3135			}
3136		}
3137
3138		/* If it's not on the io list, we need to put the block group */
3139		if (should_put)
3140			btrfs_put_block_group(cache);
3141		if (drop_reserve)
3142			btrfs_delayed_refs_rsv_release(fs_info, 1);
3143		/*
3144		 * Avoid blocking other tasks for too long. It might even save
3145		 * us from writing caches for block groups that are going to be
3146		 * removed.
3147		 */
3148		mutex_unlock(&trans->transaction->cache_write_mutex);
3149		if (ret)
3150			goto out;
3151		mutex_lock(&trans->transaction->cache_write_mutex);
3152	}
3153	mutex_unlock(&trans->transaction->cache_write_mutex);
3154
3155	/*
3156	 * Go through delayed refs for all the stuff we've just kicked off
3157	 * and then loop back (just once)
3158	 */
3159	if (!ret)
3160		ret = btrfs_run_delayed_refs(trans, 0);
3161	if (!ret && loops == 0) {
3162		loops++;
3163		spin_lock(&cur_trans->dirty_bgs_lock);
3164		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3165		/*
3166		 * dirty_bgs_lock protects us from concurrent block group
3167		 * deletes too (not just cache_write_mutex).
3168		 */
3169		if (!list_empty(&dirty)) {
3170			spin_unlock(&cur_trans->dirty_bgs_lock);
3171			goto again;
3172		}
3173		spin_unlock(&cur_trans->dirty_bgs_lock);
3174	}
3175out:
3176	if (ret < 0) {
3177		spin_lock(&cur_trans->dirty_bgs_lock);
3178		list_splice_init(&dirty, &cur_trans->dirty_bgs);
3179		spin_unlock(&cur_trans->dirty_bgs_lock);
3180		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3181	}
3182
3183	btrfs_free_path(path);
3184	return ret;
3185}
3186
3187int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
3188{
3189	struct btrfs_fs_info *fs_info = trans->fs_info;
3190	struct btrfs_block_group *cache;
3191	struct btrfs_transaction *cur_trans = trans->transaction;
3192	int ret = 0;
3193	int should_put;
3194	struct btrfs_path *path;
3195	struct list_head *io = &cur_trans->io_bgs;
3196
3197	path = btrfs_alloc_path();
3198	if (!path)
3199		return -ENOMEM;
3200
3201	/*
3202	 * Even though we are in the critical section of the transaction commit,
3203	 * we can still have concurrent tasks adding elements to this
3204	 * transaction's list of dirty block groups. These tasks correspond to
3205	 * endio free space workers started when writeback finishes for a
3206	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3207	 * allocate new block groups as a result of COWing nodes of the root
3208	 * tree when updating the free space inode. The writeback for the space
3209	 * caches is triggered by an earlier call to
3210	 * btrfs_start_dirty_block_groups() and iterations of the following
3211	 * loop.
3212	 * Also we want to do the cache_save_setup first and then run the
3213	 * delayed refs to make sure we have the best chance at doing this all
3214	 * in one shot.
3215	 */
3216	spin_lock(&cur_trans->dirty_bgs_lock);
3217	while (!list_empty(&cur_trans->dirty_bgs)) {
3218		cache = list_first_entry(&cur_trans->dirty_bgs,
3219					 struct btrfs_block_group,
3220					 dirty_list);
3221
3222		/*
3223		 * This can happen if cache_save_setup re-dirties a block group
3224		 * that is already under IO.  Just wait for it to finish and
3225		 * then do it all again
3226		 */
3227		if (!list_empty(&cache->io_list)) {
3228			spin_unlock(&cur_trans->dirty_bgs_lock);
3229			list_del_init(&cache->io_list);
3230			btrfs_wait_cache_io(trans, cache, path);
3231			btrfs_put_block_group(cache);
3232			spin_lock(&cur_trans->dirty_bgs_lock);
3233		}
3234
3235		/*
3236		 * Don't remove from the dirty list until after we've waited on
3237		 * any pending IO
3238		 */
3239		list_del_init(&cache->dirty_list);
3240		spin_unlock(&cur_trans->dirty_bgs_lock);
3241		should_put = 1;
3242
3243		cache_save_setup(cache, trans, path);
3244
3245		if (!ret)
3246			ret = btrfs_run_delayed_refs(trans,
3247						     (unsigned long) -1);
3248
3249		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3250			cache->io_ctl.inode = NULL;
3251			ret = btrfs_write_out_cache(trans, cache, path);
3252			if (ret == 0 && cache->io_ctl.inode) {
3253				should_put = 0;
3254				list_add_tail(&cache->io_list, io);
3255			} else {
3256				/*
3257				 * If we failed to write the cache, the
3258				 * generation will be bad and life goes on
3259				 */
3260				ret = 0;
3261			}
3262		}
3263		if (!ret) {
3264			ret = update_block_group_item(trans, path, cache);
3265			/*
3266			 * One of the free space endio workers might have
3267			 * created a new block group while updating a free space
3268			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3269			 * and hasn't released its transaction handle yet, in
3270			 * which case the new block group is still attached to
3271			 * its transaction handle and its creation has not
3272			 * finished yet (no block group item in the extent tree
3273			 * yet, etc). If this is the case, wait for all free
3274			 * space endio workers to finish and retry. This is a
3275			 * very rare case so no need for a more efficient and
3276			 * complex approach.
3277			 */
3278			if (ret == -ENOENT) {
3279				wait_event(cur_trans->writer_wait,
3280				   atomic_read(&cur_trans->num_writers) == 1);
3281				ret = update_block_group_item(trans, path, cache);
3282			}
3283			if (ret)
3284				btrfs_abort_transaction(trans, ret);
3285		}
3286
3287		/* If it's not on the io list, we need to put the block group */
3288		if (should_put)
3289			btrfs_put_block_group(cache);
3290		btrfs_delayed_refs_rsv_release(fs_info, 1);
3291		spin_lock(&cur_trans->dirty_bgs_lock);
3292	}
3293	spin_unlock(&cur_trans->dirty_bgs_lock);
3294
3295	/*
3296	 * Refer to the definition of the io_bgs member for details on why it's
3297	 * safe to use it without any locking
3298	 */
3299	while (!list_empty(io)) {
3300		cache = list_first_entry(io, struct btrfs_block_group,
3301					 io_list);
3302		list_del_init(&cache->io_list);
3303		btrfs_wait_cache_io(trans, cache, path);
3304		btrfs_put_block_group(cache);
3305	}
3306
3307	btrfs_free_path(path);
3308	return ret;
3309}
3310
3311int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3312			     u64 bytenr, u64 num_bytes, bool alloc)
3313{
3314	struct btrfs_fs_info *info = trans->fs_info;
3315	struct btrfs_block_group *cache = NULL;
3316	u64 total = num_bytes;
3317	u64 old_val;
3318	u64 byte_in_group;
3319	int factor;
3320	int ret = 0;
3321
3322	/* Block accounting for super block */
3323	spin_lock(&info->delalloc_root_lock);
3324	old_val = btrfs_super_bytes_used(info->super_copy);
3325	if (alloc)
3326		old_val += num_bytes;
3327	else
3328		old_val -= num_bytes;
3329	btrfs_set_super_bytes_used(info->super_copy, old_val);
3330	spin_unlock(&info->delalloc_root_lock);
3331
3332	while (total) {
3333		bool reclaim;
3334
3335		cache = btrfs_lookup_block_group(info, bytenr);
3336		if (!cache) {
3337			ret = -ENOENT;
3338			break;
3339		}
3340		factor = btrfs_bg_type_to_factor(cache->flags);
3341
3342		/*
3343		 * If this block group has free space cache written out, we
3344		 * need to make sure to load it if we are removing space.  This
3345		 * is because we need the unpinning stage to actually add the
3346		 * space back to the block group, otherwise we will leak space.
3347		 */
3348		if (!alloc && !btrfs_block_group_done(cache))
3349			btrfs_cache_block_group(cache, true);
3350
3351		byte_in_group = bytenr - cache->start;
3352		WARN_ON(byte_in_group > cache->length);
3353
3354		spin_lock(&cache->space_info->lock);
3355		spin_lock(&cache->lock);
3356
3357		if (btrfs_test_opt(info, SPACE_CACHE) &&
3358		    cache->disk_cache_state < BTRFS_DC_CLEAR)
3359			cache->disk_cache_state = BTRFS_DC_CLEAR;
3360
3361		old_val = cache->used;
3362		num_bytes = min(total, cache->length - byte_in_group);
3363		if (alloc) {
3364			old_val += num_bytes;
3365			cache->used = old_val;
3366			cache->reserved -= num_bytes;
3367			cache->space_info->bytes_reserved -= num_bytes;
3368			cache->space_info->bytes_used += num_bytes;
3369			cache->space_info->disk_used += num_bytes * factor;
3370			spin_unlock(&cache->lock);
3371			spin_unlock(&cache->space_info->lock);
3372		} else {
3373			old_val -= num_bytes;
3374			cache->used = old_val;
3375			cache->pinned += num_bytes;
3376			btrfs_space_info_update_bytes_pinned(info,
3377					cache->space_info, num_bytes);
3378			cache->space_info->bytes_used -= num_bytes;
3379			cache->space_info->disk_used -= num_bytes * factor;
3380
3381			reclaim = should_reclaim_block_group(cache, num_bytes);
3382			spin_unlock(&cache->lock);
3383			spin_unlock(&cache->space_info->lock);
3384
3385			set_extent_dirty(&trans->transaction->pinned_extents,
3386					 bytenr, bytenr + num_bytes - 1,
3387					 GFP_NOFS | __GFP_NOFAIL);
3388		}
3389
3390		spin_lock(&trans->transaction->dirty_bgs_lock);
3391		if (list_empty(&cache->dirty_list)) {
3392			list_add_tail(&cache->dirty_list,
3393				      &trans->transaction->dirty_bgs);
3394			trans->delayed_ref_updates++;
3395			btrfs_get_block_group(cache);
3396		}
3397		spin_unlock(&trans->transaction->dirty_bgs_lock);
3398
3399		/*
3400		 * No longer have used bytes in this block group, queue it for
3401		 * deletion. We do this after adding the block group to the
3402		 * dirty list to avoid races between cleaner kthread and space
3403		 * cache writeout.
3404		 */
3405		if (!alloc && old_val == 0) {
3406			if (!btrfs_test_opt(info, DISCARD_ASYNC))
3407				btrfs_mark_bg_unused(cache);
3408		} else if (!alloc && reclaim) {
3409			btrfs_mark_bg_to_reclaim(cache);
3410		}
3411
3412		btrfs_put_block_group(cache);
3413		total -= num_bytes;
3414		bytenr += num_bytes;
3415	}
3416
3417	/* Modified block groups are accounted for in the delayed_refs_rsv. */
3418	btrfs_update_delayed_refs_rsv(trans);
3419	return ret;
3420}
3421
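/*
 * Accounting example (hypothetical extent): allocating a 16 KiB tree block
 * in a metadata block group with the DUP profile (factor 2) moves 16 KiB
 * from cache->reserved to cache->used, adds 16 KiB to
 * space_info->bytes_used and 32 KiB to space_info->disk_used.  Freeing it
 * instead moves the 16 KiB to cache->pinned and records the range in the
 * transaction's pinned_extents, so the space only returns to the free space
 * cache once the transaction commits and unpins it.
 */
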
3422/*
3423 * Update the block_group and space info counters.
3424 *
3425 * @cache:	The cache we are manipulating
3426 * @ram_bytes:  The number of bytes of file content, which will be the same
3427 *              as @num_bytes except for the compression path.
3428 * @num_bytes:	The number of bytes in question
3429 * @delalloc:   The blocks are allocated for the delalloc write
3430 *
3431 * This is called by the allocator when it reserves space. If this is a
3432 * reservation and the block group has become read only we cannot make the
3433 * reservation and return -EAGAIN, otherwise this function always succeeds.
3434 */
3435int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3436			     u64 ram_bytes, u64 num_bytes, int delalloc)
3437{
3438	struct btrfs_space_info *space_info = cache->space_info;
3439	int ret = 0;
3440
3441	spin_lock(&space_info->lock);
3442	spin_lock(&cache->lock);
3443	if (cache->ro) {
3444		ret = -EAGAIN;
3445	} else {
3446		cache->reserved += num_bytes;
3447		space_info->bytes_reserved += num_bytes;
3448		trace_btrfs_space_reservation(cache->fs_info, "space_info",
3449					      space_info->flags, num_bytes, 1);
3450		btrfs_space_info_update_bytes_may_use(cache->fs_info,
3451						      space_info, -ram_bytes);
3452		if (delalloc)
3453			cache->delalloc_bytes += num_bytes;
3454
3455		/*
3456		 * Compression can use less space than we reserved, so wake
3457		 * tickets if that happens
3458		 */
3459		if (num_bytes < ram_bytes)
3460			btrfs_try_granting_tickets(cache->fs_info, space_info);
3461	}
3462	spin_unlock(&cache->lock);
3463	spin_unlock(&space_info->lock);
3464	return ret;
3465}
3466
3467/*
3468 * Update the block_group and space info counters.
3469 *
3470 * @cache:      The cache we are manipulating
3471 * @num_bytes:  The number of bytes in question
3472 * @delalloc:   The blocks are allocated for the delalloc write
3473 *
3474 * This is called by somebody who is freeing space that was never actually used
3475 * on disk.  For example if you reserve some space for a new leaf in transaction
3476 * A and before transaction A commits you free that leaf, you call this
3477 * to clear the reservation.
3478 */
3479void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3480			       u64 num_bytes, int delalloc)
3481{
3482	struct btrfs_space_info *space_info = cache->space_info;
3483
3484	spin_lock(&space_info->lock);
3485	spin_lock(&cache->lock);
3486	if (cache->ro)
3487		space_info->bytes_readonly += num_bytes;
3488	cache->reserved -= num_bytes;
3489	space_info->bytes_reserved -= num_bytes;
3490	space_info->max_extent_size = 0;
3491
3492	if (delalloc)
3493		cache->delalloc_bytes -= num_bytes;
3494	spin_unlock(&cache->lock);
3495
3496	btrfs_try_granting_tickets(cache->fs_info, space_info);
3497	spin_unlock(&space_info->lock);
3498}
3499
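/*
 * Reservation lifecycle example (hypothetical numbers): a buffered write
 * reserves 1 MiB of data space (bytes_may_use += 1 MiB).  If compression
 * then allocates only a 256 KiB disk extent for that data,
 * btrfs_add_reserved_bytes(cache, 1 MiB, 256 KiB, 1) moves 256 KiB into
 * cache->reserved/space_info->bytes_reserved but drops the full 1 MiB from
 * bytes_may_use, and the 768 KiB difference may immediately satisfy waiting
 * tickets.  If the extent ends up not being used, btrfs_free_reserved_bytes()
 * gives the 256 KiB back and wakes tickets again.
 */
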
3500static void force_metadata_allocation(struct btrfs_fs_info *info)
3501{
3502	struct list_head *head = &info->space_info;
3503	struct btrfs_space_info *found;
3504
3505	list_for_each_entry(found, head, list) {
3506		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3507			found->force_alloc = CHUNK_ALLOC_FORCE;
3508	}
3509}
3510
3511static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
3512			      struct btrfs_space_info *sinfo, int force)
3513{
3514	u64 bytes_used = btrfs_space_info_used(sinfo, false);
3515	u64 thresh;
3516
3517	if (force == CHUNK_ALLOC_FORCE)
3518		return 1;
3519
3520	/*
3521	 * in limited mode, we want to have some free space up to
3522	 * about 1% of the FS size.
3523	 */
3524	if (force == CHUNK_ALLOC_LIMITED) {
3525		thresh = btrfs_super_total_bytes(fs_info->super_copy);
3526		thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
3527
3528		if (sinfo->total_bytes - bytes_used < thresh)
3529			return 1;
3530	}
3531
3532	if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
3533		return 0;
3534	return 1;
3535}
3536
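/*
 * Threshold example (hypothetical numbers, 1 TiB filesystem): with
 * force == CHUNK_ALLOC_LIMITED the threshold is max(64 MiB, 1% of the
 * filesystem size), roughly 10 GiB, so a chunk is allocated whenever the
 * space_info has less than that left unused in its existing chunks.  With
 * CHUNK_ALLOC_NO_FORCE only the final check applies: a new chunk is allowed
 * once the existing chunks of this type are at least ~80% used
 * (bytes_used + 2 MiB >= 80% of sinfo->total_bytes).
 */
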
3537int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3538{
3539	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3540
3541	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3542}
3543
3544static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
3545{
3546	struct btrfs_block_group *bg;
3547	int ret;
3548
3549	/*
3550	 * Check if we have enough space in the system space info because we
3551	 * will need to update device items in the chunk btree and insert a new
3552	 * chunk item in the chunk btree as well. This will allocate a new
3553	 * system block group if needed.
3554	 */
3555	check_system_chunk(trans, flags);
3556
3557	bg = btrfs_create_chunk(trans, flags);
3558	if (IS_ERR(bg)) {
3559		ret = PTR_ERR(bg);
3560		goto out;
3561	}
3562
3563	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3564	/*
3565	 * Normally we are not expected to fail with -ENOSPC here, since we have
3566	 * previously reserved space in the system space_info and allocated one
3567	 * new system chunk if necessary. However there are three exceptions:
3568	 *
3569	 * 1) We may have enough free space in the system space_info but all the
3570	 *    existing system block groups have a profile which can not be used
3571	 *    for extent allocation.
3572	 *
3573	 *    This happens when mounting in degraded mode. For example we have a
3574	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
3575	 *    using the other device in degraded mode. If we then allocate a chunk,
3576	 *    we may have enough free space in the existing system space_info, but
3577	 *    none of the block groups can be used for extent allocation since they
3578	 *    have a RAID1 profile, and because we are in degraded mode with a
3579	 *    single device, we are forced to allocate a new system chunk with a
3580	 *    SINGLE profile. Making check_system_chunk() iterate over all system
3581	 *    block groups and check if they have a usable profile and enough space
3582	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
3583	 *    try again after forcing allocation of a new system chunk. This way
3584	 *    we avoid paying the cost of that search in normal circumstances, when
3585	 *    we were not mounted in degraded mode;
3586	 *
3587	 * 2) We had enough free space in the system space_info, and one suitable
3588	 *    block group to allocate from when we called check_system_chunk()
3589	 *    above. However right after we called it, the only system block group
3590	 *    with enough free space got turned into RO mode by a running scrub,
3591	 *    and in this case we have to allocate a new one and retry. We only
3592	 *    need to do this allocation and retry once, since we have a transaction
3593	 *    handle and scrub uses the commit root to search for block groups;
3594	 *
3595	 * 3) We had one system block group with enough free space when we called
3596	 *    check_system_chunk(), but after that, right before we tried to
3597	 *    allocate the last extent buffer we needed, a discard operation came
3598	 *    in and it temporarily removed the last free space entry from the
3599	 *    block group (discard removes a free space entry, discards the range, and
3600	 *    then adds back the entry to the block group cache).
3601	 */
3602	if (ret == -ENOSPC) {
3603		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
3604		struct btrfs_block_group *sys_bg;
3605
3606		sys_bg = btrfs_create_chunk(trans, sys_flags);
3607		if (IS_ERR(sys_bg)) {
3608			ret = PTR_ERR(sys_bg);
3609			btrfs_abort_transaction(trans, ret);
3610			goto out;
3611		}
3612
3613		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3614		if (ret) {
3615			btrfs_abort_transaction(trans, ret);
3616			goto out;
3617		}
3618
3619		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3620		if (ret) {
3621			btrfs_abort_transaction(trans, ret);
3622			goto out;
3623		}
3624	} else if (ret) {
3625		btrfs_abort_transaction(trans, ret);
3626		goto out;
3627	}
3628out:
3629	btrfs_trans_release_chunk_metadata(trans);
3630
3631	if (ret)
3632		return ERR_PTR(ret);
3633
3634	btrfs_get_block_group(bg);
3635	return bg;
3636}
3637
3638/*
3639 * Chunk allocation is done in 2 phases:
3640 *
3641 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3642 *    the chunk, the chunk mapping, create its block group and add the items
3643 *    that belong in the chunk btree to it - more specifically, we need to
3644 *    update device items in the chunk btree and add a new chunk item to it.
3645 *
3646 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3647 *    group item to the extent btree and the device extent items to the devices
3648 *    btree.
3649 *
3650 * This is done to prevent deadlocks. For example when COWing a node from the
3651 * extent btree we are holding a write lock on the node's parent and if we
3652	 * trigger chunk allocation and attempt to insert the new block group item
3653	 * in the extent btree right away, we could deadlock because the path for the
3654 * insertion can include that parent node. At first glance it seems impossible
3655 * to trigger chunk allocation after starting a transaction since tasks should
3656 * reserve enough transaction units (metadata space), however while that is true
3657 * most of the time, chunk allocation may still be triggered for several reasons:
3658 *
3659 * 1) When reserving metadata, we check if there is enough free space in the
3660 *    metadata space_info and therefore don't trigger allocation of a new chunk.
3661 *    However later when the task actually tries to COW an extent buffer from
3662 *    the extent btree or from the device btree for example, it is forced to
3663 *    allocate a new block group (chunk) because the only one that had enough
3664 *    free space was just turned to RO mode by a running scrub for example (or
3665 *    device replace, block group reclaim thread, etc), so we can not use it
3666 *    for allocating an extent and end up being forced to allocate a new one;
3667 *
3668 * 2) Because we only check that the metadata space_info has enough free bytes,
3669 *    we end up not allocating a new metadata chunk in that case. However if
3670 *    the filesystem was mounted in degraded mode, none of the existing block
3671 *    groups might be suitable for extent allocation due to their incompatible
3672	 *    profile (e.g. mounting a 2-device filesystem where all block groups
3673	 *    use a RAID1 profile, in degraded mode with a single device). In this case
3674 *    when the task attempts to COW some extent buffer of the extent btree for
3675 *    example, it will trigger allocation of a new metadata block group with a
3676 *    suitable profile (SINGLE profile in the example of the degraded mount of
3677 *    the RAID1 filesystem);
3678 *
3679 * 3) The task has reserved enough transaction units / metadata space, but when
3680 *    it attempts to COW an extent buffer from the extent or device btree for
3681 *    example, it does not find any free extent in any metadata block group,
3682	 *    and is therefore forced to try to allocate a new metadata block group.
3683 *    This is because some other task allocated all available extents in the
3684 *    meanwhile - this typically happens with tasks that don't reserve space
3685 *    properly, either intentionally or as a bug. One example where this is
3686 *    done intentionally is fsync, as it does not reserve any transaction units
3687 *    and ends up allocating a variable number of metadata extents for log
3688 *    tree extent buffers;
3689 *
3690 * 4) The task has reserved enough transaction units / metadata space, but right
3691 *    before it tries to allocate the last extent buffer it needs, a discard
3692 *    operation comes in and, temporarily, removes the last free space entry from
3693 *    the only metadata block group that had free space (discard starts by
3694 *    removing a free space entry from a block group, then does the discard
3695 *    operation and, once it's done, it adds back the free space entry to the
3696 *    block group).
3697 *
3698	 * We also need this 2-phase setup when adding a device to a filesystem with
3699 * a seed device - we must create new metadata and system chunks without adding
3700 * any of the block group items to the chunk, extent and device btrees. If we
3701 * did not do it this way, we would get ENOSPC when attempting to update those
3702 * btrees, since all the chunks from the seed device are read-only.
3703 *
3704 * Phase 1 does the updates and insertions to the chunk btree because if we had
3705 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
3706 * parallel, we risk having too many system chunks allocated by many tasks if
3707 * many tasks reach phase 1 without the previous ones completing phase 2. In the
3708 * extreme case this leads to exhaustion of the system chunk array in the
3709 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
3710 * and with RAID filesystems (so we have more device items in the chunk btree).
3711 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
3712 * the system chunk array due to concurrent allocations") provides more details.
3713 *
3714 * Allocation of system chunks does not happen through this function. A task that
3715 * needs to update the chunk btree (the only btree that uses system chunks), must
3716 * preallocate chunk space by calling either check_system_chunk() or
3717 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
3718	 * metadata chunk or when removing a chunk, while the latter is used before doing
3719	 * a modification to the chunk btree - use cases for the latter are adding,
3720	 * removing and resizing a device as well as relocation of a system chunk.
3721 * See the comment below for more details.
3722 *
3723 * The reservation of system space, done through check_system_chunk(), as well
3724 * as all the updates and insertions into the chunk btree must be done while
3725 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
3726 * an extent buffer from the chunks btree we never trigger allocation of a new
3727	 * system chunk, which would result in a deadlock (trying to lock an extent
3728	 * buffer of the chunk btree twice, the first time before triggering the chunk
3729 * allocation and the second time during chunk allocation while attempting to
3730 * update the chunks btree). The system chunk array is also updated while holding
3731 * that mutex. The same logic applies to removing chunks - we must reserve system
3732 * space, update the chunk btree and the system chunk array in the superblock
3733 * while holding fs_info->chunk_mutex.
3734 *
3735 * This function, btrfs_chunk_alloc(), belongs to phase 1.
3736 *
3737 * If @force is CHUNK_ALLOC_FORCE:
3738 *    - return 1 if it successfully allocates a chunk,
3739 *    - return errors including -ENOSPC otherwise.
3740 * If @force is NOT CHUNK_ALLOC_FORCE:
3741 *    - return 0 if it doesn't need to allocate a new chunk,
3742 *    - return 1 if it successfully allocates a chunk,
3743 *    - return errors including -ENOSPC otherwise.
3744 */
3745int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
3746		      enum btrfs_chunk_alloc_enum force)
3747{
3748	struct btrfs_fs_info *fs_info = trans->fs_info;
3749	struct btrfs_space_info *space_info;
3750	struct btrfs_block_group *ret_bg;
3751	bool wait_for_alloc = false;
3752	bool should_alloc = false;
3753	bool from_extent_allocation = false;
3754	int ret = 0;
3755
3756	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
3757		from_extent_allocation = true;
3758		force = CHUNK_ALLOC_FORCE;
3759	}
3760
3761	/* Don't re-enter if we're already allocating a chunk */
3762	if (trans->allocating_chunk)
3763		return -ENOSPC;
3764	/*
3765	 * Allocation of system chunks can not happen through this path, as we
3766	 * could end up in a deadlock if we are allocating a data or metadata
3767	 * chunk and there is another task modifying the chunk btree.
3768	 *
3769	 * This is because while we are holding the chunk mutex, we will attempt
3770	 * to add the new chunk item to the chunk btree or update an existing
3771	 * device item in the chunk btree, while the other task that is modifying
3772	 * the chunk btree is attempting to COW an extent buffer while holding a
3773	 * lock on it and on its parent - if the COW operation triggers a system
3774	 * chunk allocation, then we can deadlock because we are holding the
3775	 * chunk mutex and we may need to access that extent buffer or its parent
3776	 * in order to add the chunk item or update a device item.
3777	 *
3778	 * Tasks that want to modify the chunk tree should reserve system space
3779	 * before updating the chunk btree, by calling either
3780	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
3781	 * It's possible that after a task reserves the space, it still ends up
3782	 * here - this happens in the cases described above at do_chunk_alloc().
3783	 * The task will have to either retry or fail.
3784	 */
3785	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3786		return -ENOSPC;
3787
3788	space_info = btrfs_find_space_info(fs_info, flags);
3789	ASSERT(space_info);
3790
3791	do {
3792		spin_lock(&space_info->lock);
3793		if (force < space_info->force_alloc)
3794			force = space_info->force_alloc;
3795		should_alloc = should_alloc_chunk(fs_info, space_info, force);
3796		if (space_info->full) {
3797			/* No more free physical space */
3798			if (should_alloc)
3799				ret = -ENOSPC;
3800			else
3801				ret = 0;
3802			spin_unlock(&space_info->lock);
3803			return ret;
3804		} else if (!should_alloc) {
3805			spin_unlock(&space_info->lock);
3806			return 0;
3807		} else if (space_info->chunk_alloc) {
3808			/*
3809			 * Someone is already allocating, so we need to block
3810			 * until this someone is finished and then loop to
3811			 * recheck if we should continue with our allocation
3812			 * attempt.
3813			 */
3814			wait_for_alloc = true;
3815			force = CHUNK_ALLOC_NO_FORCE;
3816			spin_unlock(&space_info->lock);
3817			mutex_lock(&fs_info->chunk_mutex);
3818			mutex_unlock(&fs_info->chunk_mutex);
3819		} else {
3820			/* Proceed with allocation */
3821			space_info->chunk_alloc = 1;
3822			wait_for_alloc = false;
3823			spin_unlock(&space_info->lock);
3824		}
3825
3826		cond_resched();
3827	} while (wait_for_alloc);
3828
3829	mutex_lock(&fs_info->chunk_mutex);
3830	trans->allocating_chunk = true;
3831
3832	/*
3833	 * If we have mixed data/metadata chunks we want to make sure we keep
3834	 * allocating mixed chunks instead of individual chunks.
3835	 */
3836	if (btrfs_mixed_space_info(space_info))
3837		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3838
3839	/*
3840	 * if we're doing a data chunk, go ahead and make sure that
3841	 * we keep a reasonable number of metadata chunks allocated in the
3842	 * FS as well.
3843	 */
3844	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3845		fs_info->data_chunk_allocations++;
3846		if (!(fs_info->data_chunk_allocations %
3847		      fs_info->metadata_ratio))
3848			force_metadata_allocation(fs_info);
3849	}
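	/*
	 * Editor's note: metadata_ratio comes from the "metadata_ratio=N"
	 * mount option; e.g. with N=8 every 8th data chunk allocation also
	 * forces a metadata chunk allocation via force_metadata_allocation().
	 */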
3850
3851	ret_bg = do_chunk_alloc(trans, flags);
3852	trans->allocating_chunk = false;
3853
3854	if (IS_ERR(ret_bg)) {
3855		ret = PTR_ERR(ret_bg);
3856	} else if (from_extent_allocation) {
3857		/*
3858		 * New block group is likely to be used soon. Try to activate
3859		 * it now. Failure is OK for now.
3860		 */
3861		btrfs_zone_activate(ret_bg);
3862	}
3863
3864	if (!ret)
3865		btrfs_put_block_group(ret_bg);
3866
3867	spin_lock(&space_info->lock);
3868	if (ret < 0) {
3869		if (ret == -ENOSPC)
3870			space_info->full = 1;
3871		else
3872			goto out;
3873	} else {
3874		ret = 1;
3875		space_info->max_extent_size = 0;
3876	}
3877
3878	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3879out:
3880	space_info->chunk_alloc = 0;
3881	spin_unlock(&space_info->lock);
3882	mutex_unlock(&fs_info->chunk_mutex);
3883
3884	return ret;
3885}
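/*
 * Editor's note - hedged sketch of a typical phase 1 call site (assumed,
 * mirroring what the space flushing code does; not code from this file):
 *
 *	u64 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
 *
 *	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_NO_FORCE);
 *
 * The matching phase 2, inserting the block group item and the device
 * extent items, happens later from btrfs_create_pending_block_groups()
 * when the transaction handle is ended or committed.
 */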
3886
3887static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
3888{
3889	u64 num_dev;
3890
3891	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
3892	if (!num_dev)
3893		num_dev = fs_info->fs_devices->rw_devices;
3894
3895	return num_dev;
3896}
3897
3898static void reserve_chunk_space(struct btrfs_trans_handle *trans,
3899				u64 bytes,
3900				u64 type)
3901{
3902	struct btrfs_fs_info *fs_info = trans->fs_info;
3903	struct btrfs_space_info *info;
3904	u64 left;
3905	int ret = 0;
3906
3907	/*
3908	 * Needed because we can end up allocating a system chunk and we need an
3909	 * atomic, race-free space reservation in the chunk block reserve.
3910	 */
3911	lockdep_assert_held(&fs_info->chunk_mutex);
3912
3913	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3914	spin_lock(&info->lock);
3915	left = info->total_bytes - btrfs_space_info_used(info, true);
3916	spin_unlock(&info->lock);
3917
3918	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3919		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3920			   left, bytes, type);
3921		btrfs_dump_space_info(fs_info, info, 0, 0);
3922	}
3923
3924	if (left < bytes) {
3925		u64 flags = btrfs_system_alloc_profile(fs_info);
3926		struct btrfs_block_group *bg;
3927
3928		/*
3929		 * Ignore failure to create system chunk. We might end up not
3930		 * needing it, as we might not need to COW all nodes/leaves from
3931		 * the paths we visit in the chunk tree (they were already COWed
3932		 * or created in the current transaction for example).
3933		 */
3934		bg = btrfs_create_chunk(trans, flags);
3935		if (IS_ERR(bg)) {
3936			ret = PTR_ERR(bg);
3937		} else {
3938			/*
3939			 * We have a new chunk. We also need to activate it for
3940			 * a zoned filesystem.
3941			 */
3942			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
3943			if (ret < 0)
3944				return;
3945
3946			/*
3947			 * If we fail to add the chunk item here, we end up
3948			 * trying again at phase 2 of chunk allocation, at
3949			 * btrfs_create_pending_block_groups(). So ignore
3950			 * any error here. An ENOSPC here could happen, due to
3951			 * the cases described at do_chunk_alloc() - the system
3952			 * block group we just created was just turned into RO
3953			 * mode by a scrub for example, or a running discard
3954			 * temporarily removed its free space entries, etc.
3955			 */
3956			btrfs_chunk_alloc_add_chunk_item(trans, bg);
3957		}
3958	}
3959
3960	if (!ret) {
3961		ret = btrfs_block_rsv_add(fs_info,
3962					  &fs_info->chunk_block_rsv,
3963					  bytes, BTRFS_RESERVE_NO_FLUSH);
3964		if (!ret)
3965			trans->chunk_bytes_reserved += bytes;
3966	}
3967}
3968
3969/*
3970 * Reserve space in the system space for allocating or removing a chunk.
3971 * The caller must be holding fs_info->chunk_mutex.
3972 */
3973void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3974{
3975	struct btrfs_fs_info *fs_info = trans->fs_info;
3976	const u64 num_devs = get_profile_num_devs(fs_info, type);
3977	u64 bytes;
3978
3979	/* num_devs device items to update and 1 chunk item to add or remove. */
3980	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
3981		btrfs_calc_insert_metadata_size(fs_info, 1);
3982
3983	reserve_chunk_space(trans, bytes, type);
3984}
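/*
 * Editor's note - illustrative sizing example (assumed values): allocating a
 * RAID1 chunk gives num_devs = 2 (that profile's maximum device count), so
 * the reservation covers two device item updates plus one chunk item
 * insertion, all charged to fs_info->chunk_block_rsv by
 * reserve_chunk_space().
 */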
3985
3986/*
3987 * Reserve space in the system space, if needed, for doing a modification to the
3988 * chunk btree.
3989 *
3990 * @trans:		A transaction handle.
3991 * @is_item_insertion:	Indicates whether the modification is for inserting a new item
3992 *			in the chunk btree or if it's for the deletion or update
3993 *			of an existing item.
3994 *
3995 * This is used in a context where we need to update the chunk btree outside
3996 * block group allocation and removal, to avoid a deadlock with a concurrent
3997 * task that is allocating a metadata or data block group and therefore needs to
3998 * update the chunk btree while holding the chunk mutex. After the update to the
3999 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
4000 *
4001 */
4002void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
4003				  bool is_item_insertion)
4004{
4005	struct btrfs_fs_info *fs_info = trans->fs_info;
4006	u64 bytes;
4007
4008	if (is_item_insertion)
4009		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
4010	else
4011		bytes = btrfs_calc_metadata_size(fs_info, 1);
4012
4013	mutex_lock(&fs_info->chunk_mutex);
4014	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
4015	mutex_unlock(&fs_info->chunk_mutex);
4016}
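/*
 * Editor's note - hedged usage sketch (assumed, not code from this file): a
 * task about to insert a new item while, say, adding a device would wrap the
 * chunk btree modification like
 *
 *	btrfs_reserve_chunk_metadata(trans, true);
 *	... insert/update items in the chunk btree ...
 *	btrfs_trans_release_chunk_metadata(trans);
 *
 * keeping the reservation only for the duration of the modification.
 */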
4017
4018void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
4019{
4020	struct btrfs_block_group *block_group;
4021
4022	block_group = btrfs_lookup_first_block_group(info, 0);
4023	while (block_group) {
4024		btrfs_wait_block_group_cache_done(block_group);
4025		spin_lock(&block_group->lock);
4026		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
4027				       &block_group->runtime_flags)) {
4028			struct inode *inode = block_group->inode;
4029
4030			block_group->inode = NULL;
4031			spin_unlock(&block_group->lock);
4032
4033			ASSERT(block_group->io_ctl.inode == NULL);
4034			iput(inode);
4035		} else {
4036			spin_unlock(&block_group->lock);
4037		}
4038		block_group = btrfs_next_block_group(block_group);
4039	}
4040}
4041
4042/*
4043 * Must be called only after stopping all workers, since we could have block
4044 * group caching kthreads running, and therefore they could race with us if we
4045 * freed the block groups before stopping them.
4046 */
4047int btrfs_free_block_groups(struct btrfs_fs_info *info)
4048{
4049	struct btrfs_block_group *block_group;
4050	struct btrfs_space_info *space_info;
4051	struct btrfs_caching_control *caching_ctl;
4052	struct rb_node *n;
4053
4054	write_lock(&info->block_group_cache_lock);
4055	while (!list_empty(&info->caching_block_groups)) {
4056		caching_ctl = list_entry(info->caching_block_groups.next,
4057					 struct btrfs_caching_control, list);
4058		list_del(&caching_ctl->list);
4059		btrfs_put_caching_control(caching_ctl);
4060	}
4061	write_unlock(&info->block_group_cache_lock);
4062
4063	spin_lock(&info->unused_bgs_lock);
4064	while (!list_empty(&info->unused_bgs)) {
4065		block_group = list_first_entry(&info->unused_bgs,
4066					       struct btrfs_block_group,
4067					       bg_list);
4068		list_del_init(&block_group->bg_list);
4069		btrfs_put_block_group(block_group);
4070	}
4071
4072	while (!list_empty(&info->reclaim_bgs)) {
4073		block_group = list_first_entry(&info->reclaim_bgs,
4074					       struct btrfs_block_group,
4075					       bg_list);
4076		list_del_init(&block_group->bg_list);
4077		btrfs_put_block_group(block_group);
4078	}
4079	spin_unlock(&info->unused_bgs_lock);
4080
4081	spin_lock(&info->zone_active_bgs_lock);
4082	while (!list_empty(&info->zone_active_bgs)) {
4083		block_group = list_first_entry(&info->zone_active_bgs,
4084					       struct btrfs_block_group,
4085					       active_bg_list);
4086		list_del_init(&block_group->active_bg_list);
4087		btrfs_put_block_group(block_group);
4088	}
4089	spin_unlock(&info->zone_active_bgs_lock);
4090
4091	write_lock(&info->block_group_cache_lock);
4092	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
4093		block_group = rb_entry(n, struct btrfs_block_group,
4094				       cache_node);
4095		rb_erase_cached(&block_group->cache_node,
4096				&info->block_group_cache_tree);
4097		RB_CLEAR_NODE(&block_group->cache_node);
4098		write_unlock(&info->block_group_cache_lock);
4099
4100		down_write(&block_group->space_info->groups_sem);
4101		list_del(&block_group->list);
4102		up_write(&block_group->space_info->groups_sem);
4103
4104		/*
4105		 * We haven't cached this block group, which means we could
4106		 * possibly have excluded extents on this block group.
4107		 */
4108		if (block_group->cached == BTRFS_CACHE_NO ||
4109		    block_group->cached == BTRFS_CACHE_ERROR)
4110			btrfs_free_excluded_extents(block_group);
4111
4112		btrfs_remove_free_space_cache(block_group);
4113		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
4114		ASSERT(list_empty(&block_group->dirty_list));
4115		ASSERT(list_empty(&block_group->io_list));
4116		ASSERT(list_empty(&block_group->bg_list));
4117		ASSERT(refcount_read(&block_group->refs) == 1);
4118		ASSERT(block_group->swap_extents == 0);
4119		btrfs_put_block_group(block_group);
4120
4121		write_lock(&info->block_group_cache_lock);
4122	}
4123	write_unlock(&info->block_group_cache_lock);
4124
4125	btrfs_release_global_block_rsv(info);
4126
4127	while (!list_empty(&info->space_info)) {
4128		space_info = list_entry(info->space_info.next,
4129					struct btrfs_space_info,
4130					list);
4131
4132		/*
4133		 * Do not hide this behind enospc_debug, this is actually
4134		 * important and indicates a real bug if this happens.
4135		 */
4136		if (WARN_ON(space_info->bytes_pinned > 0 ||
4137			    space_info->bytes_may_use > 0))
4138			btrfs_dump_space_info(info, space_info, 0, 0);
4139
4140		/*
4141		 * If there was a failure to cleanup a log tree, very likely due
4142		 * to an IO failure on a writeback attempt of one or more of its
4143		 * extent buffers, we could not do proper (and cheap) unaccounting
4144		 * of their reserved space, so don't warn on bytes_reserved > 0 in
4145		 * that case.
4146		 */
4147		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
4148		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
4149			if (WARN_ON(space_info->bytes_reserved > 0))
4150				btrfs_dump_space_info(info, space_info, 0, 0);
4151		}
4152
4153		WARN_ON(space_info->reclaim_size > 0);
4154		list_del(&space_info->list);
4155		btrfs_sysfs_remove_space_info(space_info);
4156	}
4157	return 0;
4158}
4159
4160void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4161{
4162	atomic_inc(&cache->frozen);
4163}
4164
4165void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4166{
4167	struct btrfs_fs_info *fs_info = block_group->fs_info;
4168	struct extent_map_tree *em_tree;
4169	struct extent_map *em;
4170	bool cleanup;
4171
4172	spin_lock(&block_group->lock);
4173	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
4174		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
4175	spin_unlock(&block_group->lock);
4176
4177	if (cleanup) {
4178		em_tree = &fs_info->mapping_tree;
4179		write_lock(&em_tree->lock);
4180		em = lookup_extent_mapping(em_tree, block_group->start,
4181					   1);
4182		BUG_ON(!em); /* logic error, can't happen */
4183		remove_extent_mapping(em_tree, em);
4184		write_unlock(&em_tree->lock);
4185
4186		/* once for us and once for the tree */
4187		free_extent_map(em);
4188		free_extent_map(em);
4189
4190		/*
4191		 * We may have left one free space entry, and other tasks
4192		 * trimming this block group may each have left one entry.
4193		 * Free them if any.
4194		 */
4195		btrfs_remove_free_space_cache(block_group);
4196	}
4197}
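/*
 * Editor's note - hedged sketch of the freeze/unfreeze pattern (assumed, not
 * code from this file): tasks such as scrub or trim that must keep the block
 * group's chunk mapping alive across a possible removal typically do
 *
 *	btrfs_freeze_block_group(bg);
 *	... work that relies on the extent map / free space cache ...
 *	btrfs_unfreeze_block_group(bg);
 *
 * If the group was removed in the meanwhile (BLOCK_GROUP_FLAG_REMOVED set),
 * the last unfreeze performs the deferred cleanup above.
 */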
4198
4199bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4200{
4201	bool ret = true;
4202
4203	spin_lock(&bg->lock);
4204	if (bg->ro)
4205		ret = false;
4206	else
4207		bg->swap_extents++;
4208	spin_unlock(&bg->lock);
4209
4210	return ret;
4211}
4212
4213void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4214{
4215	spin_lock(&bg->lock);
4216	ASSERT(!bg->ro);
4217	ASSERT(bg->swap_extents >= amount);
4218	bg->swap_extents -= amount;
4219	spin_unlock(&bg->lock);
4220}
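/*
 * Editor's note: the swap_extents counter pairs with swapfile activation
 * (assumed caller, not shown here) - while a block group backs extents of an
 * active swap file it must not be turned read-only, and conversely a
 * read-only block group refuses new swap extents, which is why
 * btrfs_inc_block_group_swap_extents() fails when bg->ro is set.
 */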