Linux Audio

Check our new training course

Loading...
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2015 Facebook.  All rights reserved.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/sched/mm.h>
   8#include "ctree.h"
   9#include "disk-io.h"
  10#include "locking.h"
  11#include "free-space-tree.h"
  12#include "transaction.h"
  13#include "block-group.h"
  14
  15static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
  16					struct btrfs_block_group *block_group,
 
  17					struct btrfs_path *path);
  18
  19void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
  20{
  21	u32 bitmap_range;
  22	size_t bitmap_size;
  23	u64 num_bitmaps, total_bitmap_size;
  24
  25	if (WARN_ON(cache->length == 0))
  26		btrfs_warn(cache->fs_info, "block group %llu length is zero",
  27			   cache->start);
  28
  29	/*
  30	 * We convert to bitmaps when the disk space required for using extents
  31	 * exceeds that required for using bitmaps.
  32	 */
  33	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
  34	num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
 
  35	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
  36	total_bitmap_size = num_bitmaps * bitmap_size;
  37	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
  38					    sizeof(struct btrfs_item));
  39
  40	/*
  41	 * We allow for a small buffer between the high threshold and low
  42	 * threshold to avoid thrashing back and forth between the two formats.
  43	 */
  44	if (cache->bitmap_high_thresh > 100)
  45		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
  46	else
  47		cache->bitmap_low_thresh = 0;
  48}
  49
  50static int add_new_free_space_info(struct btrfs_trans_handle *trans,
  51				   struct btrfs_block_group *block_group,
 
  52				   struct btrfs_path *path)
  53{
  54	struct btrfs_root *root = trans->fs_info->free_space_root;
  55	struct btrfs_free_space_info *info;
  56	struct btrfs_key key;
  57	struct extent_buffer *leaf;
  58	int ret;
  59
  60	key.objectid = block_group->start;
  61	key.type = BTRFS_FREE_SPACE_INFO_KEY;
  62	key.offset = block_group->length;
  63
  64	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
  65	if (ret)
  66		goto out;
  67
  68	leaf = path->nodes[0];
  69	info = btrfs_item_ptr(leaf, path->slots[0],
  70			      struct btrfs_free_space_info);
  71	btrfs_set_free_space_extent_count(leaf, info, 0);
  72	btrfs_set_free_space_flags(leaf, info, 0);
  73	btrfs_mark_buffer_dirty(leaf);
  74
  75	ret = 0;
  76out:
  77	btrfs_release_path(path);
  78	return ret;
  79}
  80
  81EXPORT_FOR_TESTS
  82struct btrfs_free_space_info *search_free_space_info(
  83		struct btrfs_trans_handle *trans,
  84		struct btrfs_block_group *block_group,
  85		struct btrfs_path *path, int cow)
  86{
  87	struct btrfs_fs_info *fs_info = block_group->fs_info;
  88	struct btrfs_root *root = fs_info->free_space_root;
  89	struct btrfs_key key;
  90	int ret;
  91
  92	key.objectid = block_group->start;
  93	key.type = BTRFS_FREE_SPACE_INFO_KEY;
  94	key.offset = block_group->length;
  95
  96	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
  97	if (ret < 0)
  98		return ERR_PTR(ret);
  99	if (ret != 0) {
 100		btrfs_warn(fs_info, "missing free space info for %llu",
 101			   block_group->start);
 102		ASSERT(0);
 103		return ERR_PTR(-ENOENT);
 104	}
 105
 106	return btrfs_item_ptr(path->nodes[0], path->slots[0],
 107			      struct btrfs_free_space_info);
 108}
 109
 110/*
 111 * btrfs_search_slot() but we're looking for the greatest key less than the
 112 * passed key.
 113 */
 114static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
 115				  struct btrfs_root *root,
 116				  struct btrfs_key *key, struct btrfs_path *p,
 117				  int ins_len, int cow)
 118{
 119	int ret;
 120
 121	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
 122	if (ret < 0)
 123		return ret;
 124
 125	if (ret == 0) {
 126		ASSERT(0);
 127		return -EIO;
 128	}
 129
 130	if (p->slots[0] == 0) {
 131		ASSERT(0);
 132		return -EIO;
 133	}
 134	p->slots[0]--;
 135
 136	return 0;
 137}
 138
 139static inline u32 free_space_bitmap_size(const struct btrfs_fs_info *fs_info,
 140					 u64 size)
 141{
 142	return DIV_ROUND_UP(size >> fs_info->sectorsize_bits, BITS_PER_BYTE);
 143}
 144
 145static unsigned long *alloc_bitmap(u32 bitmap_size)
 146{
 147	unsigned long *ret;
 148	unsigned int nofs_flag;
 149	u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long));
 150
 151	/*
 152	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
 153	 * into the filesystem as the free space bitmap can be modified in the
 154	 * critical section of a transaction commit.
 155	 *
 156	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
 157	 * know that recursion is unsafe.
 158	 */
 159	nofs_flag = memalloc_nofs_save();
 160	ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
 161	memalloc_nofs_restore(nofs_flag);
 162	return ret;
 163}
 164
 165static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
 166{
 167	u8 *p = ((u8 *)map) + BIT_BYTE(start);
 168	const unsigned int size = start + len;
 169	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
 170	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
 171
 172	while (len - bits_to_set >= 0) {
 173		*p |= mask_to_set;
 174		len -= bits_to_set;
 175		bits_to_set = BITS_PER_BYTE;
 176		mask_to_set = ~0;
 177		p++;
 178	}
 179	if (len) {
 180		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
 181		*p |= mask_to_set;
 182	}
 183}
 184
 185EXPORT_FOR_TESTS
 186int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 187				  struct btrfs_block_group *block_group,
 
 188				  struct btrfs_path *path)
 189{
 190	struct btrfs_fs_info *fs_info = trans->fs_info;
 191	struct btrfs_root *root = fs_info->free_space_root;
 192	struct btrfs_free_space_info *info;
 193	struct btrfs_key key, found_key;
 194	struct extent_buffer *leaf;
 195	unsigned long *bitmap;
 196	char *bitmap_cursor;
 197	u64 start, end;
 198	u64 bitmap_range, i;
 199	u32 bitmap_size, flags, expected_extent_count;
 200	u32 extent_count = 0;
 201	int done = 0, nr;
 202	int ret;
 203
 204	bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
 
 205	bitmap = alloc_bitmap(bitmap_size);
 206	if (!bitmap) {
 207		ret = -ENOMEM;
 208		goto out;
 209	}
 210
 211	start = block_group->start;
 212	end = block_group->start + block_group->length;
 213
 214	key.objectid = end - 1;
 215	key.type = (u8)-1;
 216	key.offset = (u64)-1;
 217
 218	while (!done) {
 219		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 220		if (ret)
 221			goto out;
 222
 223		leaf = path->nodes[0];
 224		nr = 0;
 225		path->slots[0]++;
 226		while (path->slots[0] > 0) {
 227			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 228
 229			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
 230				ASSERT(found_key.objectid == block_group->start);
 231				ASSERT(found_key.offset == block_group->length);
 232				done = 1;
 233				break;
 234			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
 235				u64 first, last;
 236
 237				ASSERT(found_key.objectid >= start);
 238				ASSERT(found_key.objectid < end);
 239				ASSERT(found_key.objectid + found_key.offset <= end);
 240
 241				first = div_u64(found_key.objectid - start,
 242						fs_info->sectorsize);
 243				last = div_u64(found_key.objectid + found_key.offset - start,
 244					       fs_info->sectorsize);
 245				le_bitmap_set(bitmap, first, last - first);
 246
 247				extent_count++;
 248				nr++;
 249				path->slots[0]--;
 250			} else {
 251				ASSERT(0);
 252			}
 253		}
 254
 255		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
 256		if (ret)
 257			goto out;
 258		btrfs_release_path(path);
 259	}
 260
 261	info = search_free_space_info(trans, block_group, path, 1);
 262	if (IS_ERR(info)) {
 263		ret = PTR_ERR(info);
 264		goto out;
 265	}
 266	leaf = path->nodes[0];
 267	flags = btrfs_free_space_flags(leaf, info);
 268	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
 269	btrfs_set_free_space_flags(leaf, info, flags);
 270	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
 271	btrfs_mark_buffer_dirty(leaf);
 272	btrfs_release_path(path);
 273
 274	if (extent_count != expected_extent_count) {
 275		btrfs_err(fs_info,
 276			  "incorrect extent count for %llu; counted %u, expected %u",
 277			  block_group->start, extent_count,
 278			  expected_extent_count);
 279		ASSERT(0);
 280		ret = -EIO;
 281		goto out;
 282	}
 283
 284	bitmap_cursor = (char *)bitmap;
 285	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
 286	i = start;
 287	while (i < end) {
 288		unsigned long ptr;
 289		u64 extent_size;
 290		u32 data_size;
 291
 292		extent_size = min(end - i, bitmap_range);
 293		data_size = free_space_bitmap_size(fs_info, extent_size);
 
 294
 295		key.objectid = i;
 296		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
 297		key.offset = extent_size;
 298
 299		ret = btrfs_insert_empty_item(trans, root, path, &key,
 300					      data_size);
 301		if (ret)
 302			goto out;
 303
 304		leaf = path->nodes[0];
 305		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 306		write_extent_buffer(leaf, bitmap_cursor, ptr,
 307				    data_size);
 308		btrfs_mark_buffer_dirty(leaf);
 309		btrfs_release_path(path);
 310
 311		i += extent_size;
 312		bitmap_cursor += data_size;
 313	}
 314
 315	ret = 0;
 316out:
 317	kvfree(bitmap);
 318	if (ret)
 319		btrfs_abort_transaction(trans, ret);
 320	return ret;
 321}
 322
 323EXPORT_FOR_TESTS
 324int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 325				  struct btrfs_block_group *block_group,
 
 326				  struct btrfs_path *path)
 327{
 328	struct btrfs_fs_info *fs_info = trans->fs_info;
 329	struct btrfs_root *root = fs_info->free_space_root;
 330	struct btrfs_free_space_info *info;
 331	struct btrfs_key key, found_key;
 332	struct extent_buffer *leaf;
 333	unsigned long *bitmap;
 334	u64 start, end;
 
 
 
 335	u32 bitmap_size, flags, expected_extent_count;
 336	unsigned long nrbits, start_bit, end_bit;
 337	u32 extent_count = 0;
 338	int done = 0, nr;
 339	int ret;
 340
 341	bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
 
 342	bitmap = alloc_bitmap(bitmap_size);
 343	if (!bitmap) {
 344		ret = -ENOMEM;
 345		goto out;
 346	}
 347
 348	start = block_group->start;
 349	end = block_group->start + block_group->length;
 350
 351	key.objectid = end - 1;
 352	key.type = (u8)-1;
 353	key.offset = (u64)-1;
 354
 355	while (!done) {
 356		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 357		if (ret)
 358			goto out;
 359
 360		leaf = path->nodes[0];
 361		nr = 0;
 362		path->slots[0]++;
 363		while (path->slots[0] > 0) {
 364			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 365
 366			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
 367				ASSERT(found_key.objectid == block_group->start);
 368				ASSERT(found_key.offset == block_group->length);
 369				done = 1;
 370				break;
 371			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
 372				unsigned long ptr;
 373				char *bitmap_cursor;
 374				u32 bitmap_pos, data_size;
 375
 376				ASSERT(found_key.objectid >= start);
 377				ASSERT(found_key.objectid < end);
 378				ASSERT(found_key.objectid + found_key.offset <= end);
 379
 380				bitmap_pos = div_u64(found_key.objectid - start,
 381						     fs_info->sectorsize *
 382						     BITS_PER_BYTE);
 383				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
 384				data_size = free_space_bitmap_size(fs_info,
 385								found_key.offset);
 386
 387				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
 388				read_extent_buffer(leaf, bitmap_cursor, ptr,
 389						   data_size);
 390
 391				nr++;
 392				path->slots[0]--;
 393			} else {
 394				ASSERT(0);
 395			}
 396		}
 397
 398		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
 399		if (ret)
 400			goto out;
 401		btrfs_release_path(path);
 402	}
 403
 404	info = search_free_space_info(trans, block_group, path, 1);
 405	if (IS_ERR(info)) {
 406		ret = PTR_ERR(info);
 407		goto out;
 408	}
 409	leaf = path->nodes[0];
 410	flags = btrfs_free_space_flags(leaf, info);
 411	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
 412	btrfs_set_free_space_flags(leaf, info, flags);
 413	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
 414	btrfs_mark_buffer_dirty(leaf);
 415	btrfs_release_path(path);
 416
 417	nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
 418	start_bit = find_next_bit_le(bitmap, nrbits, 0);
 
 
 
 
 
 
 
 
 419
 420	while (start_bit < nrbits) {
 421		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
 422		ASSERT(start_bit < end_bit);
 
 423
 424		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
 
 
 
 
 
 
 
 425		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 426		key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;
 427
 428		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 429		if (ret)
 430			goto out;
 431		btrfs_release_path(path);
 432
 433		extent_count++;
 434
 435		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
 436	}
 437
 438	if (extent_count != expected_extent_count) {
 439		btrfs_err(fs_info,
 440			  "incorrect extent count for %llu; counted %u, expected %u",
 441			  block_group->start, extent_count,
 442			  expected_extent_count);
 443		ASSERT(0);
 444		ret = -EIO;
 445		goto out;
 446	}
 447
 448	ret = 0;
 449out:
 450	kvfree(bitmap);
 451	if (ret)
 452		btrfs_abort_transaction(trans, ret);
 453	return ret;
 454}
 455
 456static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
 457					  struct btrfs_block_group *block_group,
 
 458					  struct btrfs_path *path,
 459					  int new_extents)
 460{
 461	struct btrfs_free_space_info *info;
 462	u32 flags;
 463	u32 extent_count;
 464	int ret = 0;
 465
 466	if (new_extents == 0)
 467		return 0;
 468
 469	info = search_free_space_info(trans, block_group, path, 1);
 470	if (IS_ERR(info)) {
 471		ret = PTR_ERR(info);
 472		goto out;
 473	}
 474	flags = btrfs_free_space_flags(path->nodes[0], info);
 475	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
 476
 477	extent_count += new_extents;
 478	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
 479	btrfs_mark_buffer_dirty(path->nodes[0]);
 480	btrfs_release_path(path);
 481
 482	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
 483	    extent_count > block_group->bitmap_high_thresh) {
 484		ret = convert_free_space_to_bitmaps(trans, block_group, path);
 
 485	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
 486		   extent_count < block_group->bitmap_low_thresh) {
 487		ret = convert_free_space_to_extents(trans, block_group, path);
 
 488	}
 489
 490out:
 491	return ret;
 492}
 493
 494EXPORT_FOR_TESTS
 495int free_space_test_bit(struct btrfs_block_group *block_group,
 496			struct btrfs_path *path, u64 offset)
 497{
 498	struct extent_buffer *leaf;
 499	struct btrfs_key key;
 500	u64 found_start, found_end;
 501	unsigned long ptr, i;
 502
 503	leaf = path->nodes[0];
 504	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 505	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
 506
 507	found_start = key.objectid;
 508	found_end = key.objectid + key.offset;
 509	ASSERT(offset >= found_start && offset < found_end);
 510
 511	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 512	i = div_u64(offset - found_start,
 513		    block_group->fs_info->sectorsize);
 514	return !!extent_buffer_test_bit(leaf, ptr, i);
 515}
 516
 517static void free_space_set_bits(struct btrfs_block_group *block_group,
 518				struct btrfs_path *path, u64 *start, u64 *size,
 519				int bit)
 520{
 521	struct btrfs_fs_info *fs_info = block_group->fs_info;
 522	struct extent_buffer *leaf;
 523	struct btrfs_key key;
 524	u64 end = *start + *size;
 525	u64 found_start, found_end;
 526	unsigned long ptr, first, last;
 527
 528	leaf = path->nodes[0];
 529	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 530	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
 531
 532	found_start = key.objectid;
 533	found_end = key.objectid + key.offset;
 534	ASSERT(*start >= found_start && *start < found_end);
 535	ASSERT(end > found_start);
 536
 537	if (end > found_end)
 538		end = found_end;
 539
 540	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 541	first = (*start - found_start) >> fs_info->sectorsize_bits;
 542	last = (end - found_start) >> fs_info->sectorsize_bits;
 543	if (bit)
 544		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
 545	else
 546		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
 547	btrfs_mark_buffer_dirty(leaf);
 548
 549	*size -= end - *start;
 550	*start = end;
 551}
 552
 553/*
 554 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 555 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 556 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 557 * looking for.
 558 */
 559static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
 560				  struct btrfs_root *root, struct btrfs_path *p)
 561{
 562	struct btrfs_key key;
 563
 564	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
 565		p->slots[0]++;
 566		return 0;
 567	}
 568
 569	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
 570	btrfs_release_path(p);
 571
 572	key.objectid += key.offset;
 573	key.type = (u8)-1;
 574	key.offset = (u64)-1;
 575
 576	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
 577}
 578
 579/*
 580 * If remove is 1, then we are removing free space, thus clearing bits in the
 581 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 582 * the bitmap.
 583 */
 584static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
 585				    struct btrfs_block_group *block_group,
 
 586				    struct btrfs_path *path,
 587				    u64 start, u64 size, int remove)
 588{
 589	struct btrfs_root *root = block_group->fs_info->free_space_root;
 590	struct btrfs_key key;
 591	u64 end = start + size;
 592	u64 cur_start, cur_size;
 593	int prev_bit, next_bit;
 594	int new_extents;
 595	int ret;
 596
 597	/*
 598	 * Read the bit for the block immediately before the extent of space if
 599	 * that block is within the block group.
 600	 */
 601	if (start > block_group->start) {
 602		u64 prev_block = start - block_group->fs_info->sectorsize;
 603
 604		key.objectid = prev_block;
 605		key.type = (u8)-1;
 606		key.offset = (u64)-1;
 607
 608		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
 609		if (ret)
 610			goto out;
 611
 612		prev_bit = free_space_test_bit(block_group, path, prev_block);
 613
 614		/* The previous block may have been in the previous bitmap. */
 615		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 616		if (start >= key.objectid + key.offset) {
 617			ret = free_space_next_bitmap(trans, root, path);
 618			if (ret)
 619				goto out;
 620		}
 621	} else {
 622		key.objectid = start;
 623		key.type = (u8)-1;
 624		key.offset = (u64)-1;
 625
 626		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
 627		if (ret)
 628			goto out;
 629
 630		prev_bit = -1;
 631	}
 632
 633	/*
 634	 * Iterate over all of the bitmaps overlapped by the extent of space,
 635	 * clearing/setting bits as required.
 636	 */
 637	cur_start = start;
 638	cur_size = size;
 639	while (1) {
 640		free_space_set_bits(block_group, path, &cur_start, &cur_size,
 641				    !remove);
 642		if (cur_size == 0)
 643			break;
 644		ret = free_space_next_bitmap(trans, root, path);
 645		if (ret)
 646			goto out;
 647	}
 648
 649	/*
 650	 * Read the bit for the block immediately after the extent of space if
 651	 * that block is within the block group.
 652	 */
 653	if (end < block_group->start + block_group->length) {
 654		/* The next block may be in the next bitmap. */
 655		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 656		if (end >= key.objectid + key.offset) {
 657			ret = free_space_next_bitmap(trans, root, path);
 658			if (ret)
 659				goto out;
 660		}
 661
 662		next_bit = free_space_test_bit(block_group, path, end);
 663	} else {
 664		next_bit = -1;
 665	}
 666
 667	if (remove) {
 668		new_extents = -1;
 669		if (prev_bit == 1) {
 670			/* Leftover on the left. */
 671			new_extents++;
 672		}
 673		if (next_bit == 1) {
 674			/* Leftover on the right. */
 675			new_extents++;
 676		}
 677	} else {
 678		new_extents = 1;
 679		if (prev_bit == 1) {
 680			/* Merging with neighbor on the left. */
 681			new_extents--;
 682		}
 683		if (next_bit == 1) {
 684			/* Merging with neighbor on the right. */
 685			new_extents--;
 686		}
 687	}
 688
 689	btrfs_release_path(path);
 690	ret = update_free_space_extent_count(trans, block_group, path,
 691					     new_extents);
 692
 693out:
 694	return ret;
 695}
 696
 697static int remove_free_space_extent(struct btrfs_trans_handle *trans,
 698				    struct btrfs_block_group *block_group,
 
 699				    struct btrfs_path *path,
 700				    u64 start, u64 size)
 701{
 702	struct btrfs_root *root = trans->fs_info->free_space_root;
 703	struct btrfs_key key;
 704	u64 found_start, found_end;
 705	u64 end = start + size;
 706	int new_extents = -1;
 707	int ret;
 708
 709	key.objectid = start;
 710	key.type = (u8)-1;
 711	key.offset = (u64)-1;
 712
 713	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 714	if (ret)
 715		goto out;
 716
 717	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 718
 719	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
 720
 721	found_start = key.objectid;
 722	found_end = key.objectid + key.offset;
 723	ASSERT(start >= found_start && end <= found_end);
 724
 725	/*
 726	 * Okay, now that we've found the free space extent which contains the
 727	 * free space that we are removing, there are four cases:
 728	 *
 729	 * 1. We're using the whole extent: delete the key we found and
 730	 * decrement the free space extent count.
 731	 * 2. We are using part of the extent starting at the beginning: delete
 732	 * the key we found and insert a new key representing the leftover at
 733	 * the end. There is no net change in the number of extents.
 734	 * 3. We are using part of the extent ending at the end: delete the key
 735	 * we found and insert a new key representing the leftover at the
 736	 * beginning. There is no net change in the number of extents.
 737	 * 4. We are using part of the extent in the middle: delete the key we
 738	 * found and insert two new keys representing the leftovers on each
 739	 * side. Where we used to have one extent, we now have two, so increment
 740	 * the extent count. We may need to convert the block group to bitmaps
 741	 * as a result.
 742	 */
 743
 744	/* Delete the existing key (cases 1-4). */
 745	ret = btrfs_del_item(trans, root, path);
 746	if (ret)
 747		goto out;
 748
 749	/* Add a key for leftovers at the beginning (cases 3 and 4). */
 750	if (start > found_start) {
 751		key.objectid = found_start;
 752		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 753		key.offset = start - found_start;
 754
 755		btrfs_release_path(path);
 756		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 757		if (ret)
 758			goto out;
 759		new_extents++;
 760	}
 761
 762	/* Add a key for leftovers at the end (cases 2 and 4). */
 763	if (end < found_end) {
 764		key.objectid = end;
 765		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 766		key.offset = found_end - end;
 767
 768		btrfs_release_path(path);
 769		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 770		if (ret)
 771			goto out;
 772		new_extents++;
 773	}
 774
 775	btrfs_release_path(path);
 776	ret = update_free_space_extent_count(trans, block_group, path,
 777					     new_extents);
 778
 779out:
 780	return ret;
 781}
 782
 783EXPORT_FOR_TESTS
 784int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
 785				  struct btrfs_block_group *block_group,
 
 786				  struct btrfs_path *path, u64 start, u64 size)
 787{
 788	struct btrfs_free_space_info *info;
 789	u32 flags;
 790	int ret;
 791
 792	if (block_group->needs_free_space) {
 793		ret = __add_block_group_free_space(trans, block_group, path);
 
 794		if (ret)
 795			return ret;
 796	}
 797
 798	info = search_free_space_info(NULL, block_group, path, 0);
 799	if (IS_ERR(info))
 800		return PTR_ERR(info);
 801	flags = btrfs_free_space_flags(path->nodes[0], info);
 802	btrfs_release_path(path);
 803
 804	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
 805		return modify_free_space_bitmap(trans, block_group, path,
 806						start, size, 1);
 807	} else {
 808		return remove_free_space_extent(trans, block_group, path,
 809						start, size);
 810	}
 811}
 812
 813int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
 
 814				u64 start, u64 size)
 815{
 816	struct btrfs_block_group *block_group;
 817	struct btrfs_path *path;
 818	int ret;
 819
 820	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
 821		return 0;
 822
 823	path = btrfs_alloc_path();
 824	if (!path) {
 825		ret = -ENOMEM;
 826		goto out;
 827	}
 828
 829	block_group = btrfs_lookup_block_group(trans->fs_info, start);
 830	if (!block_group) {
 831		ASSERT(0);
 832		ret = -ENOENT;
 833		goto out;
 834	}
 835
 836	mutex_lock(&block_group->free_space_lock);
 837	ret = __remove_from_free_space_tree(trans, block_group, path, start,
 838					    size);
 839	mutex_unlock(&block_group->free_space_lock);
 840
 841	btrfs_put_block_group(block_group);
 842out:
 843	btrfs_free_path(path);
 844	if (ret)
 845		btrfs_abort_transaction(trans, ret);
 846	return ret;
 847}
 848
 849static int add_free_space_extent(struct btrfs_trans_handle *trans,
 850				 struct btrfs_block_group *block_group,
 
 851				 struct btrfs_path *path,
 852				 u64 start, u64 size)
 853{
 854	struct btrfs_root *root = trans->fs_info->free_space_root;
 855	struct btrfs_key key, new_key;
 856	u64 found_start, found_end;
 857	u64 end = start + size;
 858	int new_extents = 1;
 859	int ret;
 860
 861	/*
 862	 * We are adding a new extent of free space, but we need to merge
 863	 * extents. There are four cases here:
 864	 *
 865	 * 1. The new extent does not have any immediate neighbors to merge
 866	 * with: add the new key and increment the free space extent count. We
 867	 * may need to convert the block group to bitmaps as a result.
 868	 * 2. The new extent has an immediate neighbor before it: remove the
 869	 * previous key and insert a new key combining both of them. There is no
 870	 * net change in the number of extents.
 871	 * 3. The new extent has an immediate neighbor after it: remove the next
 872	 * key and insert a new key combining both of them. There is no net
 873	 * change in the number of extents.
 874	 * 4. The new extent has immediate neighbors on both sides: remove both
 875	 * of the keys and insert a new key combining all of them. Where we used
 876	 * to have two extents, we now have one, so decrement the extent count.
 877	 */
 878
 879	new_key.objectid = start;
 880	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 881	new_key.offset = size;
 882
 883	/* Search for a neighbor on the left. */
 884	if (start == block_group->start)
 885		goto right;
 886	key.objectid = start - 1;
 887	key.type = (u8)-1;
 888	key.offset = (u64)-1;
 889
 890	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 891	if (ret)
 892		goto out;
 893
 894	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 895
 896	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
 897		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
 898		btrfs_release_path(path);
 899		goto right;
 900	}
 901
 902	found_start = key.objectid;
 903	found_end = key.objectid + key.offset;
 904	ASSERT(found_start >= block_group->start &&
 905	       found_end > block_group->start);
 906	ASSERT(found_start < start && found_end <= start);
 907
 908	/*
 909	 * Delete the neighbor on the left and absorb it into the new key (cases
 910	 * 2 and 4).
 911	 */
 912	if (found_end == start) {
 913		ret = btrfs_del_item(trans, root, path);
 914		if (ret)
 915			goto out;
 916		new_key.objectid = found_start;
 917		new_key.offset += key.offset;
 918		new_extents--;
 919	}
 920	btrfs_release_path(path);
 921
 922right:
 923	/* Search for a neighbor on the right. */
 924	if (end == block_group->start + block_group->length)
 925		goto insert;
 926	key.objectid = end;
 927	key.type = (u8)-1;
 928	key.offset = (u64)-1;
 929
 930	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 931	if (ret)
 932		goto out;
 933
 934	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 935
 936	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
 937		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
 938		btrfs_release_path(path);
 939		goto insert;
 940	}
 941
 942	found_start = key.objectid;
 943	found_end = key.objectid + key.offset;
 944	ASSERT(found_start >= block_group->start &&
 945	       found_end > block_group->start);
 946	ASSERT((found_start < start && found_end <= start) ||
 947	       (found_start >= end && found_end > end));
 948
 949	/*
 950	 * Delete the neighbor on the right and absorb it into the new key
 951	 * (cases 3 and 4).
 952	 */
 953	if (found_start == end) {
 954		ret = btrfs_del_item(trans, root, path);
 955		if (ret)
 956			goto out;
 957		new_key.offset += key.offset;
 958		new_extents--;
 959	}
 960	btrfs_release_path(path);
 961
 962insert:
 963	/* Insert the new key (cases 1-4). */
 964	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
 965	if (ret)
 966		goto out;
 967
 968	btrfs_release_path(path);
 969	ret = update_free_space_extent_count(trans, block_group, path,
 970					     new_extents);
 971
 972out:
 973	return ret;
 974}
 975
 976EXPORT_FOR_TESTS
 977int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
 978			     struct btrfs_block_group *block_group,
 
 979			     struct btrfs_path *path, u64 start, u64 size)
 980{
 981	struct btrfs_free_space_info *info;
 982	u32 flags;
 983	int ret;
 984
 985	if (block_group->needs_free_space) {
 986		ret = __add_block_group_free_space(trans, block_group, path);
 
 987		if (ret)
 988			return ret;
 989	}
 990
 991	info = search_free_space_info(NULL, block_group, path, 0);
 992	if (IS_ERR(info))
 993		return PTR_ERR(info);
 994	flags = btrfs_free_space_flags(path->nodes[0], info);
 995	btrfs_release_path(path);
 996
 997	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
 998		return modify_free_space_bitmap(trans, block_group, path,
 999						start, size, 0);
1000	} else {
1001		return add_free_space_extent(trans, block_group, path, start,
1002					     size);
1003	}
1004}
1005
1006int add_to_free_space_tree(struct btrfs_trans_handle *trans,
 
1007			   u64 start, u64 size)
1008{
1009	struct btrfs_block_group *block_group;
1010	struct btrfs_path *path;
1011	int ret;
1012
1013	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
1014		return 0;
1015
1016	path = btrfs_alloc_path();
1017	if (!path) {
1018		ret = -ENOMEM;
1019		goto out;
1020	}
1021
1022	block_group = btrfs_lookup_block_group(trans->fs_info, start);
1023	if (!block_group) {
1024		ASSERT(0);
1025		ret = -ENOENT;
1026		goto out;
1027	}
1028
1029	mutex_lock(&block_group->free_space_lock);
1030	ret = __add_to_free_space_tree(trans, block_group, path, start, size);
 
1031	mutex_unlock(&block_group->free_space_lock);
1032
1033	btrfs_put_block_group(block_group);
1034out:
1035	btrfs_free_path(path);
1036	if (ret)
1037		btrfs_abort_transaction(trans, ret);
1038	return ret;
1039}
1040
1041/*
1042 * Populate the free space tree by walking the extent tree. Operations on the
1043 * extent tree that happen as a result of writes to the free space tree will go
1044 * through the normal add/remove hooks.
1045 */
1046static int populate_free_space_tree(struct btrfs_trans_handle *trans,
1047				    struct btrfs_block_group *block_group)
 
1048{
1049	struct btrfs_root *extent_root = trans->fs_info->extent_root;
1050	struct btrfs_path *path, *path2;
1051	struct btrfs_key key;
1052	u64 start, end;
1053	int ret;
1054
1055	path = btrfs_alloc_path();
1056	if (!path)
1057		return -ENOMEM;
1058	path->reada = READA_FORWARD;
1059
1060	path2 = btrfs_alloc_path();
1061	if (!path2) {
1062		btrfs_free_path(path);
1063		return -ENOMEM;
1064	}
1065
1066	ret = add_new_free_space_info(trans, block_group, path2);
1067	if (ret)
1068		goto out;
1069
1070	mutex_lock(&block_group->free_space_lock);
1071
1072	/*
1073	 * Iterate through all of the extent and metadata items in this block
1074	 * group, adding the free space between them and the free space at the
1075	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
1076	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
1077	 * contained in.
1078	 */
1079	key.objectid = block_group->start;
1080	key.type = BTRFS_EXTENT_ITEM_KEY;
1081	key.offset = 0;
1082
1083	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
1084	if (ret < 0)
1085		goto out_locked;
1086	ASSERT(ret == 0);
1087
1088	start = block_group->start;
1089	end = block_group->start + block_group->length;
1090	while (1) {
1091		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1092
1093		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
1094		    key.type == BTRFS_METADATA_ITEM_KEY) {
1095			if (key.objectid >= end)
1096				break;
1097
1098			if (start < key.objectid) {
1099				ret = __add_to_free_space_tree(trans,
1100							       block_group,
1101							       path2, start,
1102							       key.objectid -
1103							       start);
1104				if (ret)
1105					goto out_locked;
1106			}
1107			start = key.objectid;
1108			if (key.type == BTRFS_METADATA_ITEM_KEY)
1109				start += trans->fs_info->nodesize;
1110			else
1111				start += key.offset;
1112		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1113			if (key.objectid != block_group->start)
1114				break;
1115		}
1116
1117		ret = btrfs_next_item(extent_root, path);
1118		if (ret < 0)
1119			goto out_locked;
1120		if (ret)
1121			break;
1122	}
1123	if (start < end) {
1124		ret = __add_to_free_space_tree(trans, block_group, path2,
1125					       start, end - start);
1126		if (ret)
1127			goto out_locked;
1128	}
1129
1130	ret = 0;
1131out_locked:
1132	mutex_unlock(&block_group->free_space_lock);
1133out:
1134	btrfs_free_path(path2);
1135	btrfs_free_path(path);
1136	return ret;
1137}
1138
1139int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1140{
1141	struct btrfs_trans_handle *trans;
1142	struct btrfs_root *tree_root = fs_info->tree_root;
1143	struct btrfs_root *free_space_root;
1144	struct btrfs_block_group *block_group;
1145	struct rb_node *node;
1146	int ret;
1147
1148	trans = btrfs_start_transaction(tree_root, 0);
1149	if (IS_ERR(trans))
1150		return PTR_ERR(trans);
1151
1152	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1153	set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
1154	free_space_root = btrfs_create_tree(trans,
1155					    BTRFS_FREE_SPACE_TREE_OBJECTID);
1156	if (IS_ERR(free_space_root)) {
1157		ret = PTR_ERR(free_space_root);
1158		goto abort;
1159	}
1160	fs_info->free_space_root = free_space_root;
1161
1162	node = rb_first(&fs_info->block_group_cache_tree);
1163	while (node) {
1164		block_group = rb_entry(node, struct btrfs_block_group,
1165				       cache_node);
1166		ret = populate_free_space_tree(trans, block_group);
1167		if (ret)
1168			goto abort;
1169		node = rb_next(node);
1170	}
1171
1172	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1173	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1174	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 
1175	ret = btrfs_commit_transaction(trans);
 
 
1176
1177	/*
1178	 * Now that we've committed the transaction any reading of our commit
1179	 * root will be safe, so we can cache from the free space tree now.
1180	 */
1181	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
1182	return ret;
1183
1184abort:
1185	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1186	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
1187	btrfs_abort_transaction(trans, ret);
1188	btrfs_end_transaction(trans);
1189	return ret;
1190}
1191
1192static int clear_free_space_tree(struct btrfs_trans_handle *trans,
1193				 struct btrfs_root *root)
1194{
1195	struct btrfs_path *path;
1196	struct btrfs_key key;
1197	int nr;
1198	int ret;
1199
1200	path = btrfs_alloc_path();
1201	if (!path)
1202		return -ENOMEM;
1203
 
 
1204	key.objectid = 0;
1205	key.type = 0;
1206	key.offset = 0;
1207
1208	while (1) {
1209		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1210		if (ret < 0)
1211			goto out;
1212
1213		nr = btrfs_header_nritems(path->nodes[0]);
1214		if (!nr)
1215			break;
1216
1217		path->slots[0] = 0;
1218		ret = btrfs_del_items(trans, root, path, 0, nr);
1219		if (ret)
1220			goto out;
1221
1222		btrfs_release_path(path);
1223	}
1224
1225	ret = 0;
1226out:
1227	btrfs_free_path(path);
1228	return ret;
1229}
1230
1231int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
1232{
1233	struct btrfs_trans_handle *trans;
1234	struct btrfs_root *tree_root = fs_info->tree_root;
1235	struct btrfs_root *free_space_root = fs_info->free_space_root;
1236	int ret;
1237
1238	trans = btrfs_start_transaction(tree_root, 0);
1239	if (IS_ERR(trans))
1240		return PTR_ERR(trans);
1241
1242	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1243	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1244	fs_info->free_space_root = NULL;
1245
1246	ret = clear_free_space_tree(trans, free_space_root);
1247	if (ret)
1248		goto abort;
1249
1250	ret = btrfs_del_root(trans, &free_space_root->root_key);
1251	if (ret)
1252		goto abort;
1253
1254	list_del(&free_space_root->dirty_list);
1255
1256	btrfs_tree_lock(free_space_root->node);
1257	btrfs_clean_tree_block(free_space_root->node);
1258	btrfs_tree_unlock(free_space_root->node);
1259	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
1260			      0, 1);
1261
1262	btrfs_put_root(free_space_root);
 
 
 
 
 
 
1263
1264	return btrfs_commit_transaction(trans);
1265
1266abort:
1267	btrfs_abort_transaction(trans, ret);
1268	btrfs_end_transaction(trans);
1269	return ret;
1270}
1271
1272static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
1273					struct btrfs_block_group *block_group,
 
1274					struct btrfs_path *path)
1275{
 
1276	int ret;
1277
 
 
 
1278	block_group->needs_free_space = 0;
1279
1280	ret = add_new_free_space_info(trans, block_group, path);
1281	if (ret)
1282		return ret;
1283
1284	return __add_to_free_space_tree(trans, block_group, path,
1285					block_group->start,
1286					block_group->length);
1287}
1288
1289int add_block_group_free_space(struct btrfs_trans_handle *trans,
1290			       struct btrfs_block_group *block_group)
 
1291{
1292	struct btrfs_fs_info *fs_info = trans->fs_info;
1293	struct btrfs_path *path = NULL;
1294	int ret = 0;
1295
1296	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
1297		return 0;
1298
1299	mutex_lock(&block_group->free_space_lock);
1300	if (!block_group->needs_free_space)
1301		goto out;
1302
1303	path = btrfs_alloc_path();
1304	if (!path) {
1305		ret = -ENOMEM;
1306		goto out;
1307	}
1308
1309	ret = __add_block_group_free_space(trans, block_group, path);
1310
1311out:
1312	btrfs_free_path(path);
1313	mutex_unlock(&block_group->free_space_lock);
1314	if (ret)
1315		btrfs_abort_transaction(trans, ret);
1316	return ret;
1317}
1318
1319int remove_block_group_free_space(struct btrfs_trans_handle *trans,
1320				  struct btrfs_block_group *block_group)
 
1321{
1322	struct btrfs_root *root = trans->fs_info->free_space_root;
1323	struct btrfs_path *path;
1324	struct btrfs_key key, found_key;
1325	struct extent_buffer *leaf;
1326	u64 start, end;
1327	int done = 0, nr;
1328	int ret;
1329
1330	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
1331		return 0;
1332
1333	if (block_group->needs_free_space) {
1334		/* We never added this block group to the free space tree. */
1335		return 0;
1336	}
1337
1338	path = btrfs_alloc_path();
1339	if (!path) {
1340		ret = -ENOMEM;
1341		goto out;
1342	}
1343
1344	start = block_group->start;
1345	end = block_group->start + block_group->length;
1346
1347	key.objectid = end - 1;
1348	key.type = (u8)-1;
1349	key.offset = (u64)-1;
1350
1351	while (!done) {
1352		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
1353		if (ret)
1354			goto out;
1355
1356		leaf = path->nodes[0];
1357		nr = 0;
1358		path->slots[0]++;
1359		while (path->slots[0] > 0) {
1360			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
1361
1362			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
1363				ASSERT(found_key.objectid == block_group->start);
1364				ASSERT(found_key.offset == block_group->length);
1365				done = 1;
1366				nr++;
1367				path->slots[0]--;
1368				break;
1369			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
1370				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
1371				ASSERT(found_key.objectid >= start);
1372				ASSERT(found_key.objectid < end);
1373				ASSERT(found_key.objectid + found_key.offset <= end);
1374				nr++;
1375				path->slots[0]--;
1376			} else {
1377				ASSERT(0);
1378			}
1379		}
1380
1381		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
1382		if (ret)
1383			goto out;
1384		btrfs_release_path(path);
1385	}
1386
1387	ret = 0;
1388out:
1389	btrfs_free_path(path);
1390	if (ret)
1391		btrfs_abort_transaction(trans, ret);
1392	return ret;
1393}
1394
1395static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
1396				   struct btrfs_path *path,
1397				   u32 expected_extent_count)
1398{
1399	struct btrfs_block_group *block_group;
1400	struct btrfs_fs_info *fs_info;
1401	struct btrfs_root *root;
1402	struct btrfs_key key;
1403	int prev_bit = 0, bit;
1404	/* Initialize to silence GCC. */
1405	u64 extent_start = 0;
1406	u64 end, offset;
1407	u64 total_found = 0;
1408	u32 extent_count = 0;
1409	int ret;
1410
1411	block_group = caching_ctl->block_group;
1412	fs_info = block_group->fs_info;
1413	root = fs_info->free_space_root;
1414
1415	end = block_group->start + block_group->length;
1416
1417	while (1) {
1418		ret = btrfs_next_item(root, path);
1419		if (ret < 0)
1420			goto out;
1421		if (ret)
1422			break;
1423
1424		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1425
1426		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1427			break;
1428
1429		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
1430		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1431
1432		caching_ctl->progress = key.objectid;
1433
1434		offset = key.objectid;
1435		while (offset < key.objectid + key.offset) {
1436			bit = free_space_test_bit(block_group, path, offset);
1437			if (prev_bit == 0 && bit == 1) {
1438				extent_start = offset;
1439			} else if (prev_bit == 1 && bit == 0) {
1440				total_found += add_new_free_space(block_group,
 
1441								  extent_start,
1442								  offset);
1443				if (total_found > CACHING_CTL_WAKE_UP) {
1444					total_found = 0;
1445					wake_up(&caching_ctl->wait);
1446				}
1447				extent_count++;
1448			}
1449			prev_bit = bit;
1450			offset += fs_info->sectorsize;
1451		}
1452	}
1453	if (prev_bit == 1) {
1454		total_found += add_new_free_space(block_group, extent_start,
1455						  end);
1456		extent_count++;
1457	}
1458
1459	if (extent_count != expected_extent_count) {
1460		btrfs_err(fs_info,
1461			  "incorrect extent count for %llu; counted %u, expected %u",
1462			  block_group->start, extent_count,
1463			  expected_extent_count);
1464		ASSERT(0);
1465		ret = -EIO;
1466		goto out;
1467	}
1468
1469	caching_ctl->progress = (u64)-1;
1470
1471	ret = 0;
1472out:
1473	return ret;
1474}
1475
1476static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
1477				   struct btrfs_path *path,
1478				   u32 expected_extent_count)
1479{
1480	struct btrfs_block_group *block_group;
1481	struct btrfs_fs_info *fs_info;
1482	struct btrfs_root *root;
1483	struct btrfs_key key;
1484	u64 end;
1485	u64 total_found = 0;
1486	u32 extent_count = 0;
1487	int ret;
1488
1489	block_group = caching_ctl->block_group;
1490	fs_info = block_group->fs_info;
1491	root = fs_info->free_space_root;
1492
1493	end = block_group->start + block_group->length;
1494
1495	while (1) {
1496		ret = btrfs_next_item(root, path);
1497		if (ret < 0)
1498			goto out;
1499		if (ret)
1500			break;
1501
1502		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1503
1504		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1505			break;
1506
1507		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
1508		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1509
1510		caching_ctl->progress = key.objectid;
1511
1512		total_found += add_new_free_space(block_group, key.objectid,
 
1513						  key.objectid + key.offset);
1514		if (total_found > CACHING_CTL_WAKE_UP) {
1515			total_found = 0;
1516			wake_up(&caching_ctl->wait);
1517		}
1518		extent_count++;
1519	}
1520
1521	if (extent_count != expected_extent_count) {
1522		btrfs_err(fs_info,
1523			  "incorrect extent count for %llu; counted %u, expected %u",
1524			  block_group->start, extent_count,
1525			  expected_extent_count);
1526		ASSERT(0);
1527		ret = -EIO;
1528		goto out;
1529	}
1530
1531	caching_ctl->progress = (u64)-1;
1532
1533	ret = 0;
1534out:
1535	return ret;
1536}
1537
1538int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
1539{
1540	struct btrfs_block_group *block_group;
 
1541	struct btrfs_free_space_info *info;
1542	struct btrfs_path *path;
1543	u32 extent_count, flags;
1544	int ret;
1545
1546	block_group = caching_ctl->block_group;
 
1547
1548	path = btrfs_alloc_path();
1549	if (!path)
1550		return -ENOMEM;
1551
1552	/*
1553	 * Just like caching_thread() doesn't want to deadlock on the extent
1554	 * tree, we don't want to deadlock on the free space tree.
1555	 */
1556	path->skip_locking = 1;
1557	path->search_commit_root = 1;
1558	path->reada = READA_FORWARD;
1559
1560	info = search_free_space_info(NULL, block_group, path, 0);
1561	if (IS_ERR(info)) {
1562		ret = PTR_ERR(info);
1563		goto out;
1564	}
1565	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
1566	flags = btrfs_free_space_flags(path->nodes[0], info);
1567
1568	/*
1569	 * We left path pointing to the free space info item, so now
1570	 * load_free_space_foo can just iterate through the free space tree from
1571	 * there.
1572	 */
1573	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
1574		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
1575	else
1576		ret = load_free_space_extents(caching_ctl, path, extent_count);
1577
1578out:
1579	btrfs_free_path(path);
1580	return ret;
1581}
v4.10.11
 
   1/*
   2 * Copyright (C) 2015 Facebook.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/vmalloc.h>
  21#include "ctree.h"
  22#include "disk-io.h"
  23#include "locking.h"
  24#include "free-space-tree.h"
  25#include "transaction.h"
 
  26
  27static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
  28					struct btrfs_fs_info *fs_info,
  29					struct btrfs_block_group_cache *block_group,
  30					struct btrfs_path *path);
  31
  32void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
  33{
  34	u32 bitmap_range;
  35	size_t bitmap_size;
  36	u64 num_bitmaps, total_bitmap_size;
  37
 
 
 
 
  38	/*
  39	 * We convert to bitmaps when the disk space required for using extents
  40	 * exceeds that required for using bitmaps.
  41	 */
  42	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
  43	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
  44			      bitmap_range);
  45	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
  46	total_bitmap_size = num_bitmaps * bitmap_size;
  47	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
  48					    sizeof(struct btrfs_item));
  49
  50	/*
  51	 * We allow for a small buffer between the high threshold and low
  52	 * threshold to avoid thrashing back and forth between the two formats.
  53	 */
  54	if (cache->bitmap_high_thresh > 100)
  55		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
  56	else
  57		cache->bitmap_low_thresh = 0;
  58}
  59
  60static int add_new_free_space_info(struct btrfs_trans_handle *trans,
  61				   struct btrfs_fs_info *fs_info,
  62				   struct btrfs_block_group_cache *block_group,
  63				   struct btrfs_path *path)
  64{
  65	struct btrfs_root *root = fs_info->free_space_root;
  66	struct btrfs_free_space_info *info;
  67	struct btrfs_key key;
  68	struct extent_buffer *leaf;
  69	int ret;
  70
  71	key.objectid = block_group->key.objectid;
  72	key.type = BTRFS_FREE_SPACE_INFO_KEY;
  73	key.offset = block_group->key.offset;
  74
  75	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
  76	if (ret)
  77		goto out;
  78
  79	leaf = path->nodes[0];
  80	info = btrfs_item_ptr(leaf, path->slots[0],
  81			      struct btrfs_free_space_info);
  82	btrfs_set_free_space_extent_count(leaf, info, 0);
  83	btrfs_set_free_space_flags(leaf, info, 0);
  84	btrfs_mark_buffer_dirty(leaf);
  85
  86	ret = 0;
  87out:
  88	btrfs_release_path(path);
  89	return ret;
  90}
  91
  92struct btrfs_free_space_info *
  93search_free_space_info(struct btrfs_trans_handle *trans,
  94		       struct btrfs_fs_info *fs_info,
  95		       struct btrfs_block_group_cache *block_group,
  96		       struct btrfs_path *path, int cow)
  97{
 
  98	struct btrfs_root *root = fs_info->free_space_root;
  99	struct btrfs_key key;
 100	int ret;
 101
 102	key.objectid = block_group->key.objectid;
 103	key.type = BTRFS_FREE_SPACE_INFO_KEY;
 104	key.offset = block_group->key.offset;
 105
 106	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
 107	if (ret < 0)
 108		return ERR_PTR(ret);
 109	if (ret != 0) {
 110		btrfs_warn(fs_info, "missing free space info for %llu",
 111			   block_group->key.objectid);
 112		ASSERT(0);
 113		return ERR_PTR(-ENOENT);
 114	}
 115
 116	return btrfs_item_ptr(path->nodes[0], path->slots[0],
 117			      struct btrfs_free_space_info);
 118}
 119
 120/*
 121 * btrfs_search_slot() but we're looking for the greatest key less than the
 122 * passed key.
 123 */
 124static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
 125				  struct btrfs_root *root,
 126				  struct btrfs_key *key, struct btrfs_path *p,
 127				  int ins_len, int cow)
 128{
 129	int ret;
 130
 131	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
 132	if (ret < 0)
 133		return ret;
 134
 135	if (ret == 0) {
 136		ASSERT(0);
 137		return -EIO;
 138	}
 139
 140	if (p->slots[0] == 0) {
 141		ASSERT(0);
 142		return -EIO;
 143	}
 144	p->slots[0]--;
 145
 146	return 0;
 147}
 148
 149static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
 
 150{
 151	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
 152}
 153
 154static u8 *alloc_bitmap(u32 bitmap_size)
 155{
 156	void *mem;
 
 
 157
 158	/*
 159	 * The allocation size varies, observed numbers were < 4K up to 16K.
 160	 * Using vmalloc unconditionally would be too heavy, we'll try
 161	 * contiguous allocations first.
 
 
 
 162	 */
 163	if  (bitmap_size <= PAGE_SIZE)
 164		return kzalloc(bitmap_size, GFP_NOFS);
 
 
 
 165
 166	mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
 167	if (mem)
 168		return mem;
 
 
 
 169
 170	return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
 171			 PAGE_KERNEL);
 
 
 
 
 
 
 
 
 
 172}
 173
 
 174int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 175				  struct btrfs_fs_info *fs_info,
 176				  struct btrfs_block_group_cache *block_group,
 177				  struct btrfs_path *path)
 178{
 
 179	struct btrfs_root *root = fs_info->free_space_root;
 180	struct btrfs_free_space_info *info;
 181	struct btrfs_key key, found_key;
 182	struct extent_buffer *leaf;
 183	u8 *bitmap, *bitmap_cursor;
 
 184	u64 start, end;
 185	u64 bitmap_range, i;
 186	u32 bitmap_size, flags, expected_extent_count;
 187	u32 extent_count = 0;
 188	int done = 0, nr;
 189	int ret;
 190
 191	bitmap_size = free_space_bitmap_size(block_group->key.offset,
 192					     fs_info->sectorsize);
 193	bitmap = alloc_bitmap(bitmap_size);
 194	if (!bitmap) {
 195		ret = -ENOMEM;
 196		goto out;
 197	}
 198
 199	start = block_group->key.objectid;
 200	end = block_group->key.objectid + block_group->key.offset;
 201
 202	key.objectid = end - 1;
 203	key.type = (u8)-1;
 204	key.offset = (u64)-1;
 205
 206	while (!done) {
 207		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 208		if (ret)
 209			goto out;
 210
 211		leaf = path->nodes[0];
 212		nr = 0;
 213		path->slots[0]++;
 214		while (path->slots[0] > 0) {
 215			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 216
 217			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
 218				ASSERT(found_key.objectid == block_group->key.objectid);
 219				ASSERT(found_key.offset == block_group->key.offset);
 220				done = 1;
 221				break;
 222			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
 223				u64 first, last;
 224
 225				ASSERT(found_key.objectid >= start);
 226				ASSERT(found_key.objectid < end);
 227				ASSERT(found_key.objectid + found_key.offset <= end);
 228
 229				first = div_u64(found_key.objectid - start,
 230						fs_info->sectorsize);
 231				last = div_u64(found_key.objectid + found_key.offset - start,
 232					       fs_info->sectorsize);
 233				le_bitmap_set(bitmap, first, last - first);
 234
 235				extent_count++;
 236				nr++;
 237				path->slots[0]--;
 238			} else {
 239				ASSERT(0);
 240			}
 241		}
 242
 243		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
 244		if (ret)
 245			goto out;
 246		btrfs_release_path(path);
 247	}
 248
 249	info = search_free_space_info(trans, fs_info, block_group, path, 1);
 250	if (IS_ERR(info)) {
 251		ret = PTR_ERR(info);
 252		goto out;
 253	}
 254	leaf = path->nodes[0];
 255	flags = btrfs_free_space_flags(leaf, info);
 256	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
 257	btrfs_set_free_space_flags(leaf, info, flags);
 258	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
 259	btrfs_mark_buffer_dirty(leaf);
 260	btrfs_release_path(path);
 261
 262	if (extent_count != expected_extent_count) {
 263		btrfs_err(fs_info,
 264			  "incorrect extent count for %llu; counted %u, expected %u",
 265			  block_group->key.objectid, extent_count,
 266			  expected_extent_count);
 267		ASSERT(0);
 268		ret = -EIO;
 269		goto out;
 270	}
 271
 272	bitmap_cursor = bitmap;
 273	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
 274	i = start;
 275	while (i < end) {
 276		unsigned long ptr;
 277		u64 extent_size;
 278		u32 data_size;
 279
 280		extent_size = min(end - i, bitmap_range);
 281		data_size = free_space_bitmap_size(extent_size,
 282						   fs_info->sectorsize);
 283
 284		key.objectid = i;
 285		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
 286		key.offset = extent_size;
 287
 288		ret = btrfs_insert_empty_item(trans, root, path, &key,
 289					      data_size);
 290		if (ret)
 291			goto out;
 292
 293		leaf = path->nodes[0];
 294		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 295		write_extent_buffer(leaf, bitmap_cursor, ptr,
 296				    data_size);
 297		btrfs_mark_buffer_dirty(leaf);
 298		btrfs_release_path(path);
 299
 300		i += extent_size;
 301		bitmap_cursor += data_size;
 302	}
 303
 304	ret = 0;
 305out:
 306	kvfree(bitmap);
 307	if (ret)
 308		btrfs_abort_transaction(trans, ret);
 309	return ret;
 310}
 311
 
 312int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 313				  struct btrfs_fs_info *fs_info,
 314				  struct btrfs_block_group_cache *block_group,
 315				  struct btrfs_path *path)
 316{
 
 317	struct btrfs_root *root = fs_info->free_space_root;
 318	struct btrfs_free_space_info *info;
 319	struct btrfs_key key, found_key;
 320	struct extent_buffer *leaf;
 321	u8 *bitmap;
 322	u64 start, end;
 323	/* Initialize to silence GCC. */
 324	u64 extent_start = 0;
 325	u64 offset;
 326	u32 bitmap_size, flags, expected_extent_count;
 327	int prev_bit = 0, bit, bitnr;
 328	u32 extent_count = 0;
 329	int done = 0, nr;
 330	int ret;
 331
 332	bitmap_size = free_space_bitmap_size(block_group->key.offset,
 333					     fs_info->sectorsize);
 334	bitmap = alloc_bitmap(bitmap_size);
 335	if (!bitmap) {
 336		ret = -ENOMEM;
 337		goto out;
 338	}
 339
 340	start = block_group->key.objectid;
 341	end = block_group->key.objectid + block_group->key.offset;
 342
 343	key.objectid = end - 1;
 344	key.type = (u8)-1;
 345	key.offset = (u64)-1;
 346
 347	while (!done) {
 348		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 349		if (ret)
 350			goto out;
 351
 352		leaf = path->nodes[0];
 353		nr = 0;
 354		path->slots[0]++;
 355		while (path->slots[0] > 0) {
 356			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 357
 358			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
 359				ASSERT(found_key.objectid == block_group->key.objectid);
 360				ASSERT(found_key.offset == block_group->key.offset);
 361				done = 1;
 362				break;
 363			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
 364				unsigned long ptr;
 365				u8 *bitmap_cursor;
 366				u32 bitmap_pos, data_size;
 367
 368				ASSERT(found_key.objectid >= start);
 369				ASSERT(found_key.objectid < end);
 370				ASSERT(found_key.objectid + found_key.offset <= end);
 371
 372				bitmap_pos = div_u64(found_key.objectid - start,
 373						     fs_info->sectorsize *
 374						     BITS_PER_BYTE);
 375				bitmap_cursor = bitmap + bitmap_pos;
 376				data_size = free_space_bitmap_size(found_key.offset,
 377								   fs_info->sectorsize);
 378
 379				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
 380				read_extent_buffer(leaf, bitmap_cursor, ptr,
 381						   data_size);
 382
 383				nr++;
 384				path->slots[0]--;
 385			} else {
 386				ASSERT(0);
 387			}
 388		}
 389
 390		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
 391		if (ret)
 392			goto out;
 393		btrfs_release_path(path);
 394	}
 395
 396	info = search_free_space_info(trans, fs_info, block_group, path, 1);
 397	if (IS_ERR(info)) {
 398		ret = PTR_ERR(info);
 399		goto out;
 400	}
 401	leaf = path->nodes[0];
 402	flags = btrfs_free_space_flags(leaf, info);
 403	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
 404	btrfs_set_free_space_flags(leaf, info, flags);
 405	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
 406	btrfs_mark_buffer_dirty(leaf);
 407	btrfs_release_path(path);
 408
 409	offset = start;
 410	bitnr = 0;
 411	while (offset < end) {
 412		bit = !!le_test_bit(bitnr, bitmap);
 413		if (prev_bit == 0 && bit == 1) {
 414			extent_start = offset;
 415		} else if (prev_bit == 1 && bit == 0) {
 416			key.objectid = extent_start;
 417			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 418			key.offset = offset - extent_start;
 419
 420			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 421			if (ret)
 422				goto out;
 423			btrfs_release_path(path);
 424
 425			extent_count++;
 426		}
 427		prev_bit = bit;
 428		offset += fs_info->sectorsize;
 429		bitnr++;
 430	}
 431	if (prev_bit == 1) {
 432		key.objectid = extent_start;
 433		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 434		key.offset = end - extent_start;
 435
 436		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 437		if (ret)
 438			goto out;
 439		btrfs_release_path(path);
 440
 441		extent_count++;
 
 
 442	}
 443
 444	if (extent_count != expected_extent_count) {
 445		btrfs_err(fs_info,
 446			  "incorrect extent count for %llu; counted %u, expected %u",
 447			  block_group->key.objectid, extent_count,
 448			  expected_extent_count);
 449		ASSERT(0);
 450		ret = -EIO;
 451		goto out;
 452	}
 453
 454	ret = 0;
 455out:
 456	kvfree(bitmap);
 457	if (ret)
 458		btrfs_abort_transaction(trans, ret);
 459	return ret;
 460}
 461
 462static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
 463					  struct btrfs_fs_info *fs_info,
 464					  struct btrfs_block_group_cache *block_group,
 465					  struct btrfs_path *path,
 466					  int new_extents)
 467{
 468	struct btrfs_free_space_info *info;
 469	u32 flags;
 470	u32 extent_count;
 471	int ret = 0;
 472
 473	if (new_extents == 0)
 474		return 0;
 475
 476	info = search_free_space_info(trans, fs_info, block_group, path, 1);
 477	if (IS_ERR(info)) {
 478		ret = PTR_ERR(info);
 479		goto out;
 480	}
 481	flags = btrfs_free_space_flags(path->nodes[0], info);
 482	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
 483
 484	extent_count += new_extents;
 485	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
 486	btrfs_mark_buffer_dirty(path->nodes[0]);
 487	btrfs_release_path(path);
 488
 489	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
 490	    extent_count > block_group->bitmap_high_thresh) {
 491		ret = convert_free_space_to_bitmaps(trans, fs_info, block_group,
 492						    path);
 493	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
 494		   extent_count < block_group->bitmap_low_thresh) {
 495		ret = convert_free_space_to_extents(trans, fs_info, block_group,
 496						    path);
 497	}
 498
 499out:
 500	return ret;
 501}
 502
 503int free_space_test_bit(struct btrfs_block_group_cache *block_group,
 
 504			struct btrfs_path *path, u64 offset)
 505{
 506	struct extent_buffer *leaf;
 507	struct btrfs_key key;
 508	u64 found_start, found_end;
 509	unsigned long ptr, i;
 510
 511	leaf = path->nodes[0];
 512	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 513	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
 514
 515	found_start = key.objectid;
 516	found_end = key.objectid + key.offset;
 517	ASSERT(offset >= found_start && offset < found_end);
 518
 519	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 520	i = div_u64(offset - found_start,
 521		    block_group->fs_info->sectorsize);
 522	return !!extent_buffer_test_bit(leaf, ptr, i);
 523}
 524
 525static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
 526				struct btrfs_path *path, u64 *start, u64 *size,
 527				int bit)
 528{
 529	struct btrfs_fs_info *fs_info = block_group->fs_info;
 530	struct extent_buffer *leaf;
 531	struct btrfs_key key;
 532	u64 end = *start + *size;
 533	u64 found_start, found_end;
 534	unsigned long ptr, first, last;
 535
 536	leaf = path->nodes[0];
 537	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 538	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
 539
 540	found_start = key.objectid;
 541	found_end = key.objectid + key.offset;
 542	ASSERT(*start >= found_start && *start < found_end);
 543	ASSERT(end > found_start);
 544
 545	if (end > found_end)
 546		end = found_end;
 547
 548	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 549	first = div_u64(*start - found_start, fs_info->sectorsize);
 550	last = div_u64(end - found_start, fs_info->sectorsize);
 551	if (bit)
 552		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
 553	else
 554		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
 555	btrfs_mark_buffer_dirty(leaf);
 556
 557	*size -= end - *start;
 558	*start = end;
 559}
 560
 561/*
 562 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 563 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 564 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 565 * looking for.
 566 */
 567static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
 568				  struct btrfs_root *root, struct btrfs_path *p)
 569{
 570	struct btrfs_key key;
 571
 572	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
 573		p->slots[0]++;
 574		return 0;
 575	}
 576
 577	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
 578	btrfs_release_path(p);
 579
 580	key.objectid += key.offset;
 581	key.type = (u8)-1;
 582	key.offset = (u64)-1;
 583
 584	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
 585}
 586
 587/*
 588 * If remove is 1, then we are removing free space, thus clearing bits in the
 589 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 590 * the bitmap.
 591 */
 592static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
 593				    struct btrfs_fs_info *fs_info,
 594				    struct btrfs_block_group_cache *block_group,
 595				    struct btrfs_path *path,
 596				    u64 start, u64 size, int remove)
 597{
 598	struct btrfs_root *root = fs_info->free_space_root;
 599	struct btrfs_key key;
 600	u64 end = start + size;
 601	u64 cur_start, cur_size;
 602	int prev_bit, next_bit;
 603	int new_extents;
 604	int ret;
 605
 606	/*
 607	 * Read the bit for the block immediately before the extent of space if
 608	 * that block is within the block group.
 609	 */
 610	if (start > block_group->key.objectid) {
 611		u64 prev_block = start - block_group->fs_info->sectorsize;
 612
 613		key.objectid = prev_block;
 614		key.type = (u8)-1;
 615		key.offset = (u64)-1;
 616
 617		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
 618		if (ret)
 619			goto out;
 620
 621		prev_bit = free_space_test_bit(block_group, path, prev_block);
 622
 623		/* The previous block may have been in the previous bitmap. */
 624		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 625		if (start >= key.objectid + key.offset) {
 626			ret = free_space_next_bitmap(trans, root, path);
 627			if (ret)
 628				goto out;
 629		}
 630	} else {
 631		key.objectid = start;
 632		key.type = (u8)-1;
 633		key.offset = (u64)-1;
 634
 635		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
 636		if (ret)
 637			goto out;
 638
 639		prev_bit = -1;
 640	}
 641
 642	/*
 643	 * Iterate over all of the bitmaps overlapped by the extent of space,
 644	 * clearing/setting bits as required.
 645	 */
 646	cur_start = start;
 647	cur_size = size;
 648	while (1) {
 649		free_space_set_bits(block_group, path, &cur_start, &cur_size,
 650				    !remove);
 651		if (cur_size == 0)
 652			break;
 653		ret = free_space_next_bitmap(trans, root, path);
 654		if (ret)
 655			goto out;
 656	}
 657
 658	/*
 659	 * Read the bit for the block immediately after the extent of space if
 660	 * that block is within the block group.
 661	 */
 662	if (end < block_group->key.objectid + block_group->key.offset) {
 663		/* The next block may be in the next bitmap. */
 664		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 665		if (end >= key.objectid + key.offset) {
 666			ret = free_space_next_bitmap(trans, root, path);
 667			if (ret)
 668				goto out;
 669		}
 670
 671		next_bit = free_space_test_bit(block_group, path, end);
 672	} else {
 673		next_bit = -1;
 674	}
 675
 676	if (remove) {
 677		new_extents = -1;
 678		if (prev_bit == 1) {
 679			/* Leftover on the left. */
 680			new_extents++;
 681		}
 682		if (next_bit == 1) {
 683			/* Leftover on the right. */
 684			new_extents++;
 685		}
 686	} else {
 687		new_extents = 1;
 688		if (prev_bit == 1) {
 689			/* Merging with neighbor on the left. */
 690			new_extents--;
 691		}
 692		if (next_bit == 1) {
 693			/* Merging with neighbor on the right. */
 694			new_extents--;
 695		}
 696	}
 697
 698	btrfs_release_path(path);
 699	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
 700					     new_extents);
 701
 702out:
 703	return ret;
 704}
 705
 706static int remove_free_space_extent(struct btrfs_trans_handle *trans,
 707				    struct btrfs_fs_info *fs_info,
 708				    struct btrfs_block_group_cache *block_group,
 709				    struct btrfs_path *path,
 710				    u64 start, u64 size)
 711{
 712	struct btrfs_root *root = fs_info->free_space_root;
 713	struct btrfs_key key;
 714	u64 found_start, found_end;
 715	u64 end = start + size;
 716	int new_extents = -1;
 717	int ret;
 718
 719	key.objectid = start;
 720	key.type = (u8)-1;
 721	key.offset = (u64)-1;
 722
 723	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 724	if (ret)
 725		goto out;
 726
 727	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 728
 729	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
 730
 731	found_start = key.objectid;
 732	found_end = key.objectid + key.offset;
 733	ASSERT(start >= found_start && end <= found_end);
 734
 735	/*
 736	 * Okay, now that we've found the free space extent which contains the
 737	 * free space that we are removing, there are four cases:
 738	 *
 739	 * 1. We're using the whole extent: delete the key we found and
 740	 * decrement the free space extent count.
 741	 * 2. We are using part of the extent starting at the beginning: delete
 742	 * the key we found and insert a new key representing the leftover at
 743	 * the end. There is no net change in the number of extents.
 744	 * 3. We are using part of the extent ending at the end: delete the key
 745	 * we found and insert a new key representing the leftover at the
 746	 * beginning. There is no net change in the number of extents.
 747	 * 4. We are using part of the extent in the middle: delete the key we
 748	 * found and insert two new keys representing the leftovers on each
 749	 * side. Where we used to have one extent, we now have two, so increment
 750	 * the extent count. We may need to convert the block group to bitmaps
 751	 * as a result.
 752	 */
 753
 754	/* Delete the existing key (cases 1-4). */
 755	ret = btrfs_del_item(trans, root, path);
 756	if (ret)
 757		goto out;
 758
 759	/* Add a key for leftovers at the beginning (cases 3 and 4). */
 760	if (start > found_start) {
 761		key.objectid = found_start;
 762		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 763		key.offset = start - found_start;
 764
 765		btrfs_release_path(path);
 766		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 767		if (ret)
 768			goto out;
 769		new_extents++;
 770	}
 771
 772	/* Add a key for leftovers at the end (cases 2 and 4). */
 773	if (end < found_end) {
 774		key.objectid = end;
 775		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 776		key.offset = found_end - end;
 777
 778		btrfs_release_path(path);
 779		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 780		if (ret)
 781			goto out;
 782		new_extents++;
 783	}
 784
 785	btrfs_release_path(path);
 786	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
 787					     new_extents);
 788
 789out:
 790	return ret;
 791}
 792
 
 793int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
 794				  struct btrfs_fs_info *fs_info,
 795				  struct btrfs_block_group_cache *block_group,
 796				  struct btrfs_path *path, u64 start, u64 size)
 797{
 798	struct btrfs_free_space_info *info;
 799	u32 flags;
 800	int ret;
 801
 802	if (block_group->needs_free_space) {
 803		ret = __add_block_group_free_space(trans, fs_info, block_group,
 804						   path);
 805		if (ret)
 806			return ret;
 807	}
 808
 809	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
 810	if (IS_ERR(info))
 811		return PTR_ERR(info);
 812	flags = btrfs_free_space_flags(path->nodes[0], info);
 813	btrfs_release_path(path);
 814
 815	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
 816		return modify_free_space_bitmap(trans, fs_info, block_group,
 817						path, start, size, 1);
 818	} else {
 819		return remove_free_space_extent(trans, fs_info, block_group,
 820						path, start, size);
 821	}
 822}
 823
 824int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
 825				struct btrfs_fs_info *fs_info,
 826				u64 start, u64 size)
 827{
 828	struct btrfs_block_group_cache *block_group;
 829	struct btrfs_path *path;
 830	int ret;
 831
 832	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 833		return 0;
 834
 835	path = btrfs_alloc_path();
 836	if (!path) {
 837		ret = -ENOMEM;
 838		goto out;
 839	}
 840
 841	block_group = btrfs_lookup_block_group(fs_info, start);
 842	if (!block_group) {
 843		ASSERT(0);
 844		ret = -ENOENT;
 845		goto out;
 846	}
 847
 848	mutex_lock(&block_group->free_space_lock);
 849	ret = __remove_from_free_space_tree(trans, fs_info, block_group, path,
 850					    start, size);
 851	mutex_unlock(&block_group->free_space_lock);
 852
 853	btrfs_put_block_group(block_group);
 854out:
 855	btrfs_free_path(path);
 856	if (ret)
 857		btrfs_abort_transaction(trans, ret);
 858	return ret;
 859}
 860
 861static int add_free_space_extent(struct btrfs_trans_handle *trans,
 862				 struct btrfs_fs_info *fs_info,
 863				 struct btrfs_block_group_cache *block_group,
 864				 struct btrfs_path *path,
 865				 u64 start, u64 size)
 866{
 867	struct btrfs_root *root = fs_info->free_space_root;
 868	struct btrfs_key key, new_key;
 869	u64 found_start, found_end;
 870	u64 end = start + size;
 871	int new_extents = 1;
 872	int ret;
 873
 874	/*
 875	 * We are adding a new extent of free space, but we need to merge
 876	 * extents. There are four cases here:
 877	 *
 878	 * 1. The new extent does not have any immediate neighbors to merge
 879	 * with: add the new key and increment the free space extent count. We
 880	 * may need to convert the block group to bitmaps as a result.
 881	 * 2. The new extent has an immediate neighbor before it: remove the
 882	 * previous key and insert a new key combining both of them. There is no
 883	 * net change in the number of extents.
 884	 * 3. The new extent has an immediate neighbor after it: remove the next
 885	 * key and insert a new key combining both of them. There is no net
 886	 * change in the number of extents.
 887	 * 4. The new extent has immediate neighbors on both sides: remove both
 888	 * of the keys and insert a new key combining all of them. Where we used
 889	 * to have two extents, we now have one, so decrement the extent count.
 890	 */
 891
 892	new_key.objectid = start;
 893	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
 894	new_key.offset = size;
 895
 896	/* Search for a neighbor on the left. */
 897	if (start == block_group->key.objectid)
 898		goto right;
 899	key.objectid = start - 1;
 900	key.type = (u8)-1;
 901	key.offset = (u64)-1;
 902
 903	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 904	if (ret)
 905		goto out;
 906
 907	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 908
 909	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
 910		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
 911		btrfs_release_path(path);
 912		goto right;
 913	}
 914
 915	found_start = key.objectid;
 916	found_end = key.objectid + key.offset;
 917	ASSERT(found_start >= block_group->key.objectid &&
 918	       found_end > block_group->key.objectid);
 919	ASSERT(found_start < start && found_end <= start);
 920
 921	/*
 922	 * Delete the neighbor on the left and absorb it into the new key (cases
 923	 * 2 and 4).
 924	 */
 925	if (found_end == start) {
 926		ret = btrfs_del_item(trans, root, path);
 927		if (ret)
 928			goto out;
 929		new_key.objectid = found_start;
 930		new_key.offset += key.offset;
 931		new_extents--;
 932	}
 933	btrfs_release_path(path);
 934
 935right:
 936	/* Search for a neighbor on the right. */
 937	if (end == block_group->key.objectid + block_group->key.offset)
 938		goto insert;
 939	key.objectid = end;
 940	key.type = (u8)-1;
 941	key.offset = (u64)-1;
 942
 943	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
 944	if (ret)
 945		goto out;
 946
 947	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 948
 949	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
 950		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
 951		btrfs_release_path(path);
 952		goto insert;
 953	}
 954
 955	found_start = key.objectid;
 956	found_end = key.objectid + key.offset;
 957	ASSERT(found_start >= block_group->key.objectid &&
 958	       found_end > block_group->key.objectid);
 959	ASSERT((found_start < start && found_end <= start) ||
 960	       (found_start >= end && found_end > end));
 961
 962	/*
 963	 * Delete the neighbor on the right and absorb it into the new key
 964	 * (cases 3 and 4).
 965	 */
 966	if (found_start == end) {
 967		ret = btrfs_del_item(trans, root, path);
 968		if (ret)
 969			goto out;
 970		new_key.offset += key.offset;
 971		new_extents--;
 972	}
 973	btrfs_release_path(path);
 974
 975insert:
 976	/* Insert the new key (cases 1-4). */
 977	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
 978	if (ret)
 979		goto out;
 980
 981	btrfs_release_path(path);
 982	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
 983					     new_extents);
 984
 985out:
 986	return ret;
 987}
 988
 
 989int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
 990			     struct btrfs_fs_info *fs_info,
 991			     struct btrfs_block_group_cache *block_group,
 992			     struct btrfs_path *path, u64 start, u64 size)
 993{
 994	struct btrfs_free_space_info *info;
 995	u32 flags;
 996	int ret;
 997
 998	if (block_group->needs_free_space) {
 999		ret = __add_block_group_free_space(trans, fs_info, block_group,
1000						   path);
1001		if (ret)
1002			return ret;
1003	}
1004
1005	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
1006	if (IS_ERR(info))
1007		return PTR_ERR(info);
1008	flags = btrfs_free_space_flags(path->nodes[0], info);
1009	btrfs_release_path(path);
1010
1011	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
1012		return modify_free_space_bitmap(trans, fs_info, block_group,
1013						path, start, size, 0);
1014	} else {
1015		return add_free_space_extent(trans, fs_info, block_group, path,
1016					     start, size);
1017	}
1018}
1019
1020int add_to_free_space_tree(struct btrfs_trans_handle *trans,
1021			   struct btrfs_fs_info *fs_info,
1022			   u64 start, u64 size)
1023{
1024	struct btrfs_block_group_cache *block_group;
1025	struct btrfs_path *path;
1026	int ret;
1027
1028	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
1029		return 0;
1030
1031	path = btrfs_alloc_path();
1032	if (!path) {
1033		ret = -ENOMEM;
1034		goto out;
1035	}
1036
1037	block_group = btrfs_lookup_block_group(fs_info, start);
1038	if (!block_group) {
1039		ASSERT(0);
1040		ret = -ENOENT;
1041		goto out;
1042	}
1043
1044	mutex_lock(&block_group->free_space_lock);
1045	ret = __add_to_free_space_tree(trans, fs_info, block_group, path, start,
1046				       size);
1047	mutex_unlock(&block_group->free_space_lock);
1048
1049	btrfs_put_block_group(block_group);
1050out:
1051	btrfs_free_path(path);
1052	if (ret)
1053		btrfs_abort_transaction(trans, ret);
1054	return ret;
1055}
1056
1057/*
1058 * Populate the free space tree by walking the extent tree. Operations on the
1059 * extent tree that happen as a result of writes to the free space tree will go
1060 * through the normal add/remove hooks.
1061 */
1062static int populate_free_space_tree(struct btrfs_trans_handle *trans,
1063				    struct btrfs_fs_info *fs_info,
1064				    struct btrfs_block_group_cache *block_group)
1065{
1066	struct btrfs_root *extent_root = fs_info->extent_root;
1067	struct btrfs_path *path, *path2;
1068	struct btrfs_key key;
1069	u64 start, end;
1070	int ret;
1071
1072	path = btrfs_alloc_path();
1073	if (!path)
1074		return -ENOMEM;
1075	path->reada = 1;
1076
1077	path2 = btrfs_alloc_path();
1078	if (!path2) {
1079		btrfs_free_path(path);
1080		return -ENOMEM;
1081	}
1082
1083	ret = add_new_free_space_info(trans, fs_info, block_group, path2);
1084	if (ret)
1085		goto out;
1086
1087	mutex_lock(&block_group->free_space_lock);
1088
1089	/*
1090	 * Iterate through all of the extent and metadata items in this block
1091	 * group, adding the free space between them and the free space at the
1092	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
1093	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
1094	 * contained in.
1095	 */
1096	key.objectid = block_group->key.objectid;
1097	key.type = BTRFS_EXTENT_ITEM_KEY;
1098	key.offset = 0;
1099
1100	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
1101	if (ret < 0)
1102		goto out_locked;
1103	ASSERT(ret == 0);
1104
1105	start = block_group->key.objectid;
1106	end = block_group->key.objectid + block_group->key.offset;
1107	while (1) {
1108		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1109
1110		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
1111		    key.type == BTRFS_METADATA_ITEM_KEY) {
1112			if (key.objectid >= end)
1113				break;
1114
1115			if (start < key.objectid) {
1116				ret = __add_to_free_space_tree(trans, fs_info,
1117							       block_group,
1118							       path2, start,
1119							       key.objectid -
1120							       start);
1121				if (ret)
1122					goto out_locked;
1123			}
1124			start = key.objectid;
1125			if (key.type == BTRFS_METADATA_ITEM_KEY)
1126				start += fs_info->nodesize;
1127			else
1128				start += key.offset;
1129		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1130			if (key.objectid != block_group->key.objectid)
1131				break;
1132		}
1133
1134		ret = btrfs_next_item(extent_root, path);
1135		if (ret < 0)
1136			goto out_locked;
1137		if (ret)
1138			break;
1139	}
1140	if (start < end) {
1141		ret = __add_to_free_space_tree(trans, fs_info, block_group,
1142					       path2, start, end - start);
1143		if (ret)
1144			goto out_locked;
1145	}
1146
1147	ret = 0;
1148out_locked:
1149	mutex_unlock(&block_group->free_space_lock);
1150out:
1151	btrfs_free_path(path2);
1152	btrfs_free_path(path);
1153	return ret;
1154}
1155
1156int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1157{
1158	struct btrfs_trans_handle *trans;
1159	struct btrfs_root *tree_root = fs_info->tree_root;
1160	struct btrfs_root *free_space_root;
1161	struct btrfs_block_group_cache *block_group;
1162	struct rb_node *node;
1163	int ret;
1164
1165	trans = btrfs_start_transaction(tree_root, 0);
1166	if (IS_ERR(trans))
1167		return PTR_ERR(trans);
1168
1169	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1170	free_space_root = btrfs_create_tree(trans, fs_info,
 
1171					    BTRFS_FREE_SPACE_TREE_OBJECTID);
1172	if (IS_ERR(free_space_root)) {
1173		ret = PTR_ERR(free_space_root);
1174		goto abort;
1175	}
1176	fs_info->free_space_root = free_space_root;
1177
1178	node = rb_first(&fs_info->block_group_cache_tree);
1179	while (node) {
1180		block_group = rb_entry(node, struct btrfs_block_group_cache,
1181				       cache_node);
1182		ret = populate_free_space_tree(trans, fs_info, block_group);
1183		if (ret)
1184			goto abort;
1185		node = rb_next(node);
1186	}
1187
1188	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1189	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1190	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1191
1192	ret = btrfs_commit_transaction(trans);
1193	if (ret)
1194		return ret;
1195
1196	return 0;
 
 
 
 
 
1197
1198abort:
1199	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 
1200	btrfs_abort_transaction(trans, ret);
1201	btrfs_end_transaction(trans);
1202	return ret;
1203}
1204
1205static int clear_free_space_tree(struct btrfs_trans_handle *trans,
1206				 struct btrfs_root *root)
1207{
1208	struct btrfs_path *path;
1209	struct btrfs_key key;
1210	int nr;
1211	int ret;
1212
1213	path = btrfs_alloc_path();
1214	if (!path)
1215		return -ENOMEM;
1216
1217	path->leave_spinning = 1;
1218
1219	key.objectid = 0;
1220	key.type = 0;
1221	key.offset = 0;
1222
1223	while (1) {
1224		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1225		if (ret < 0)
1226			goto out;
1227
1228		nr = btrfs_header_nritems(path->nodes[0]);
1229		if (!nr)
1230			break;
1231
1232		path->slots[0] = 0;
1233		ret = btrfs_del_items(trans, root, path, 0, nr);
1234		if (ret)
1235			goto out;
1236
1237		btrfs_release_path(path);
1238	}
1239
1240	ret = 0;
1241out:
1242	btrfs_free_path(path);
1243	return ret;
1244}
1245
1246int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
1247{
1248	struct btrfs_trans_handle *trans;
1249	struct btrfs_root *tree_root = fs_info->tree_root;
1250	struct btrfs_root *free_space_root = fs_info->free_space_root;
1251	int ret;
1252
1253	trans = btrfs_start_transaction(tree_root, 0);
1254	if (IS_ERR(trans))
1255		return PTR_ERR(trans);
1256
1257	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1258	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1259	fs_info->free_space_root = NULL;
1260
1261	ret = clear_free_space_tree(trans, free_space_root);
1262	if (ret)
1263		goto abort;
1264
1265	ret = btrfs_del_root(trans, tree_root, &free_space_root->root_key);
1266	if (ret)
1267		goto abort;
1268
1269	list_del(&free_space_root->dirty_list);
1270
1271	btrfs_tree_lock(free_space_root->node);
1272	clean_tree_block(trans, fs_info, free_space_root->node);
1273	btrfs_tree_unlock(free_space_root->node);
1274	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
1275			      0, 1);
1276
1277	free_extent_buffer(free_space_root->node);
1278	free_extent_buffer(free_space_root->commit_root);
1279	kfree(free_space_root);
1280
1281	ret = btrfs_commit_transaction(trans);
1282	if (ret)
1283		return ret;
1284
1285	return 0;
1286
1287abort:
1288	btrfs_abort_transaction(trans, ret);
1289	btrfs_end_transaction(trans);
1290	return ret;
1291}
1292
1293static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
1294					struct btrfs_fs_info *fs_info,
1295					struct btrfs_block_group_cache *block_group,
1296					struct btrfs_path *path)
1297{
1298	u64 start, end;
1299	int ret;
1300
1301	start = block_group->key.objectid;
1302	end = block_group->key.objectid + block_group->key.offset;
1303
1304	block_group->needs_free_space = 0;
1305
1306	ret = add_new_free_space_info(trans, fs_info, block_group, path);
1307	if (ret)
1308		return ret;
1309
1310	return __add_to_free_space_tree(trans, fs_info, block_group, path,
1311					block_group->key.objectid,
1312					block_group->key.offset);
1313}
1314
1315int add_block_group_free_space(struct btrfs_trans_handle *trans,
1316			       struct btrfs_fs_info *fs_info,
1317			       struct btrfs_block_group_cache *block_group)
1318{
 
1319	struct btrfs_path *path = NULL;
1320	int ret = 0;
1321
1322	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
1323		return 0;
1324
1325	mutex_lock(&block_group->free_space_lock);
1326	if (!block_group->needs_free_space)
1327		goto out;
1328
1329	path = btrfs_alloc_path();
1330	if (!path) {
1331		ret = -ENOMEM;
1332		goto out;
1333	}
1334
1335	ret = __add_block_group_free_space(trans, fs_info, block_group, path);
1336
1337out:
1338	btrfs_free_path(path);
1339	mutex_unlock(&block_group->free_space_lock);
1340	if (ret)
1341		btrfs_abort_transaction(trans, ret);
1342	return ret;
1343}
1344
1345int remove_block_group_free_space(struct btrfs_trans_handle *trans,
1346				  struct btrfs_fs_info *fs_info,
1347				  struct btrfs_block_group_cache *block_group)
1348{
1349	struct btrfs_root *root = fs_info->free_space_root;
1350	struct btrfs_path *path;
1351	struct btrfs_key key, found_key;
1352	struct extent_buffer *leaf;
1353	u64 start, end;
1354	int done = 0, nr;
1355	int ret;
1356
1357	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
1358		return 0;
1359
1360	if (block_group->needs_free_space) {
1361		/* We never added this block group to the free space tree. */
1362		return 0;
1363	}
1364
1365	path = btrfs_alloc_path();
1366	if (!path) {
1367		ret = -ENOMEM;
1368		goto out;
1369	}
1370
1371	start = block_group->key.objectid;
1372	end = block_group->key.objectid + block_group->key.offset;
1373
1374	key.objectid = end - 1;
1375	key.type = (u8)-1;
1376	key.offset = (u64)-1;
1377
1378	while (!done) {
1379		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
1380		if (ret)
1381			goto out;
1382
1383		leaf = path->nodes[0];
1384		nr = 0;
1385		path->slots[0]++;
1386		while (path->slots[0] > 0) {
1387			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
1388
1389			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
1390				ASSERT(found_key.objectid == block_group->key.objectid);
1391				ASSERT(found_key.offset == block_group->key.offset);
1392				done = 1;
1393				nr++;
1394				path->slots[0]--;
1395				break;
1396			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
1397				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
1398				ASSERT(found_key.objectid >= start);
1399				ASSERT(found_key.objectid < end);
1400				ASSERT(found_key.objectid + found_key.offset <= end);
1401				nr++;
1402				path->slots[0]--;
1403			} else {
1404				ASSERT(0);
1405			}
1406		}
1407
1408		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
1409		if (ret)
1410			goto out;
1411		btrfs_release_path(path);
1412	}
1413
1414	ret = 0;
1415out:
1416	btrfs_free_path(path);
1417	if (ret)
1418		btrfs_abort_transaction(trans, ret);
1419	return ret;
1420}
1421
1422static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
1423				   struct btrfs_path *path,
1424				   u32 expected_extent_count)
1425{
1426	struct btrfs_block_group_cache *block_group;
1427	struct btrfs_fs_info *fs_info;
1428	struct btrfs_root *root;
1429	struct btrfs_key key;
1430	int prev_bit = 0, bit;
1431	/* Initialize to silence GCC. */
1432	u64 extent_start = 0;
1433	u64 end, offset;
1434	u64 total_found = 0;
1435	u32 extent_count = 0;
1436	int ret;
1437
1438	block_group = caching_ctl->block_group;
1439	fs_info = block_group->fs_info;
1440	root = fs_info->free_space_root;
1441
1442	end = block_group->key.objectid + block_group->key.offset;
1443
1444	while (1) {
1445		ret = btrfs_next_item(root, path);
1446		if (ret < 0)
1447			goto out;
1448		if (ret)
1449			break;
1450
1451		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1452
1453		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1454			break;
1455
1456		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
1457		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1458
1459		caching_ctl->progress = key.objectid;
1460
1461		offset = key.objectid;
1462		while (offset < key.objectid + key.offset) {
1463			bit = free_space_test_bit(block_group, path, offset);
1464			if (prev_bit == 0 && bit == 1) {
1465				extent_start = offset;
1466			} else if (prev_bit == 1 && bit == 0) {
1467				total_found += add_new_free_space(block_group,
1468								  fs_info,
1469								  extent_start,
1470								  offset);
1471				if (total_found > CACHING_CTL_WAKE_UP) {
1472					total_found = 0;
1473					wake_up(&caching_ctl->wait);
1474				}
1475				extent_count++;
1476			}
1477			prev_bit = bit;
1478			offset += fs_info->sectorsize;
1479		}
1480	}
1481	if (prev_bit == 1) {
1482		total_found += add_new_free_space(block_group, fs_info,
1483						  extent_start, end);
1484		extent_count++;
1485	}
1486
1487	if (extent_count != expected_extent_count) {
1488		btrfs_err(fs_info,
1489			  "incorrect extent count for %llu; counted %u, expected %u",
1490			  block_group->key.objectid, extent_count,
1491			  expected_extent_count);
1492		ASSERT(0);
1493		ret = -EIO;
1494		goto out;
1495	}
1496
1497	caching_ctl->progress = (u64)-1;
1498
1499	ret = 0;
1500out:
1501	return ret;
1502}
1503
1504static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
1505				   struct btrfs_path *path,
1506				   u32 expected_extent_count)
1507{
1508	struct btrfs_block_group_cache *block_group;
1509	struct btrfs_fs_info *fs_info;
1510	struct btrfs_root *root;
1511	struct btrfs_key key;
1512	u64 end;
1513	u64 total_found = 0;
1514	u32 extent_count = 0;
1515	int ret;
1516
1517	block_group = caching_ctl->block_group;
1518	fs_info = block_group->fs_info;
1519	root = fs_info->free_space_root;
1520
1521	end = block_group->key.objectid + block_group->key.offset;
1522
1523	while (1) {
1524		ret = btrfs_next_item(root, path);
1525		if (ret < 0)
1526			goto out;
1527		if (ret)
1528			break;
1529
1530		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1531
1532		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1533			break;
1534
1535		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
1536		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1537
1538		caching_ctl->progress = key.objectid;
1539
1540		total_found += add_new_free_space(block_group, fs_info,
1541						  key.objectid,
1542						  key.objectid + key.offset);
1543		if (total_found > CACHING_CTL_WAKE_UP) {
1544			total_found = 0;
1545			wake_up(&caching_ctl->wait);
1546		}
1547		extent_count++;
1548	}
1549
1550	if (extent_count != expected_extent_count) {
1551		btrfs_err(fs_info,
1552			  "incorrect extent count for %llu; counted %u, expected %u",
1553			  block_group->key.objectid, extent_count,
1554			  expected_extent_count);
1555		ASSERT(0);
1556		ret = -EIO;
1557		goto out;
1558	}
1559
1560	caching_ctl->progress = (u64)-1;
1561
1562	ret = 0;
1563out:
1564	return ret;
1565}
1566
1567int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
1568{
1569	struct btrfs_block_group_cache *block_group;
1570	struct btrfs_fs_info *fs_info;
1571	struct btrfs_free_space_info *info;
1572	struct btrfs_path *path;
1573	u32 extent_count, flags;
1574	int ret;
1575
1576	block_group = caching_ctl->block_group;
1577	fs_info = block_group->fs_info;
1578
1579	path = btrfs_alloc_path();
1580	if (!path)
1581		return -ENOMEM;
1582
1583	/*
1584	 * Just like caching_thread() doesn't want to deadlock on the extent
1585	 * tree, we don't want to deadlock on the free space tree.
1586	 */
1587	path->skip_locking = 1;
1588	path->search_commit_root = 1;
1589	path->reada = 1;
1590
1591	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
1592	if (IS_ERR(info)) {
1593		ret = PTR_ERR(info);
1594		goto out;
1595	}
1596	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
1597	flags = btrfs_free_space_flags(path->nodes[0], info);
1598
1599	/*
1600	 * We left path pointing to the free space info item, so now
1601	 * load_free_space_foo can just iterate through the free space tree from
1602	 * there.
1603	 */
1604	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
1605		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
1606	else
1607		ret = load_free_space_extents(caching_ctl, path, extent_count);
1608
1609out:
1610	btrfs_free_path(path);
1611	return ret;
1612}