/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
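
/*
 * Worked example (illustrative, assuming the common 4K PAGE_CACHE_SIZE):
 * a bitmap occupies one full page, so BITS_PER_BITMAP = 4096 * 8 = 32768
 * bits.  With a 4K sectorsize each bit tracks one sector, so a single
 * bitmap entry covers 32768 * 4096 bytes = 128MB of a block group, while
 * MAX_CACHE_BYTES_PER_GIG caps in-memory tracking at 32K per 1GB of space.
 */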

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
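
/*
 * Illustrative caller sketch (hypothetical, not part of this file): the
 * returned inode carries a reference from igrab()/btrfs_iget(), so a
 * caller is expected to drop it with iput() when done, e.g.:
 *
 *	inode = lookup_free_space_inode(root, block_group, path);
 *	if (!IS_ERR(inode)) {
 *		...use the cache inode...
 *		iput(inode);
 *	}
 */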

int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	u64 needed_bytes;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&trans->block_rsv->lock);
	if (trans->block_rsv->reserved < needed_bytes) {
		spin_unlock(&trans->block_rsv->lock);
		trans->block_rsv = rsv;
		return -ENOSPC;
	}
	spin_unlock(&trans->block_rsv->lock);

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);

	if (ret) {
		trans->block_rsv = rsv;
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	trans->block_rsv = rsv;

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}
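
/*
 * Worked example (assuming 4K pages): a 64K cache inode gives
 * num_pages = (65536 + 4095) >> 12 = 16, i.e. the rounded-up number of
 * page-cache pages needed to cover i_size.
 */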

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}
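
/*
 * Sketch of the resulting first-page layout when check_crcs is set
 * (derived from the offsets above; one crc32 per page):
 *
 *	+---------------------+-------------+----------------------
 *	| u32 crc[num_pages]  | __le64 gen  | entries/bitmaps ...
 *	+---------------------+-------------+----------------------
 *
 * Without check_crcs, only a single u64 generation sits at the front.
 */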

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
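
/*
 * Capacity sketch (illustrative numbers, assuming 4K pages and the
 * packed on-disk btrfs_free_space_entry of two __le64s plus a u8, i.e.
 * 17 bytes): a 16-page cache reserves 16 * 4 + 8 = 72 bytes of
 * crc/generation header on the first page, leaving room for roughly
 * (4096 - 72) / 17 = 236 entries there and 240 on each page after it.
 */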

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
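
/*
 * Merge example (hypothetical offsets): loading [0, 8K) and [8K, 24K)
 * as two extent entries leaves an allocation that straddled 8K looking
 * split; after merge_space_tree() a single [0, 24K) entry remains.
 * Bitmap entries are deliberately skipped, only plain extents merge.
 */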

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has the wrong amount of "
		       "free space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, extent_start, extent_end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
		WARN_ON(1);
		goto out_nospc;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	if (block_group)
		start = block_group->key.objectid;

	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		entries++;
		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
		if (ret)
			goto out_nospc;

		start = extent_end;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
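
/*
 * Worked example (hypothetical numbers): with ctl->start = 1GB,
 * ctl->unit = 4096 and BITS_PER_BITMAP = 32768, one bitmap covers
 * bytes_per_bitmap = 32768 * 4096 = 128MB.  An offset of 1GB + 200MB
 * rounds down to bitmap_start = 1GB + 128MB, the start of the second
 * bitmap window in the block group.
 */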

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
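
/*
 * Ordering example (hypothetical): if an extent entry and a bitmap
 * entry both start at offset 4MB, the tie-break above keys the bitmap
 * to the right, so an in-order walk visits the extent first:
 *
 *	extent@4MB  ->  bitmap@4MB  ->  extent@132MB  ->  ...
 */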

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
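
/*
 * Worked example (illustrative numbers, assuming 4K pages): for a 2GB
 * block group max_bytes = 2 * 32K = 64K.  With 3 bitmaps in use,
 * bitmap_bytes = (3 + 1) * 4096 = 16K, extent_bytes = min(64K - 16K,
 * 64K / 2) = 32K, and extents_thresh is 32K divided by the in-memory
 * size of struct btrfs_free_space.
 */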

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
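
/*
 * Search example (hypothetical bit pattern, unit = 4K): asking for 16K
 * (4 bits) in a bitmap whose set bits run 0-1 and then 10-20 skips the
 * first run (next_zero - i = 2 < 4) and succeeds at bit 10, returning
 * *offset = info->offset + 10 * 4K and *bytes = 11 * 4K, since the
 * entire free run that was found is reported back to the caller.
 */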

static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;

}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents.  However if we have
		 * plenty of cache left then go ahead and add them, no sense in
		 * adding the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
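
/*
 * Decision example (hypothetical, 4K sectorsize): a 12K extent counts
 * as "small" (<= 16K) and still goes into an extent entry while the
 * cache is under half of extents_thresh; a 1MB extent stays an extent
 * entry until the threshold itself is hit.  A block group smaller than
 * 32768 * 4K = 128MB never converts to bitmaps at all.
 */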

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent,
	 * so before we do that see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
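
/*
 * Usage sketch (hypothetical values): returning 1MB at 'offset' to a
 * block group's ctl takes this path; the range is first merged with its
 * neighbours, then offered to a bitmap, and only linked as a fresh
 * extent entry if neither applies:
 *
 *	ret = __btrfs_add_free_space(block_group->free_space_ctl,
 *				     offset, 1024 * 1024);
 */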
1873
1874int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1875			    u64 offset, u64 bytes)
1876{
1877	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1878	struct btrfs_free_space *info;
1879	int ret = 0;
1880
1881	spin_lock(&ctl->tree_lock);
1882
1883again:
1884	if (!bytes)
1885		goto out_lock;
1886
1887	info = tree_search_offset(ctl, offset, 0, 0);
1888	if (!info) {
1889		/*
1890		 * oops didn't find an extent that matched the space we wanted
1891		 * to remove, look for a bitmap instead
1892		 */
1893		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1894					  1, 0);
1895		if (!info) {
1896			/* the tree logging code might be calling us before we
1897			 * have fully loaded the free space rbtree for this
1898			 * block group.  So it is possible the entry won't
1899			 * be in the rbtree yet at all.  The caching code
1900			 * will make sure not to put it in the rbtree if
1901			 * the logging code has pinned it.
1902			 */
1903			goto out_lock;
1904		}
1905	}
1906
1907	if (!info->bitmap) {
1908		unlink_free_space(ctl, info);
1909		if (offset == info->offset) {
1910			u64 to_free = min(bytes, info->bytes);
1911
1912			info->bytes -= to_free;
1913			info->offset += to_free;
1914			if (info->bytes) {
1915				ret = link_free_space(ctl, info);
1916				WARN_ON(ret);
1917			} else {
1918				kmem_cache_free(btrfs_free_space_cachep, info);
1919			}
1920
1921			offset += to_free;
1922			bytes -= to_free;
1923			goto again;
1924		} else {
1925			u64 old_end = info->bytes + info->offset;
1926
1927			info->bytes = offset - info->offset;
1928			ret = link_free_space(ctl, info);
1929			WARN_ON(ret);
1930			if (ret)
1931				goto out_lock;
1932
1933			/* Not enough bytes in this entry to satisfy us */
1934			if (old_end < offset + bytes) {
1935				bytes -= old_end - offset;
1936				offset = old_end;
1937				goto again;
1938			} else if (old_end == offset + bytes) {
1939				/* all done */
1940				goto out_lock;
1941			}
1942			spin_unlock(&ctl->tree_lock);
1943
1944			ret = btrfs_add_free_space(block_group, offset + bytes,
1945						   old_end - (offset + bytes));
1946			WARN_ON(ret);
1947			goto out;
1948		}
1949	}
1950
1951	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1952	if (ret == -EAGAIN)
1953		goto again;
1954	BUG_ON(ret); /* logic error */
1955out_lock:
1956	spin_unlock(&ctl->tree_lock);
1957out:
1958	return ret;
1959}
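
/*
 * Example: removing 4K at offset 8K from a single free extent covering
 * 0..16K takes the "else" branch above: the entry is trimmed to 0..8K
 * and relinked, and the 12K..16K tail is re-added through
 * btrfs_add_free_space().
 */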
1960
1961void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1962			   u64 bytes)
1963{
1964	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1965	struct btrfs_free_space *info;
1966	struct rb_node *n;
1967	int count = 0;
1968
1969	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1970		info = rb_entry(n, struct btrfs_free_space, offset_index);
1971		if (info->bytes >= bytes)
1972			count++;
1973		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
1974		       (unsigned long long)info->offset,
1975		       (unsigned long long)info->bytes,
1976		       (info->bitmap) ? "yes" : "no");
1977	}
1978	printk(KERN_INFO "block group has cluster?: %s\n",
1979	       list_empty(&block_group->cluster_list) ? "no" : "yes");
1980	printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
1981	       count, (unsigned long long)bytes);
1982}
1983
1984void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1985{
1986	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1987
1988	spin_lock_init(&ctl->tree_lock);
1989	ctl->unit = block_group->sectorsize;
1990	ctl->start = block_group->key.objectid;
1991	ctl->private = block_group;
1992	ctl->op = &free_space_op;
1993
1994	/*
1995	 * we only want to have 32k of ram per block group for keeping
1996	 * track of free space, and if we pass 1/2 of that we want to
1997	 * start converting things over to using bitmaps
1998	 */
1999	ctl->extents_thresh = ((1024 * 32) / 2) /
2000				sizeof(struct btrfs_free_space);
2001}
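
/*
 * Worked example: on a 64-bit build where struct btrfs_free_space is
 * 64 bytes, extents_thresh is (32768 / 2) / 64 = 256, i.e. once about
 * 256 extent entries are in use we start moving free space into
 * bitmaps instead.
 */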
2002
2003/*
2004 * for a given cluster, put all of its extents back into the free
2005 * space cache.  If the block group passed doesn't match the block group
2006 * pointed to by the cluster, someone else raced in and freed the
2007 * cluster already.  In that case, we just return without changing anything.
2008 */
2009static int
2010__btrfs_return_cluster_to_free_space(
2011			     struct btrfs_block_group_cache *block_group,
2012			     struct btrfs_free_cluster *cluster)
2013{
2014	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2015	struct btrfs_free_space *entry;
2016	struct rb_node *node;
2017
2018	spin_lock(&cluster->lock);
2019	if (cluster->block_group != block_group)
2020		goto out;
2021
2022	cluster->block_group = NULL;
2023	cluster->window_start = 0;
2024	list_del_init(&cluster->block_group_list);
2025
2026	node = rb_first(&cluster->root);
2027	while (node) {
2028		bool bitmap;
2029
2030		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2031		node = rb_next(&entry->offset_index);
2032		rb_erase(&entry->offset_index, &cluster->root);
2033
2034		bitmap = (entry->bitmap != NULL);
2035		if (!bitmap)
2036			try_merge_free_space(ctl, entry, false);
2037		tree_insert_offset(&ctl->free_space_offset,
2038				   entry->offset, &entry->offset_index, bitmap);
2039	}
2040	cluster->root = RB_ROOT;
2041
2042out:
2043	spin_unlock(&cluster->lock);
2044	btrfs_put_block_group(block_group);
2045	return 0;
2046}
2047
2048void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
2049{
2050	struct btrfs_free_space *info;
2051	struct rb_node *node;
2052
2053	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2054		info = rb_entry(node, struct btrfs_free_space, offset_index);
2055		if (!info->bitmap) {
2056			unlink_free_space(ctl, info);
2057			kmem_cache_free(btrfs_free_space_cachep, info);
2058		} else {
2059			free_bitmap(ctl, info);
2060		}
2061		if (need_resched()) {
2062			spin_unlock(&ctl->tree_lock);
2063			cond_resched();
2064			spin_lock(&ctl->tree_lock);
2065		}
2066	}
2067}
2068
2069void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2070{
2071	spin_lock(&ctl->tree_lock);
2072	__btrfs_remove_free_space_cache_locked(ctl);
2073	spin_unlock(&ctl->tree_lock);
2074}
2075
2076void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2077{
2078	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2079	struct btrfs_free_cluster *cluster;
2080	struct list_head *head;
2081
2082	spin_lock(&ctl->tree_lock);
2083	while ((head = block_group->cluster_list.next) !=
2084	       &block_group->cluster_list) {
2085		cluster = list_entry(head, struct btrfs_free_cluster,
2086				     block_group_list);
2087
2088		WARN_ON(cluster->block_group != block_group);
2089		__btrfs_return_cluster_to_free_space(block_group, cluster);
2090		if (need_resched()) {
2091			spin_unlock(&ctl->tree_lock);
2092			cond_resched();
2093			spin_lock(&ctl->tree_lock);
2094		}
2095	}
2096	__btrfs_remove_free_space_cache_locked(ctl);
2097	spin_unlock(&ctl->tree_lock);
2098}
2100
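/*
 * Carve 'bytes' out of the free space cache (searching for at least
 * bytes + empty_size) and return the start offset, or 0 if nothing
 * large enough is tracked.  Bitmap entries get their bits cleared;
 * extent entries are shrunk in place, or freed entirely once empty.
 */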
2101u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2102			       u64 offset, u64 bytes, u64 empty_size)
2103{
2104	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2105	struct btrfs_free_space *entry = NULL;
2106	u64 bytes_search = bytes + empty_size;
2107	u64 ret = 0;
2108
2109	spin_lock(&ctl->tree_lock);
2110	entry = find_free_space(ctl, &offset, &bytes_search);
2111	if (!entry)
2112		goto out;
2113
2114	ret = offset;
2115	if (entry->bitmap) {
2116		bitmap_clear_bits(ctl, entry, offset, bytes);
2117		if (!entry->bytes)
2118			free_bitmap(ctl, entry);
2119	} else {
2120		unlink_free_space(ctl, entry);
2121		entry->offset += bytes;
2122		entry->bytes -= bytes;
2123		if (!entry->bytes)
2124			kmem_cache_free(btrfs_free_space_cachep, entry);
2125		else
2126			link_free_space(ctl, entry);
2127	}
2128
2129out:
2130	spin_unlock(&ctl->tree_lock);
2131
2132	return ret;
2133}
2134
2135/*
2136 * given a cluster, put all of its extents back into the free space
2137 * cache.  If a block group is passed, this function will only free
2138 * a cluster that belongs to the passed block group.
2139 *
2140 * Otherwise, it'll get a reference on the block group pointed to by the
2141 * cluster and remove the cluster from it.
2142 */
2143int btrfs_return_cluster_to_free_space(
2144			       struct btrfs_block_group_cache *block_group,
2145			       struct btrfs_free_cluster *cluster)
2146{
2147	struct btrfs_free_space_ctl *ctl;
2148	int ret;
2149
2150	/* first, get a safe pointer to the block group */
2151	spin_lock(&cluster->lock);
2152	if (!block_group) {
2153		block_group = cluster->block_group;
2154		if (!block_group) {
2155			spin_unlock(&cluster->lock);
2156			return 0;
2157		}
2158	} else if (cluster->block_group != block_group) {
2159		/* someone else has already freed it, don't redo their work */
2160		spin_unlock(&cluster->lock);
2161		return 0;
2162	}
2163	atomic_inc(&block_group->count);
2164	spin_unlock(&cluster->lock);
2165
2166	ctl = block_group->free_space_ctl;
2167
2168	/* now return any extents the cluster had on it */
2169	spin_lock(&ctl->tree_lock);
2170	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2171	spin_unlock(&ctl->tree_lock);
2172
2173	/* finally drop our ref */
2174	btrfs_put_block_group(block_group);
2175	return ret;
2176}
2177
2178static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2179				   struct btrfs_free_cluster *cluster,
2180				   struct btrfs_free_space *entry,
2181				   u64 bytes, u64 min_start)
2182{
2183	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2184	int err;
2185	u64 search_start = min_start;
2186	u64 search_bytes = bytes;
2187	u64 ret = 0;
2191
2192	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2193	if (err)
2194		return 0;
2195
2196	ret = search_start;
2197	__bitmap_clear_bits(ctl, entry, ret, bytes);
2198
2199	return ret;
2200}
2201
2202/*
2203 * given a cluster, try to allocate 'bytes' from it; returns 0
2204 * if it couldn't find anything suitably large, or a logical disk offset
2205 * if things worked out.
2206 */
2207u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2208			     struct btrfs_free_cluster *cluster, u64 bytes,
2209			     u64 min_start)
2210{
2211	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2212	struct btrfs_free_space *entry = NULL;
2213	struct rb_node *node;
2214	u64 ret = 0;
2215
2216	spin_lock(&cluster->lock);
2217	if (bytes > cluster->max_size)
2218		goto out;
2219
2220	if (cluster->block_group != block_group)
2221		goto out;
2222
2223	node = rb_first(&cluster->root);
2224	if (!node)
2225		goto out;
2226
2227	entry = rb_entry(node, struct btrfs_free_space, offset_index);
2228	while (1) {
2229		if (entry->bytes < bytes ||
2230		    (!entry->bitmap && entry->offset < min_start)) {
2231			node = rb_next(&entry->offset_index);
2232			if (!node)
2233				break;
2234			entry = rb_entry(node, struct btrfs_free_space,
2235					 offset_index);
2236			continue;
2237		}
2238
2239		if (entry->bitmap) {
2240			ret = btrfs_alloc_from_bitmap(block_group,
2241						      cluster, entry, bytes,
2242						      cluster->window_start);
2243			if (ret == 0) {
2244				node = rb_next(&entry->offset_index);
2245				if (!node)
2246					break;
2247				entry = rb_entry(node, struct btrfs_free_space,
2248						 offset_index);
2249				continue;
2250			}
2251			cluster->window_start += bytes;
2252		} else {
2253			ret = entry->offset;
2254
2255			entry->offset += bytes;
2256			entry->bytes -= bytes;
2257		}
2258
2259		if (entry->bytes == 0)
2260			rb_erase(&entry->offset_index, &cluster->root);
2261		break;
2262	}
2263out:
2264	spin_unlock(&cluster->lock);
2265
2266	if (!ret)
2267		return 0;
2268
2269	spin_lock(&ctl->tree_lock);
2270
2271	ctl->free_space -= bytes;
2272	if (entry->bytes == 0) {
2273		ctl->free_extents--;
2274		if (entry->bitmap) {
2275			kfree(entry->bitmap);
2276			ctl->total_bitmaps--;
2277			ctl->op->recalc_thresholds(ctl);
2278		}
2279		kmem_cache_free(btrfs_free_space_cachep, entry);
2280	}
2281
2282	spin_unlock(&ctl->tree_lock);
2283
2284	return ret;
2285}
2286
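/*
 * Build a cluster out of a single bitmap entry: scan for runs of set
 * bits of at least min_bits, accumulating runs until we have want_bits
 * in total and one run of at least cont1_bytes, then move the entry
 * from the free space rbtree into the cluster's private rbtree.
 */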
2287static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2288				struct btrfs_free_space *entry,
2289				struct btrfs_free_cluster *cluster,
2290				u64 offset, u64 bytes,
2291				u64 cont1_bytes, u64 min_bytes)
2292{
2293	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2294	unsigned long next_zero;
2295	unsigned long i;
2296	unsigned long want_bits;
2297	unsigned long min_bits;
2298	unsigned long found_bits;
2299	unsigned long start = 0;
2300	unsigned long total_found = 0;
2301	int ret;
2302
2303	i = offset_to_bit(entry->offset, block_group->sectorsize,
2304			  max_t(u64, offset, entry->offset));
2305	want_bits = bytes_to_bits(bytes, block_group->sectorsize);
2306	min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
2307
2308again:
2309	found_bits = 0;
2310	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
2311	     i < BITS_PER_BITMAP;
2312	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
2313		next_zero = find_next_zero_bit(entry->bitmap,
2314					       BITS_PER_BITMAP, i);
2315		if (next_zero - i >= min_bits) {
2316			found_bits = next_zero - i;
2317			break;
2318		}
2319		i = next_zero;
2320	}
2321
2322	if (!found_bits)
2323		return -ENOSPC;
2324
2325	if (!total_found) {
2326		start = i;
2327		cluster->max_size = 0;
2328	}
2329
2330	total_found += found_bits;
2331
2332	if (cluster->max_size < found_bits * block_group->sectorsize)
2333		cluster->max_size = found_bits * block_group->sectorsize;
2334
2335	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2336		i = next_zero + 1;
2337		goto again;
2338	}
2339
2340	cluster->window_start = start * block_group->sectorsize +
2341		entry->offset;
2342	rb_erase(&entry->offset_index, &ctl->free_space_offset);
2343	ret = tree_insert_offset(&cluster->root, entry->offset,
2344				 &entry->offset_index, 1);
2345	BUG_ON(ret); /* -EEXIST; Logic error */
2346
2347	trace_btrfs_setup_cluster(block_group, cluster,
2348				  total_found * block_group->sectorsize, 1);
2349	return 0;
2350}
2351
2352/*
2353 * This searches the block group for just extents to fill the cluster with.
2354 * Try to find a cluster with at least bytes total bytes, at least one
2355 * extent of cont1_bytes, and other extents of at least min_bytes.
2356 */
2357static noinline int
2358setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2359			struct btrfs_free_cluster *cluster,
2360			struct list_head *bitmaps, u64 offset, u64 bytes,
2361			u64 cont1_bytes, u64 min_bytes)
2362{
2363	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2364	struct btrfs_free_space *first = NULL;
2365	struct btrfs_free_space *entry = NULL;
2366	struct btrfs_free_space *last;
2367	struct rb_node *node;
2368	u64 window_start;
2369	u64 window_free;
2370	u64 max_extent;
2371	u64 total_size = 0;
2372
2373	entry = tree_search_offset(ctl, offset, 0, 1);
2374	if (!entry)
2375		return -ENOSPC;
2376
2377	/*
2378	 * We don't want bitmaps, so just move along until we find a normal
2379	 * extent entry.
2380	 */
2381	while (entry->bitmap || entry->bytes < min_bytes) {
2382		if (entry->bitmap && list_empty(&entry->list))
2383			list_add_tail(&entry->list, bitmaps);
2384		node = rb_next(&entry->offset_index);
2385		if (!node)
2386			return -ENOSPC;
2387		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2388	}
2389
2390	window_start = entry->offset;
2391	window_free = entry->bytes;
2392	max_extent = entry->bytes;
2393	first = entry;
2394	last = entry;
2395
2396	for (node = rb_next(&entry->offset_index); node;
2397	     node = rb_next(&entry->offset_index)) {
2398		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2399
2400		if (entry->bitmap) {
2401			if (list_empty(&entry->list))
2402				list_add_tail(&entry->list, bitmaps);
2403			continue;
2404		}
2405
2406		if (entry->bytes < min_bytes)
2407			continue;
2408
2409		last = entry;
2410		window_free += entry->bytes;
2411		if (entry->bytes > max_extent)
2412			max_extent = entry->bytes;
2413	}
2414
2415	if (window_free < bytes || max_extent < cont1_bytes)
2416		return -ENOSPC;
2417
2418	cluster->window_start = first->offset;
2419
2420	node = &first->offset_index;
2421
2422	/*
2423	 * now we've found our entries, pull them out of the free space
2424	 * cache and put them into the cluster rbtree
2425	 */
2426	do {
2427		int ret;
2428
2429		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2430		node = rb_next(&entry->offset_index);
2431		if (entry->bitmap || entry->bytes < min_bytes)
2432			continue;
2433
2434		rb_erase(&entry->offset_index, &ctl->free_space_offset);
2435		ret = tree_insert_offset(&cluster->root, entry->offset,
2436					 &entry->offset_index, 0);
2437		total_size += entry->bytes;
2438		BUG_ON(ret); /* -EEXIST; Logic error */
2439	} while (node && entry != last);
2440
2441	cluster->max_size = max_extent;
2442	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2443	return 0;
2444}
2445
2446/*
2447 * This specifically looks for bitmaps that may work in the cluster, we assume
2448 * that we have already failed to find extents that will work.
2449 */
2450static noinline int
2451setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2452		     struct btrfs_free_cluster *cluster,
2453		     struct list_head *bitmaps, u64 offset, u64 bytes,
2454		     u64 cont1_bytes, u64 min_bytes)
2455{
2456	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2457	struct btrfs_free_space *entry;
2458	int ret = -ENOSPC;
2459	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2460
2461	if (ctl->total_bitmaps == 0)
2462		return -ENOSPC;
2463
2464	/*
2465	 * The bitmap that covers offset won't be in the list unless offset
2466	 * is just its start offset.
2467	 */
2468	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2469	if (entry->offset != bitmap_offset) {
2470		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2471		if (entry && list_empty(&entry->list))
2472			list_add(&entry->list, bitmaps);
2473	}
2474
2475	list_for_each_entry(entry, bitmaps, list) {
2476		if (entry->bytes < bytes)
2477			continue;
2478		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2479					   bytes, cont1_bytes, min_bytes);
2480		if (!ret)
2481			return 0;
2482	}
2483
2484	/*
2485	 * The bitmaps list has all the bitmaps that record free space
2486	 * starting after offset, so no more search is required.
2487	 */
2488	return -ENOSPC;
2489}
2490
2491/*
2492 * here we try to find a cluster of blocks in a block group.  The goal
2493 * is to find at least bytes+empty_size.
2494 * We might not find them all in one contiguous area.
2495 *
2496 * returns zero and sets up cluster if things worked out, otherwise
2497 * it returns -ENOSPC.
2498 */
2499int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2500			     struct btrfs_root *root,
2501			     struct btrfs_block_group_cache *block_group,
2502			     struct btrfs_free_cluster *cluster,
2503			     u64 offset, u64 bytes, u64 empty_size)
2504{
2505	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2506	struct btrfs_free_space *entry, *tmp;
2507	LIST_HEAD(bitmaps);
2508	u64 min_bytes;
2509	u64 cont1_bytes;
2510	int ret;
2511
2512	/*
2513	 * Choose the minimum extent size we'll require for this
2514	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
2515	 * For metadata, allow allocations with smaller extents.  For
2516	 * data, keep it dense.
2517	 */
2518	if (btrfs_test_opt(root, SSD_SPREAD)) {
2519		cont1_bytes = min_bytes = bytes + empty_size;
2520	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2521		cont1_bytes = bytes;
2522		min_bytes = block_group->sectorsize;
2523	} else {
2524		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2525		min_bytes = block_group->sectorsize;
2526	}
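
	/*
	 * Example: a 64K data allocation with no empty_size on a
	 * 4K-sectorsize filesystem (without ssd_spread) yields
	 * cont1_bytes = max(64K, 64K >> 2) = 64K and min_bytes = 4K:
	 * one contiguous 64K run is required, padded out with any
	 * extents of 4K or more.
	 */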
2527
2528	spin_lock(&ctl->tree_lock);
2529
2530	/*
2531	 * If we know we don't have enough space to make a cluster don't even
2532	 * bother doing all the work to try and find one.
2533	 */
2534	if (ctl->free_space < bytes) {
2535		spin_unlock(&ctl->tree_lock);
2536		return -ENOSPC;
2537	}
2538
2539	spin_lock(&cluster->lock);
2540
2541	/* someone already found a cluster, hooray */
2542	if (cluster->block_group) {
2543		ret = 0;
2544		goto out;
2545	}
2546
2547	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2548				 min_bytes);
2549
2550	INIT_LIST_HEAD(&bitmaps);
2551	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2552				      bytes + empty_size,
2553				      cont1_bytes, min_bytes);
2554	if (ret)
2555		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2556					   offset, bytes + empty_size,
2557					   cont1_bytes, min_bytes);
2558
2559	/* Clear our temporary list */
2560	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2561		list_del_init(&entry->list);
2562
2563	if (!ret) {
2564		atomic_inc(&block_group->count);
2565		list_add_tail(&cluster->block_group_list,
2566			      &block_group->cluster_list);
2567		cluster->block_group = block_group;
2568	} else {
2569		trace_btrfs_failed_cluster_setup(block_group);
2570	}
2571out:
2572	spin_unlock(&cluster->lock);
2573	spin_unlock(&ctl->tree_lock);
2574
2575	return ret;
2576}
2577
2578/*
2579 * simple code to zero out a cluster
2580 */
2581void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2582{
2583	spin_lock_init(&cluster->lock);
2584	spin_lock_init(&cluster->refill_lock);
2585	cluster->root = RB_ROOT;
2586	cluster->max_size = 0;
2587	INIT_LIST_HEAD(&cluster->block_group_list);
2588	cluster->block_group = NULL;
2589}
2590
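/*
 * Discard one range: reserve it in the block group so the allocator
 * can't hand it out while the discard is in flight, issue the discard,
 * then return the reserved range to the free space cache.
 */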
2591static int do_trimming(struct btrfs_block_group_cache *block_group,
2592		       u64 *total_trimmed, u64 start, u64 bytes,
2593		       u64 reserved_start, u64 reserved_bytes)
2594{
2595	struct btrfs_space_info *space_info = block_group->space_info;
2596	struct btrfs_fs_info *fs_info = block_group->fs_info;
2597	int ret;
2598	int update = 0;
2599	u64 trimmed = 0;
2600
2601	spin_lock(&space_info->lock);
2602	spin_lock(&block_group->lock);
2603	if (!block_group->ro) {
2604		block_group->reserved += reserved_bytes;
2605		space_info->bytes_reserved += reserved_bytes;
2606		update = 1;
2607	}
2608	spin_unlock(&block_group->lock);
2609	spin_unlock(&space_info->lock);
2610
2611	ret = btrfs_error_discard_extent(fs_info->extent_root,
2612					 start, bytes, &trimmed);
2613	if (!ret)
2614		*total_trimmed += trimmed;
2615
2616	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2617
2618	if (update) {
2619		spin_lock(&space_info->lock);
2620		spin_lock(&block_group->lock);
2621		if (block_group->ro)
2622			space_info->bytes_readonly += reserved_bytes;
2623		block_group->reserved -= reserved_bytes;
2624		space_info->bytes_reserved -= reserved_bytes;
2625		spin_unlock(&space_info->lock);
2626		spin_unlock(&block_group->lock);
2627	}
2628
2629	return ret;
2630}
2631
2632static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2633			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2634{
2635	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2636	struct btrfs_free_space *entry;
2637	struct rb_node *node;
2638	int ret = 0;
2639	u64 extent_start;
2640	u64 extent_bytes;
2641	u64 bytes;
2642
2643	while (start < end) {
2644		spin_lock(&ctl->tree_lock);
2645
2646		if (ctl->free_space < minlen) {
2647			spin_unlock(&ctl->tree_lock);
2648			break;
2649		}
2650
2651		entry = tree_search_offset(ctl, start, 0, 1);
2652		if (!entry) {
2653			spin_unlock(&ctl->tree_lock);
2654			break;
2655		}
2656
2657		/* skip bitmaps */
2658		while (entry->bitmap) {
2659			node = rb_next(&entry->offset_index);
2660			if (!node) {
2661				spin_unlock(&ctl->tree_lock);
2662				goto out;
2663			}
2664			entry = rb_entry(node, struct btrfs_free_space,
2665					 offset_index);
2666		}
2667
2668		if (entry->offset >= end) {
2669			spin_unlock(&ctl->tree_lock);
2670			break;
2671		}
2672
2673		extent_start = entry->offset;
2674		extent_bytes = entry->bytes;
2675		start = max(start, extent_start);
2676		bytes = min(extent_start + extent_bytes, end) - start;
2677		if (bytes < minlen) {
2678			spin_unlock(&ctl->tree_lock);
2679			goto next;
2680		}
2681
2682		unlink_free_space(ctl, entry);
2683		kmem_cache_free(btrfs_free_space_cachep, entry);
2684
2685		spin_unlock(&ctl->tree_lock);
2686
2687		ret = do_trimming(block_group, total_trimmed, start, bytes,
2688				  extent_start, extent_bytes);
2689		if (ret)
2690			break;
2691next:
2692		start += bytes;
2693
2694		if (fatal_signal_pending(current)) {
2695			ret = -ERESTARTSYS;
2696			break;
2697		}
2698
2699		cond_resched();
2700	}
2701out:
2702	return ret;
2703}
2704
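/*
 * Walk the bitmap entries covering [start, end), discarding each free
 * run of at least minlen; offset advances one bitmap window
 * (BITS_PER_BITMAP * ctl->unit bytes) at a time.
 */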
2705static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
2706			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2707{
2708	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2709	struct btrfs_free_space *entry;
2710	int ret = 0;
2711	int ret2;
2712	u64 bytes;
2713	u64 offset = offset_to_bitmap(ctl, start);
2714
2715	while (offset < end) {
2716		bool next_bitmap = false;
2717
2718		spin_lock(&ctl->tree_lock);
2719
2720		if (ctl->free_space < minlen) {
2721			spin_unlock(&ctl->tree_lock);
2722			break;
2723		}
2724
2725		entry = tree_search_offset(ctl, offset, 1, 0);
2726		if (!entry) {
2727			spin_unlock(&ctl->tree_lock);
2728			next_bitmap = true;
2729			goto next;
2730		}
2731
2732		bytes = minlen;
2733		ret2 = search_bitmap(ctl, entry, &start, &bytes);
2734		if (ret2 || start >= end) {
2735			spin_unlock(&ctl->tree_lock);
2736			next_bitmap = true;
2737			goto next;
2738		}
2739
2740		bytes = min(bytes, end - start);
2741		if (bytes < minlen) {
2742			spin_unlock(&ctl->tree_lock);
2743			goto next;
2744		}
2745
2746		bitmap_clear_bits(ctl, entry, start, bytes);
2747		if (entry->bytes == 0)
2748			free_bitmap(ctl, entry);
2749
2750		spin_unlock(&ctl->tree_lock);
2751
2752		ret = do_trimming(block_group, total_trimmed, start, bytes,
2753				  start, bytes);
2754		if (ret)
2755			break;
2756next:
2757		if (next_bitmap) {
2758			offset += BITS_PER_BITMAP * ctl->unit;
2759		} else {
2760			start += bytes;
2761			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
2762				offset += BITS_PER_BITMAP * ctl->unit;
2763		}
2764
2765		if (fatal_signal_pending(current)) {
2766			ret = -ERESTARTSYS;
2767			break;
2768		}
2769
2770		cond_resched();
2771	}
2772
2773	return ret;
2774}
2775
2776int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2777			   u64 *trimmed, u64 start, u64 end, u64 minlen)
2778{
2779	int ret;
2780
2781	*trimmed = 0;
2782
2783	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2784	if (ret)
2785		return ret;
2786
2787	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
2788
2789	return ret;
2790}
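
/*
 * Example usage (sketch): the fstrim path walks block groups and calls
 *
 *	ret = btrfs_trim_block_group(block_group, &trimmed, start, end, minlen);
 *
 * so plain extent entries get discarded first, then bitmap-tracked space.
 */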
2791
2792/*
2793 * Find the left-most item in the cache tree, and then return the
2794 * smallest inode number in the item.
2795 *
2796 * Note: the returned inode number may not be the smallest one in
2797 * the tree, if the left-most item is a bitmap.
2798 */
2799u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2800{
2801	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2802	struct btrfs_free_space *entry = NULL;
2803	u64 ino = 0;
2804
2805	spin_lock(&ctl->tree_lock);
2806
2807	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2808		goto out;
2809
2810	entry = rb_entry(rb_first(&ctl->free_space_offset),
2811			 struct btrfs_free_space, offset_index);
2812
2813	if (!entry->bitmap) {
2814		ino = entry->offset;
2815
2816		unlink_free_space(ctl, entry);
2817		entry->offset++;
2818		entry->bytes--;
2819		if (!entry->bytes)
2820			kmem_cache_free(btrfs_free_space_cachep, entry);
2821		else
2822			link_free_space(ctl, entry);
2823	} else {
2824		u64 offset = 0;
2825		u64 count = 1;
2826		int ret;
2827
2828		ret = search_bitmap(ctl, entry, &offset, &count);
2829		/* Logic error; should be empty if it can't find anything */
2830		BUG_ON(ret);
2831
2832		ino = offset;
2833		bitmap_clear_bits(ctl, entry, offset, 1);
2834		if (entry->bytes == 0)
2835			free_bitmap(ctl, entry);
2836	}
2837out:
2838	spin_unlock(&ctl->tree_lock);
2839
2840	return ino;
2841}
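
/*
 * Example: if the left-most entry is a plain extent starting at inode
 * number 262144 with 10 numbers free, this hands out ino 262144 and
 * shrinks the entry to start at 262145 with 9 left.
 */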
2842
2843struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2844				    struct btrfs_path *path)
2845{
2846	struct inode *inode = NULL;
2847
2848	spin_lock(&root->cache_lock);
2849	if (root->cache_inode)
2850		inode = igrab(root->cache_inode);
2851	spin_unlock(&root->cache_lock);
2852	if (inode)
2853		return inode;
2854
2855	inode = __lookup_free_space_inode(root, path, 0);
2856	if (IS_ERR(inode))
2857		return inode;
2858
2859	spin_lock(&root->cache_lock);
2860	if (!btrfs_fs_closing(root->fs_info))
2861		root->cache_inode = igrab(inode);
2862	spin_unlock(&root->cache_lock);
2863
2864	return inode;
2865}
2866
2867int create_free_ino_inode(struct btrfs_root *root,
2868			  struct btrfs_trans_handle *trans,
2869			  struct btrfs_path *path)
2870{
2871	return __create_free_space_inode(root, trans, path,
2872					 BTRFS_FREE_INO_OBJECTID, 0);
2873}
2874
2875int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2876{
2877	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2878	struct btrfs_path *path;
2879	struct inode *inode;
2880	int ret = 0;
2881	u64 root_gen = btrfs_root_generation(&root->root_item);
2882
2883	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2884		return 0;
2885
2886	/*
2887	 * If we're unmounting, just return, since this does a search on the
2888	 * normal root and not the commit root and we could deadlock.
2889	 */
2890	if (btrfs_fs_closing(fs_info))
2891		return 0;
2892
2893	path = btrfs_alloc_path();
2894	if (!path)
2895		return 0;
2896
2897	inode = lookup_free_ino_inode(root, path);
2898	if (IS_ERR(inode))
2899		goto out;
2900
2901	if (root_gen != BTRFS_I(inode)->generation)
2902		goto out_put;
2903
2904	ret = __load_free_space_cache(root, inode, ctl, path, 0);
2905
2906	if (ret < 0)
2907		printk(KERN_ERR "btrfs: failed to load free ino cache for "
2908		       "root %llu\n", (unsigned long long)root->root_key.objectid);
2909out_put:
2910	iput(inode);
2911out:
2912	btrfs_free_path(path);
2913	return ret;
2914}
2915
2916int btrfs_write_out_ino_cache(struct btrfs_root *root,
2917			      struct btrfs_trans_handle *trans,
2918			      struct btrfs_path *path)
2919{
2920	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2921	struct inode *inode;
2922	int ret;
2923
2924	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2925		return 0;
2926
2927	inode = lookup_free_ino_inode(root, path);
2928	if (IS_ERR(inode))
2929		return 0;
2930
2931	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2932	if (ret) {
2933		btrfs_delalloc_release_metadata(inode, inode->i_size);
2934#ifdef DEBUG
2935		printk(KERN_ERR "btrfs: failed to write free ino cache "
2936		       "for root %llu\n", (unsigned long long)root->root_key.objectid);
2937#endif
2938	}
2939
2940	iput(inode);
2941	return ret;
2942}