Linux v4.10.11: fs/btrfs/free-space-cache.c
   1/*
   2 * Copyright (C) 2008 Red Hat.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 02111-1307, USA.
  17 */
  18
  19#include <linux/pagemap.h>
  20#include <linux/sched.h>
  21#include <linux/slab.h>
  22#include <linux/math64.h>
  23#include <linux/ratelimit.h>
  24#include "ctree.h"
  25#include "free-space-cache.h"
  26#include "transaction.h"
  27#include "disk-io.h"
  28#include "extent_io.h"
  29#include "inode-map.h"
  30#include "volumes.h"
  31
  32#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
  33#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
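    /*
     * BITS_PER_BITMAP is the number of bits one bitmap page holds; each bit
     * represents ctl->unit bytes.  As an illustration, with 4KiB pages and a
     * 4KiB sectorsize one bitmap covers 32768 * 4KiB = 128MiB of space.
     * MAX_CACHE_BYTES_PER_GIG caps the cache memory used at 32KiB per 1GiB
     * of block group space; see recalculate_thresholds() below.
     */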
  34
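    /*
     * A free space range that is temporarily removed from the rbtree while
     * it is being trimmed.  It stays on ctl->trimming_ranges so that cache
     * writeout still records the range (see write_cache_extent_entries()).
     */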
  35struct btrfs_trim_range {
  36	u64 start;
  37	u64 bytes;
  38	struct list_head list;
  39};
  40
  41static int link_free_space(struct btrfs_free_space_ctl *ctl,
  42			   struct btrfs_free_space *info);
  43static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
  44			      struct btrfs_free_space *info);
  45static int btrfs_wait_cache_io_root(struct btrfs_root *root,
  46			     struct btrfs_trans_handle *trans,
  47			     struct btrfs_io_ctl *io_ctl,
  48			     struct btrfs_path *path);
  49
  50static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
  51					       struct btrfs_path *path,
  52					       u64 offset)
  53{
  54	struct btrfs_fs_info *fs_info = root->fs_info;
  55	struct btrfs_key key;
  56	struct btrfs_key location;
  57	struct btrfs_disk_key disk_key;
  58	struct btrfs_free_space_header *header;
  59	struct extent_buffer *leaf;
  60	struct inode *inode = NULL;
  61	int ret;
  62
  63	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  64	key.offset = offset;
  65	key.type = 0;
  66
  67	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  68	if (ret < 0)
  69		return ERR_PTR(ret);
  70	if (ret > 0) {
  71		btrfs_release_path(path);
  72		return ERR_PTR(-ENOENT);
  73	}
  74
  75	leaf = path->nodes[0];
  76	header = btrfs_item_ptr(leaf, path->slots[0],
  77				struct btrfs_free_space_header);
  78	btrfs_free_space_key(leaf, header, &disk_key);
  79	btrfs_disk_key_to_cpu(&location, &disk_key);
  80	btrfs_release_path(path);
  81
  82	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
  83	if (IS_ERR(inode))
  84		return inode;
  85	if (is_bad_inode(inode)) {
  86		iput(inode);
  87		return ERR_PTR(-ENOENT);
  88	}
  89
  90	mapping_set_gfp_mask(inode->i_mapping,
  91			mapping_gfp_constraint(inode->i_mapping,
  92			~(__GFP_FS | __GFP_HIGHMEM)));
  93
  94	return inode;
  95}
  96
  97struct inode *lookup_free_space_inode(struct btrfs_root *root,
  98				      struct btrfs_block_group_cache
  99				      *block_group, struct btrfs_path *path)
 100{
 101	struct inode *inode = NULL;
 102	struct btrfs_fs_info *fs_info = root->fs_info;
 103	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
 104
 105	spin_lock(&block_group->lock);
 106	if (block_group->inode)
 107		inode = igrab(block_group->inode);
 108	spin_unlock(&block_group->lock);
 109	if (inode)
 110		return inode;
 111
 112	inode = __lookup_free_space_inode(root, path,
 113					  block_group->key.objectid);
 114	if (IS_ERR(inode))
 115		return inode;
 116
 117	spin_lock(&block_group->lock);
 118	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
 119		btrfs_info(fs_info, "Old style space inode found, converting.");
 120		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
 121			BTRFS_INODE_NODATACOW;
 122		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 123	}
 124
 125	if (!block_group->iref) {
 126		block_group->inode = igrab(inode);
 127		block_group->iref = 1;
 128	}
 129	spin_unlock(&block_group->lock);
 130
 131	return inode;
 132}
 133
 134static int __create_free_space_inode(struct btrfs_root *root,
 135				     struct btrfs_trans_handle *trans,
 136				     struct btrfs_path *path,
 137				     u64 ino, u64 offset)
 138{
 139	struct btrfs_key key;
 140	struct btrfs_disk_key disk_key;
 141	struct btrfs_free_space_header *header;
 142	struct btrfs_inode_item *inode_item;
 143	struct extent_buffer *leaf;
 144	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
 145	int ret;
 146
 147	ret = btrfs_insert_empty_inode(trans, root, path, ino);
 148	if (ret)
 149		return ret;
 150
 151	/* We inline CRCs for the free disk space cache */
 152	if (ino != BTRFS_FREE_INO_OBJECTID)
 153		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
 154
 155	leaf = path->nodes[0];
 156	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 157				    struct btrfs_inode_item);
 158	btrfs_item_key(leaf, &disk_key, path->slots[0]);
 159	memzero_extent_buffer(leaf, (unsigned long)inode_item,
 160			     sizeof(*inode_item));
 161	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
 162	btrfs_set_inode_size(leaf, inode_item, 0);
 163	btrfs_set_inode_nbytes(leaf, inode_item, 0);
 164	btrfs_set_inode_uid(leaf, inode_item, 0);
 165	btrfs_set_inode_gid(leaf, inode_item, 0);
 166	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
 167	btrfs_set_inode_flags(leaf, inode_item, flags);
 168	btrfs_set_inode_nlink(leaf, inode_item, 1);
 169	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
 170	btrfs_set_inode_block_group(leaf, inode_item, offset);
 171	btrfs_mark_buffer_dirty(leaf);
 172	btrfs_release_path(path);
 173
 174	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 175	key.offset = offset;
 176	key.type = 0;
 177	ret = btrfs_insert_empty_item(trans, root, path, &key,
 178				      sizeof(struct btrfs_free_space_header));
 179	if (ret < 0) {
 180		btrfs_release_path(path);
 181		return ret;
 182	}
 183
 184	leaf = path->nodes[0];
 185	header = btrfs_item_ptr(leaf, path->slots[0],
 186				struct btrfs_free_space_header);
 187	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
 188	btrfs_set_free_space_key(leaf, header, &disk_key);
 189	btrfs_mark_buffer_dirty(leaf);
 190	btrfs_release_path(path);
 191
 192	return 0;
 193}
 194
 195int create_free_space_inode(struct btrfs_root *root,
 196			    struct btrfs_trans_handle *trans,
 197			    struct btrfs_block_group_cache *block_group,
 198			    struct btrfs_path *path)
 199{
 200	int ret;
 201	u64 ino;
 202
 203	ret = btrfs_find_free_objectid(root, &ino);
 204	if (ret < 0)
 205		return ret;
 206
 207	return __create_free_space_inode(root, trans, path, ino,
 208					 block_group->key.objectid);
 209}
 210
 211int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
 212				       struct btrfs_block_rsv *rsv)
 213{
 214	u64 needed_bytes;
 215	int ret;
 216
 217	/* 1 for slack space, 1 for updating the inode */
 218	needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
 219		btrfs_calc_trans_metadata_size(fs_info, 1);
 220
 221	spin_lock(&rsv->lock);
 222	if (rsv->reserved < needed_bytes)
 223		ret = -ENOSPC;
 224	else
 225		ret = 0;
 226	spin_unlock(&rsv->lock);
 227	return ret;
 228}
 229
 230int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 231				    struct btrfs_trans_handle *trans,
 232				    struct btrfs_block_group_cache *block_group,
 233				    struct inode *inode)
 234{
 235	int ret = 0;
 236	struct btrfs_path *path = btrfs_alloc_path();
 237	bool locked = false;
 238
 239	if (!path) {
 240		ret = -ENOMEM;
 241		goto fail;
 242	}
 243
 244	if (block_group) {
 245		locked = true;
 246		mutex_lock(&trans->transaction->cache_write_mutex);
 247		if (!list_empty(&block_group->io_list)) {
 248			list_del_init(&block_group->io_list);
 249
 250			btrfs_wait_cache_io(trans, block_group, path);
 251			btrfs_put_block_group(block_group);
 252		}
 253
 254		/*
 255		 * Now that we've truncated the cache away, it's no longer
 256		 * set up or written.
 257		 */
 258		spin_lock(&block_group->lock);
 259		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 260		spin_unlock(&block_group->lock);
 261	}
 262	btrfs_free_path(path);
 263
 264	btrfs_i_size_write(inode, 0);
 265	truncate_pagecache(inode, 0);
 266
 267	/*
 268	 * We don't need an orphan item because truncating the free space cache
 269	 * will never be split across transactions.
 270	 * We don't need to check for -EAGAIN because we're a free space
 271	 * cache inode.
 272	 */
 273	ret = btrfs_truncate_inode_items(trans, root, inode,
 274					 0, BTRFS_EXTENT_DATA_KEY);
 275	if (ret)
 276		goto fail;
 277
 278	ret = btrfs_update_inode(trans, root, inode);
 279
 280fail:
 281	if (locked)
 282		mutex_unlock(&trans->transaction->cache_write_mutex);
 283	if (ret)
 284		btrfs_abort_transaction(trans, ret);
 285
 286	return ret;
 287}
 288
 289static int readahead_cache(struct inode *inode)
 290{
 291	struct file_ra_state *ra;
 292	unsigned long last_index;
 293
 294	ra = kzalloc(sizeof(*ra), GFP_NOFS);
 295	if (!ra)
 296		return -ENOMEM;
 297
 298	file_ra_state_init(ra, inode->i_mapping);
 299	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 300
 301	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
 302
 303	kfree(ra);
 304
 305	return 0;
 306}
 307
 308static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 309		       int write)
 310{
 311	int num_pages;
 312	int check_crcs = 0;
 313
 314	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 315
 316	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
 317		check_crcs = 1;
 318
 319	/* Make sure we can fit our crcs into the first page */
 320	if (write && check_crcs &&
 321	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
 322		return -ENOSPC;
 323
 324	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
 325
 326	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
 327	if (!io_ctl->pages)
 328		return -ENOMEM;
 329
 330	io_ctl->num_pages = num_pages;
 331	io_ctl->fs_info = btrfs_sb(inode->i_sb);
 332	io_ctl->check_crcs = check_crcs;
 333	io_ctl->inode = inode;
 334
 335	return 0;
 336}
 337
 338static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 339{
 340	kfree(io_ctl->pages);
 341	io_ctl->pages = NULL;
 342}
 343
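    /*
     * The cache pages are allocated without __GFP_HIGHMEM and addressed via
     * page_address(), so there is nothing to actually unmap; this just
     * resets the write cursor for the current page.
     */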
 344static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
 345{
 346	if (io_ctl->cur) {
 347		io_ctl->cur = NULL;
 348		io_ctl->orig = NULL;
 349	}
 350}
 351
 352static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
 353{
 354	ASSERT(io_ctl->index < io_ctl->num_pages);
 355	io_ctl->page = io_ctl->pages[io_ctl->index++];
 356	io_ctl->cur = page_address(io_ctl->page);
 357	io_ctl->orig = io_ctl->cur;
 358	io_ctl->size = PAGE_SIZE;
 359	if (clear)
 360		memset(io_ctl->cur, 0, PAGE_SIZE);
 361}
 362
 363static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
 364{
 365	int i;
 366
 367	io_ctl_unmap_page(io_ctl);
 368
 369	for (i = 0; i < io_ctl->num_pages; i++) {
 370		if (io_ctl->pages[i]) {
 371			ClearPageChecked(io_ctl->pages[i]);
 372			unlock_page(io_ctl->pages[i]);
 373			put_page(io_ctl->pages[i]);
 374		}
 375	}
 376}
 377
 378static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 379				int uptodate)
 380{
 381	struct page *page;
 382	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 383	int i;
 384
 385	for (i = 0; i < io_ctl->num_pages; i++) {
 386		page = find_or_create_page(inode->i_mapping, i, mask);
 387		if (!page) {
 388			io_ctl_drop_pages(io_ctl);
 389			return -ENOMEM;
 390		}
 391		io_ctl->pages[i] = page;
 392		if (uptodate && !PageUptodate(page)) {
 393			btrfs_readpage(NULL, page);
 394			lock_page(page);
 395			if (!PageUptodate(page)) {
 396				btrfs_err(BTRFS_I(inode)->root->fs_info,
 397					   "error reading free space cache");
 398				io_ctl_drop_pages(io_ctl);
 399				return -EIO;
 400			}
 401		}
 402	}
 403
 404	for (i = 0; i < io_ctl->num_pages; i++) {
 405		clear_page_dirty_for_io(io_ctl->pages[i]);
 406		set_page_extent_mapped(io_ctl->pages[i]);
 407	}
 408
 409	return 0;
 410}
 411
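    /*
     * On-disk layout of the cache file, as handled by the helpers below:
     * when crcs are used, page 0 begins with an array of one u32 crc per
     * page, followed by a u64 generation and then packed
     * btrfs_free_space_entry records; without crcs only the generation
     * precedes the entries.  Bitmaps are stored as whole pages after all
     * of the entries.
     */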
 412static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 413{
 414	__le64 *val;
 415
 416	io_ctl_map_page(io_ctl, 1);
 417
 418	/*
 419	 * Skip the csum areas.  If we don't check crcs then we just have a
 420	 * 64bit chunk at the front of the first page.
 421	 */
 422	if (io_ctl->check_crcs) {
 423		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
 424		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
 425	} else {
 426		io_ctl->cur += sizeof(u64);
 427		io_ctl->size -= sizeof(u64) * 2;
 428	}
 429
 430	val = io_ctl->cur;
 431	*val = cpu_to_le64(generation);
 432	io_ctl->cur += sizeof(u64);
 433}
 434
 435static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 436{
 437	__le64 *gen;
 438
 439	/*
 440	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
 441	 * chunk at the front of the first page.
 442	 */
 443	if (io_ctl->check_crcs) {
 444		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
 445		io_ctl->size -= sizeof(u64) +
 446			(sizeof(u32) * io_ctl->num_pages);
 447	} else {
 448		io_ctl->cur += sizeof(u64);
 449		io_ctl->size -= sizeof(u64) * 2;
 450	}
 451
 452	gen = io_ctl->cur;
 453	if (le64_to_cpu(*gen) != generation) {
 454		btrfs_err_rl(io_ctl->fs_info,
 455			"space cache generation (%llu) does not match inode (%llu)",
 456				le64_to_cpu(*gen), generation);
 457		io_ctl_unmap_page(io_ctl);
 458		return -EIO;
 459	}
 460	io_ctl->cur += sizeof(u64);
 461	return 0;
 462}
 463
 464static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
 465{
 466	u32 *tmp;
 467	u32 crc = ~(u32)0;
 468	unsigned offset = 0;
 469
 470	if (!io_ctl->check_crcs) {
 471		io_ctl_unmap_page(io_ctl);
 472		return;
 473	}
 474
 475	if (index == 0)
 476		offset = sizeof(u32) * io_ctl->num_pages;
 477
 478	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 479			      PAGE_SIZE - offset);
 480	btrfs_csum_final(crc, (u8 *)&crc);
 481	io_ctl_unmap_page(io_ctl);
 482	tmp = page_address(io_ctl->pages[0]);
 483	tmp += index;
 484	*tmp = crc;
 485}
 486
 487static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
 488{
 489	u32 *tmp, val;
 490	u32 crc = ~(u32)0;
 491	unsigned offset = 0;
 492
 493	if (!io_ctl->check_crcs) {
 494		io_ctl_map_page(io_ctl, 0);
 495		return 0;
 496	}
 497
 498	if (index == 0)
 499		offset = sizeof(u32) * io_ctl->num_pages;
 500
 501	tmp = page_address(io_ctl->pages[0]);
 502	tmp += index;
 503	val = *tmp;
 504
 505	io_ctl_map_page(io_ctl, 0);
 506	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 507			      PAGE_SIZE - offset);
 508	btrfs_csum_final(crc, (u8 *)&crc);
 509	if (val != crc) {
 510		btrfs_err_rl(io_ctl->fs_info,
 511			"csum mismatch on free space cache");
 512		io_ctl_unmap_page(io_ctl);
 513		return -EIO;
 514	}
 515
 516	return 0;
 517}
 518
 519static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
 520			    void *bitmap)
 521{
 522	struct btrfs_free_space_entry *entry;
 523
 524	if (!io_ctl->cur)
 525		return -ENOSPC;
 526
 527	entry = io_ctl->cur;
 528	entry->offset = cpu_to_le64(offset);
 529	entry->bytes = cpu_to_le64(bytes);
 530	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
 531		BTRFS_FREE_SPACE_EXTENT;
 532	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
 533	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
 534
 535	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
 536		return 0;
 537
 538	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 539
 540	/* No more pages to map */
 541	if (io_ctl->index >= io_ctl->num_pages)
 542		return 0;
 543
 544	/* map the next page */
 545	io_ctl_map_page(io_ctl, 1);
 546	return 0;
 547}
 548
 549static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
 550{
 551	if (!io_ctl->cur)
 552		return -ENOSPC;
 553
 554	/*
 555	 * If we aren't at the start of the current page, unmap this one and
 556	 * map the next one if there is any left.
 557	 */
 558	if (io_ctl->cur != io_ctl->orig) {
 559		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 560		if (io_ctl->index >= io_ctl->num_pages)
 561			return -ENOSPC;
 562		io_ctl_map_page(io_ctl, 0);
 563	}
 564
 565	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
 566	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 567	if (io_ctl->index < io_ctl->num_pages)
 568		io_ctl_map_page(io_ctl, 0);
 569	return 0;
 570}
 571
 572static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
 573{
 574	/*
 575	 * If we're not on the boundary we know we've modified the page and we
 576	 * need to crc the page.
 577	 */
 578	if (io_ctl->cur != io_ctl->orig)
 579		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 580	else
 581		io_ctl_unmap_page(io_ctl);
 582
 583	while (io_ctl->index < io_ctl->num_pages) {
 584		io_ctl_map_page(io_ctl, 1);
 585		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 586	}
 587}
 588
 589static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
 590			    struct btrfs_free_space *entry, u8 *type)
 591{
 592	struct btrfs_free_space_entry *e;
 593	int ret;
 594
 595	if (!io_ctl->cur) {
 596		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 597		if (ret)
 598			return ret;
 599	}
 600
 601	e = io_ctl->cur;
 602	entry->offset = le64_to_cpu(e->offset);
 603	entry->bytes = le64_to_cpu(e->bytes);
 604	*type = e->type;
 605	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
 606	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
 607
 608	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
 609		return 0;
 610
 611	io_ctl_unmap_page(io_ctl);
 612
 613	return 0;
 614}
 615
 616static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
 617			      struct btrfs_free_space *entry)
 618{
 619	int ret;
 620
 621	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 622	if (ret)
 623		return ret;
 624
 625	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
 626	io_ctl_unmap_page(io_ctl);
 627
 628	return 0;
 629}
 630
 631/*
 632 * Since we attach pinned extents after the fact, we can have contiguous
 633 * sections of free space that are split across entries.  This is a problem
 634 * for tree logging: replay could have allocated across what now looks like
 635 * two entries, since the entries would have been merged when the pinned
 636 * extents were added back to the free space cache.  So run through the
 637 * space cache we just loaded and merge contiguous entries.  This keeps log
 638 * replay from blowing up and makes for nicer allocator behavior.
 639 */
 640static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
 641{
 642	struct btrfs_free_space *e, *prev = NULL;
 643	struct rb_node *n;
 644
 645again:
 646	spin_lock(&ctl->tree_lock);
 647	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
 648		e = rb_entry(n, struct btrfs_free_space, offset_index);
 649		if (!prev)
 650			goto next;
 651		if (e->bitmap || prev->bitmap)
 652			goto next;
 653		if (prev->offset + prev->bytes == e->offset) {
 654			unlink_free_space(ctl, prev);
 655			unlink_free_space(ctl, e);
 656			prev->bytes += e->bytes;
 657			kmem_cache_free(btrfs_free_space_cachep, e);
 658			link_free_space(ctl, prev);
 659			prev = NULL;
 660			spin_unlock(&ctl->tree_lock);
 661			goto again;
 662		}
 663next:
 664		prev = e;
 665	}
 666	spin_unlock(&ctl->tree_lock);
 667}
 668
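    /*
     * Load one free space cache: read the free space header item for the
     * expected generation and the entry/bitmap counts, validate the
     * generation against the inode, then parse the extent/bitmap entries
     * followed by the bitmap pages from the inode's data.
     */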
 669static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 670				   struct btrfs_free_space_ctl *ctl,
 671				   struct btrfs_path *path, u64 offset)
 672{
 673	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 674	struct btrfs_free_space_header *header;
 675	struct extent_buffer *leaf;
 676	struct btrfs_io_ctl io_ctl;
 677	struct btrfs_key key;
 678	struct btrfs_free_space *e, *n;
 679	LIST_HEAD(bitmaps);
 680	u64 num_entries;
 681	u64 num_bitmaps;
 682	u64 generation;
 683	u8 type;
 684	int ret = 0;
 685
 686	/* Nothing in the space cache, goodbye */
 687	if (!i_size_read(inode))
 688		return 0;
 689
 690	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 691	key.offset = offset;
 692	key.type = 0;
 693
 694	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 695	if (ret < 0)
 696		return 0;
 697	else if (ret > 0) {
 698		btrfs_release_path(path);
 699		return 0;
 700	}
 701
 702	ret = -1;
 703
 704	leaf = path->nodes[0];
 705	header = btrfs_item_ptr(leaf, path->slots[0],
 706				struct btrfs_free_space_header);
 707	num_entries = btrfs_free_space_entries(leaf, header);
 708	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
 709	generation = btrfs_free_space_generation(leaf, header);
 710	btrfs_release_path(path);
 711
 712	if (!BTRFS_I(inode)->generation) {
 713		btrfs_info(fs_info,
 714			   "The free space cache file (%llu) is invalid, skipping it",
 715			   offset);
 716		return 0;
 717	}
 718
 719	if (BTRFS_I(inode)->generation != generation) {
 720		btrfs_err(fs_info,
 721			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
 722			  BTRFS_I(inode)->generation, generation);
 723		return 0;
 724	}
 725
 726	if (!num_entries)
 727		return 0;
 728
 729	ret = io_ctl_init(&io_ctl, inode, 0);
 730	if (ret)
 731		return ret;
 732
 733	ret = readahead_cache(inode);
 734	if (ret)
 735		goto out;
 736
 737	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
 738	if (ret)
 739		goto out;
 740
 741	ret = io_ctl_check_crc(&io_ctl, 0);
 742	if (ret)
 743		goto free_cache;
 744
 745	ret = io_ctl_check_generation(&io_ctl, generation);
 746	if (ret)
 747		goto free_cache;
 748
 749	while (num_entries) {
 750		e = kmem_cache_zalloc(btrfs_free_space_cachep,
 751				      GFP_NOFS);
 752		if (!e)
 753			goto free_cache;
 754
 755		ret = io_ctl_read_entry(&io_ctl, e, &type);
 756		if (ret) {
 757			kmem_cache_free(btrfs_free_space_cachep, e);
 758			goto free_cache;
 759		}
 760
 761		if (!e->bytes) {
 762			kmem_cache_free(btrfs_free_space_cachep, e);
 763			goto free_cache;
 764		}
 765
 766		if (type == BTRFS_FREE_SPACE_EXTENT) {
 767			spin_lock(&ctl->tree_lock);
 768			ret = link_free_space(ctl, e);
 769			spin_unlock(&ctl->tree_lock);
 770			if (ret) {
 771				btrfs_err(fs_info,
 772					"Duplicate entries in free space cache, dumping");
 773				kmem_cache_free(btrfs_free_space_cachep, e);
 774				goto free_cache;
 775			}
 776		} else {
 777			ASSERT(num_bitmaps);
 778			num_bitmaps--;
 779			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
 780			if (!e->bitmap) {
 781				kmem_cache_free(
 782					btrfs_free_space_cachep, e);
 783				goto free_cache;
 784			}
 785			spin_lock(&ctl->tree_lock);
 786			ret = link_free_space(ctl, e);
 787			ctl->total_bitmaps++;
 788			ctl->op->recalc_thresholds(ctl);
 789			spin_unlock(&ctl->tree_lock);
 790			if (ret) {
 791				btrfs_err(fs_info,
 792					"Duplicate entries in free space cache, dumping");
 793				kmem_cache_free(btrfs_free_space_cachep, e);
 794				goto free_cache;
 795			}
 796			list_add_tail(&e->list, &bitmaps);
 797		}
 798
 799		num_entries--;
 800	}
 801
 802	io_ctl_unmap_page(&io_ctl);
 803
 804	/*
 805	 * The bitmaps are stored after all of the entries, in the same
 806	 * order in which the bitmap entries were added to the cache.
 807	 */
 808	list_for_each_entry_safe(e, n, &bitmaps, list) {
 809		list_del_init(&e->list);
 810		ret = io_ctl_read_bitmap(&io_ctl, e);
 811		if (ret)
 812			goto free_cache;
 813	}
 814
 815	io_ctl_drop_pages(&io_ctl);
 816	merge_space_tree(ctl);
 817	ret = 1;
 818out:
 819	io_ctl_free(&io_ctl);
 820	return ret;
 821free_cache:
 822	io_ctl_drop_pages(&io_ctl);
 823	__btrfs_remove_free_space_cache(ctl);
 824	goto out;
 825}
 826
 827int load_free_space_cache(struct btrfs_fs_info *fs_info,
 828			  struct btrfs_block_group_cache *block_group)
 829{
 830	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 831	struct btrfs_root *root = fs_info->tree_root;
 832	struct inode *inode;
 833	struct btrfs_path *path;
 834	int ret = 0;
 835	bool matched;
 836	u64 used = btrfs_block_group_used(&block_group->item);
 837
 838	/*
 839	 * If this block group has been marked to be cleared for one reason or
 840	 * another then we can't trust the on disk cache, so just return.
 841	 */
 842	spin_lock(&block_group->lock);
 843	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
 844		spin_unlock(&block_group->lock);
 845		return 0;
 846	}
 847	spin_unlock(&block_group->lock);
 848
 849	path = btrfs_alloc_path();
 850	if (!path)
 851		return 0;
 852	path->search_commit_root = 1;
 853	path->skip_locking = 1;
 854
 855	inode = lookup_free_space_inode(root, block_group, path);
 856	if (IS_ERR(inode)) {
 857		btrfs_free_path(path);
 858		return 0;
 859	}
 860
 861	/* We may have converted the inode and made the cache invalid. */
 862	spin_lock(&block_group->lock);
 863	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
 864		spin_unlock(&block_group->lock);
 865		btrfs_free_path(path);
 866		goto out;
 867	}
 868	spin_unlock(&block_group->lock);
 869
 870	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
 871				      path, block_group->key.objectid);
 872	btrfs_free_path(path);
 873	if (ret <= 0)
 874		goto out;
 875
 876	spin_lock(&ctl->tree_lock);
 877	matched = (ctl->free_space == (block_group->key.offset - used -
 878				       block_group->bytes_super));
 879	spin_unlock(&ctl->tree_lock);
 880
 881	if (!matched) {
 882		__btrfs_remove_free_space_cache(ctl);
 883		btrfs_warn(fs_info,
 884			   "block group %llu has wrong amount of free space",
 885			   block_group->key.objectid);
 886		ret = -1;
 887	}
 888out:
 889	if (ret < 0) {
 890		/* This cache is bogus, make sure it gets cleared */
 891		spin_lock(&block_group->lock);
 892		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 893		spin_unlock(&block_group->lock);
 894		ret = 0;
 895
 896		btrfs_warn(fs_info,
 897			   "failed to load free space cache for block group %llu, rebuilding it now",
 898			   block_group->key.objectid);
 899	}
 900
 901	iput(inode);
 902	return ret;
 903}
 904
 905static noinline_for_stack
 906int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
 907			      struct btrfs_free_space_ctl *ctl,
 908			      struct btrfs_block_group_cache *block_group,
 909			      int *entries, int *bitmaps,
 910			      struct list_head *bitmap_list)
 911{
 912	int ret;
 913	struct btrfs_free_cluster *cluster = NULL;
 914	struct btrfs_free_cluster *cluster_locked = NULL;
 915	struct rb_node *node = rb_first(&ctl->free_space_offset);
 916	struct btrfs_trim_range *trim_entry;
 917
 918	/* Get the cluster for this block_group if it exists */
 919	if (block_group && !list_empty(&block_group->cluster_list)) {
 920		cluster = list_entry(block_group->cluster_list.next,
 921				     struct btrfs_free_cluster,
 922				     block_group_list);
 923	}
 924
 925	if (!node && cluster) {
 926		cluster_locked = cluster;
 927		spin_lock(&cluster_locked->lock);
 928		node = rb_first(&cluster->root);
 929		cluster = NULL;
 930	}
 931
 932	/* Write out the extent entries */
 933	while (node) {
 934		struct btrfs_free_space *e;
 935
 936		e = rb_entry(node, struct btrfs_free_space, offset_index);
 937		*entries += 1;
 938
 939		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
 940				       e->bitmap);
 941		if (ret)
 942			goto fail;
 943
 944		if (e->bitmap) {
 945			list_add_tail(&e->list, bitmap_list);
 946			*bitmaps += 1;
 947		}
 948		node = rb_next(node);
 949		if (!node && cluster) {
 950			node = rb_first(&cluster->root);
 951			cluster_locked = cluster;
 952			spin_lock(&cluster_locked->lock);
 953			cluster = NULL;
 954		}
 955	}
 956	if (cluster_locked) {
 957		spin_unlock(&cluster_locked->lock);
 958		cluster_locked = NULL;
 959	}
 960
 961	/*
 962	 * Make sure we don't miss any range that was removed from our rbtree
 963	 * because trimming is running. Otherwise after a umount+mount (or crash
 964	 * after committing the transaction) we would leak free space and get
 965	 * an inconsistent free space cache report from fsck.
 966	 */
 967	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
 968		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
 969				       trim_entry->bytes, NULL);
 970		if (ret)
 971			goto fail;
 972		*entries += 1;
 973	}
 974
 975	return 0;
 976fail:
 977	if (cluster_locked)
 978		spin_unlock(&cluster_locked->lock);
 979	return -ENOSPC;
 980}
 981
 982static noinline_for_stack int
 983update_cache_item(struct btrfs_trans_handle *trans,
 984		  struct btrfs_root *root,
 985		  struct inode *inode,
 986		  struct btrfs_path *path, u64 offset,
 987		  int entries, int bitmaps)
 988{
 989	struct btrfs_key key;
 990	struct btrfs_free_space_header *header;
 991	struct extent_buffer *leaf;
 992	int ret;
 993
 994	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 995	key.offset = offset;
 996	key.type = 0;
 997
 998	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 999	if (ret < 0) {
1000		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1001				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
1002				 GFP_NOFS);
1003		goto fail;
1004	}
1005	leaf = path->nodes[0];
1006	if (ret > 0) {
1007		struct btrfs_key found_key;
1008		ASSERT(path->slots[0]);
1009		path->slots[0]--;
1010		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1011		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
1012		    found_key.offset != offset) {
1013			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
1014					 inode->i_size - 1,
1015					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
1016					 NULL, GFP_NOFS);
1017			btrfs_release_path(path);
1018			goto fail;
1019		}
1020	}
1021
1022	BTRFS_I(inode)->generation = trans->transid;
1023	header = btrfs_item_ptr(leaf, path->slots[0],
1024				struct btrfs_free_space_header);
1025	btrfs_set_free_space_entries(leaf, header, entries);
1026	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
1027	btrfs_set_free_space_generation(leaf, header, trans->transid);
1028	btrfs_mark_buffer_dirty(leaf);
1029	btrfs_release_path(path);
1030
1031	return 0;
1032
1033fail:
1034	return -1;
1035}
1036
1037static noinline_for_stack int
1038write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
1039			    struct btrfs_block_group_cache *block_group,
1040			    struct btrfs_io_ctl *io_ctl,
1041			    int *entries)
1042{
1043	u64 start, extent_start, extent_end, len;
1044	struct extent_io_tree *unpin = NULL;
1045	int ret;
1046
1047	if (!block_group)
1048		return 0;
1049
1050	/*
1051	 * We want to add any pinned extents to our free space cache
1052	 * so we don't leak the space.
1053	 *
1054	 * We shouldn't have switched the pinned extents yet, so this is the
1055	 * right one.
1056	 */
1057	unpin = fs_info->pinned_extents;
1058
1059	start = block_group->key.objectid;
1060
1061	while (start < block_group->key.objectid + block_group->key.offset) {
1062		ret = find_first_extent_bit(unpin, start,
1063					    &extent_start, &extent_end,
1064					    EXTENT_DIRTY, NULL);
1065		if (ret)
1066			return 0;
1067
1068		/* This pinned extent is out of our range */
1069		if (extent_start >= block_group->key.objectid +
1070		    block_group->key.offset)
1071			return 0;
1072
1073		extent_start = max(extent_start, start);
1074		extent_end = min(block_group->key.objectid +
1075				 block_group->key.offset, extent_end + 1);
1076		len = extent_end - extent_start;
1077
1078		*entries += 1;
1079		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
1080		if (ret)
1081			return -ENOSPC;
1082
1083		start = extent_end;
1084	}
1085
1086	return 0;
1087}
1088
1089static noinline_for_stack int
1090write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
1091{
1092	struct btrfs_free_space *entry, *next;
1093	int ret;
1094
1095	/* Write out the bitmaps */
1096	list_for_each_entry_safe(entry, next, bitmap_list, list) {
1097		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
1098		if (ret)
1099			return -ENOSPC;
1100		list_del_init(&entry->list);
1101	}
1102
1103	return 0;
1104}
1105
1106static int flush_dirty_cache(struct inode *inode)
1107{
1108	int ret;
1109
1110	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
1111	if (ret)
1112		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
1113				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
1114				 GFP_NOFS);
1115
1116	return ret;
1117}
1118
1119static void noinline_for_stack
1120cleanup_bitmap_list(struct list_head *bitmap_list)
1121{
1122	struct btrfs_free_space *entry, *next;
1123
1124	list_for_each_entry_safe(entry, next, bitmap_list, list)
1125		list_del_init(&entry->list);
1126}
1127
1128static void noinline_for_stack
1129cleanup_write_cache_enospc(struct inode *inode,
1130			   struct btrfs_io_ctl *io_ctl,
1131			   struct extent_state **cached_state,
1132			   struct list_head *bitmap_list)
1133{
1134	io_ctl_drop_pages(io_ctl);
1135	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
1136			     i_size_read(inode) - 1, cached_state,
1137			     GFP_NOFS);
1138}
1139
1140static int __btrfs_wait_cache_io(struct btrfs_root *root,
1141				 struct btrfs_trans_handle *trans,
1142				 struct btrfs_block_group_cache *block_group,
1143				 struct btrfs_io_ctl *io_ctl,
1144				 struct btrfs_path *path, u64 offset)
1145{
1146	int ret;
1147	struct inode *inode = io_ctl->inode;
1148	struct btrfs_fs_info *fs_info;
1149
1150	if (!inode)
1151		return 0;
1152
1153	fs_info = btrfs_sb(inode->i_sb);
1154
1155	/* Flush the dirty pages in the cache file. */
1156	ret = flush_dirty_cache(inode);
1157	if (ret)
1158		goto out;
1159
1160	/* Update the cache item to tell everyone this cache file is valid. */
1161	ret = update_cache_item(trans, root, inode, path, offset,
1162				io_ctl->entries, io_ctl->bitmaps);
1163out:
1164	io_ctl_free(io_ctl);
1165	if (ret) {
1166		invalidate_inode_pages2(inode->i_mapping);
1167		BTRFS_I(inode)->generation = 0;
1168		if (block_group) {
1169#ifdef DEBUG
1170			btrfs_err(fs_info,
1171				  "failed to write free space cache for block group %llu",
1172				  block_group->key.objectid);
1173#endif
1174		}
1175	}
1176	btrfs_update_inode(trans, root, inode);
1177
1178	if (block_group) {
1179		/* the dirty list is protected by the dirty_bgs_lock */
1180		spin_lock(&trans->transaction->dirty_bgs_lock);
1181
1182		/* the disk_cache_state is protected by the block group lock */
1183		spin_lock(&block_group->lock);
1184
1185		/*
1186		 * Only mark this as written if we didn't get put back on
1187		 * the dirty list while waiting for IO.  Otherwise our
1188		 * cache state won't be right, and we won't get written again.
1189		 */
1190		if (!ret && list_empty(&block_group->dirty_list))
1191			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1192		else if (ret)
1193			block_group->disk_cache_state = BTRFS_DC_ERROR;
1194
1195		spin_unlock(&block_group->lock);
1196		spin_unlock(&trans->transaction->dirty_bgs_lock);
1197		io_ctl->inode = NULL;
1198		iput(inode);
1199	}
1200
1201	return ret;
1202
1203}
1204
1205static int btrfs_wait_cache_io_root(struct btrfs_root *root,
1206				    struct btrfs_trans_handle *trans,
1207				    struct btrfs_io_ctl *io_ctl,
1208				    struct btrfs_path *path)
1209{
1210	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
1211}
1212
1213int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
1214			struct btrfs_block_group_cache *block_group,
1215			struct btrfs_path *path)
1216{
1217	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
1218				     block_group, &block_group->io_ctl,
1219				     path, block_group->key.objectid);
1220}
1221
1222/**
1223 * __btrfs_write_out_cache - write out cached info to an inode
1224 * @root - the root the inode belongs to
     * @inode - the inode that the cache is written into
1225 * @ctl - the free space cache we are going to write out
1226 * @block_group - the block_group for this cache if it belongs to a block_group
     * @io_ctl - the io_ctl struct used to track the cache IO
1227 * @trans - the trans handle
1228 * @path - the path to use
1229 * @offset - the offset for the key we'll insert
1230 *
1231 * This function writes out a free space cache struct to disk for quick recovery
1232 * on mount.  This will return 0 if it was successful in writing the cache out,
1233 * or an errno if it was not.
1234 */
1235static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1236				   struct btrfs_free_space_ctl *ctl,
1237				   struct btrfs_block_group_cache *block_group,
1238				   struct btrfs_io_ctl *io_ctl,
1239				   struct btrfs_trans_handle *trans,
1240				   struct btrfs_path *path, u64 offset)
1241{
1242	struct btrfs_fs_info *fs_info = root->fs_info;
1243	struct extent_state *cached_state = NULL;
1244	LIST_HEAD(bitmap_list);
1245	int entries = 0;
1246	int bitmaps = 0;
1247	int ret;
1248	int must_iput = 0;
1249
1250	if (!i_size_read(inode))
1251		return -EIO;
1252
1253	WARN_ON(io_ctl->pages);
1254	ret = io_ctl_init(io_ctl, inode, 1);
1255	if (ret)
1256		return ret;
1257
1258	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1259		down_write(&block_group->data_rwsem);
1260		spin_lock(&block_group->lock);
1261		if (block_group->delalloc_bytes) {
1262			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1263			spin_unlock(&block_group->lock);
1264			up_write(&block_group->data_rwsem);
1265			BTRFS_I(inode)->generation = 0;
1266			ret = 0;
1267			must_iput = 1;
1268			goto out;
1269		}
1270		spin_unlock(&block_group->lock);
1271	}
1272
1273	/* Lock all pages first so we can lock the extent safely. */
1274	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
1275	if (ret)
1276		goto out;
1277
1278	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1279			 &cached_state);
1280
1281	io_ctl_set_generation(io_ctl, trans->transid);
1282
1283	mutex_lock(&ctl->cache_writeout_mutex);
1284	/* Write out the extent entries in the free space cache */
1285	spin_lock(&ctl->tree_lock);
1286	ret = write_cache_extent_entries(io_ctl, ctl,
1287					 block_group, &entries, &bitmaps,
1288					 &bitmap_list);
1289	if (ret)
1290		goto out_nospc_locked;
1291
1292	/*
1293	 * Some space that is freed in the current transaction is pinned; it
1294	 * will be added to the free space cache after the transaction is
1295	 * committed, and we shouldn't lose it.
1296	 *
1297	 * If this changes while we are working, we'll get added back to
1298	 * the dirty list and redo it.  No locking is needed.
1299	 */
1300	ret = write_pinned_extent_entries(fs_info, block_group,
1301					  io_ctl, &entries);
1302	if (ret)
1303		goto out_nospc_locked;
1304
1305	/*
1306	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
1307	 * locked while doing it because a concurrent trim can be manipulating
1308	 * or freeing the bitmap.
1309	 */
1310	ret = write_bitmap_entries(io_ctl, &bitmap_list);
1311	spin_unlock(&ctl->tree_lock);
1312	mutex_unlock(&ctl->cache_writeout_mutex);
1313	if (ret)
1314		goto out_nospc;
1315
1316	/* Zero out the rest of the pages just to make sure */
1317	io_ctl_zero_remaining_pages(io_ctl);
1318
1319	/* Everything is written out, now we dirty the pages in the file. */
1320	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
1321				i_size_read(inode), &cached_state);
1322	if (ret)
1323		goto out_nospc;
1324
1325	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1326		up_write(&block_group->data_rwsem);
1327	/*
1328	 * Release the pages and unlock the extent; we will flush
1329	 * them out later.
1330	 */
1331	io_ctl_drop_pages(io_ctl);
1332
1333	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
1334			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
1335
1336	/*
1337	 * At this point the pages are under IO and we're happy.
1338	 * The caller is responsible for waiting on them and updating
1339	 * the cache and the inode.
1340	 */
1341	io_ctl->entries = entries;
1342	io_ctl->bitmaps = bitmaps;
1343
1344	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
1345	if (ret)
1346		goto out;
1347
1348	return 0;
1349
1350out:
1351	io_ctl->inode = NULL;
1352	io_ctl_free(io_ctl);
1353	if (ret) {
1354		invalidate_inode_pages2(inode->i_mapping);
1355		BTRFS_I(inode)->generation = 0;
1356	}
1357	btrfs_update_inode(trans, root, inode);
1358	if (must_iput)
1359		iput(inode);
1360	return ret;
1361
1362out_nospc_locked:
1363	cleanup_bitmap_list(&bitmap_list);
1364	spin_unlock(&ctl->tree_lock);
1365	mutex_unlock(&ctl->cache_writeout_mutex);
1366
1367out_nospc:
1368	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
1369
1370	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1371		up_write(&block_group->data_rwsem);
1372
1373	goto out;
1374}
1375
1376int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
1377			  struct btrfs_trans_handle *trans,
1378			  struct btrfs_block_group_cache *block_group,
1379			  struct btrfs_path *path)
1380{
1381	struct btrfs_root *root = fs_info->tree_root;
1382	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1383	struct inode *inode;
1384	int ret = 0;
1385
1386	spin_lock(&block_group->lock);
1387	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
1388		spin_unlock(&block_group->lock);
1389		return 0;
1390	}
1391	spin_unlock(&block_group->lock);
1392
1393	inode = lookup_free_space_inode(root, block_group, path);
1394	if (IS_ERR(inode))
1395		return 0;
1396
1397	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
1398				      &block_group->io_ctl, trans,
1399				      path, block_group->key.objectid);
1400	if (ret) {
1401#ifdef DEBUG
1402		btrfs_err(fs_info,
1403			  "failed to write free space cache for block group %llu",
1404			  block_group->key.objectid);
1405#endif
1406		spin_lock(&block_group->lock);
1407		block_group->disk_cache_state = BTRFS_DC_ERROR;
1408		spin_unlock(&block_group->lock);
1409
1410		block_group->io_ctl.inode = NULL;
1411		iput(inode);
1412	}
1413
1414	/*
1415	 * If ret == 0, the caller is expected to call btrfs_wait_cache_io
1416	 * to wait for IO and put the inode.
1417	 */
1418
1419	return ret;
1420}
1421
1422static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
1423					  u64 offset)
1424{
1425	ASSERT(offset >= bitmap_start);
1426	offset -= bitmap_start;
1427	return (unsigned long)(div_u64(offset, unit));
1428}
1429
1430static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
1431{
1432	return (unsigned long)(div_u64(bytes, unit));
1433}
1434
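    /*
     * Round an offset down to the start of the bitmap that covers it.  As
     * an illustration, with ctl->start == 0 and 128MiB per bitmap, an
     * offset of 200MiB maps to the bitmap starting at 128MiB.
     */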
1435static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
1436				   u64 offset)
1437{
1438	u64 bitmap_start;
1439	u64 bytes_per_bitmap;
1440
1441	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
1442	bitmap_start = offset - ctl->start;
1443	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
1444	bitmap_start *= bytes_per_bitmap;
1445	bitmap_start += ctl->start;
1446
1447	return bitmap_start;
1448}
1449
1450static int tree_insert_offset(struct rb_root *root, u64 offset,
1451			      struct rb_node *node, int bitmap)
1452{
1453	struct rb_node **p = &root->rb_node;
1454	struct rb_node *parent = NULL;
1455	struct btrfs_free_space *info;
1456
1457	while (*p) {
1458		parent = *p;
1459		info = rb_entry(parent, struct btrfs_free_space, offset_index);
1460
1461		if (offset < info->offset) {
1462			p = &(*p)->rb_left;
1463		} else if (offset > info->offset) {
1464			p = &(*p)->rb_right;
1465		} else {
1466			/*
1467			 * we could have a bitmap entry and an extent entry
1468			 * share the same offset.  If this is the case, we want
1469			 * the extent entry to always be found first if we do a
1470			 * linear search through the tree, since we want to have
1471			 * the quickest allocation time, and allocating from an
1472			 * extent is faster than allocating from a bitmap.  So
1473			 * if we're inserting a bitmap and we find an entry at
1474			 * this offset, we want to go right, or after this entry
1475			 * logically.  If we are inserting an extent and we've
1476			 * found a bitmap, we want to go left, or before
1477			 * logically.
1478			 */
1479			if (bitmap) {
1480				if (info->bitmap) {
1481					WARN_ON_ONCE(1);
1482					return -EEXIST;
1483				}
1484				p = &(*p)->rb_right;
1485			} else {
1486				if (!info->bitmap) {
1487					WARN_ON_ONCE(1);
1488					return -EEXIST;
1489				}
1490				p = &(*p)->rb_left;
1491			}
1492		}
1493	}
1494
1495	rb_link_node(node, parent, p);
1496	rb_insert_color(node, root);
1497
1498	return 0;
1499}
1500
1501/*
1502 * searches the tree for the given offset.
1503 *
1504 * fuzzy - If this is set, then we are trying to make an allocation and we
1505 * just want a section that is at least 'bytes' in size and comes at or
1506 * after the given offset.
1507 */
1508static struct btrfs_free_space *
1509tree_search_offset(struct btrfs_free_space_ctl *ctl,
1510		   u64 offset, int bitmap_only, int fuzzy)
1511{
1512	struct rb_node *n = ctl->free_space_offset.rb_node;
1513	struct btrfs_free_space *entry, *prev = NULL;
1514
1515	/* find entry that is closest to the 'offset' */
1516	while (1) {
1517		if (!n) {
1518			entry = NULL;
1519			break;
1520		}
1521
1522		entry = rb_entry(n, struct btrfs_free_space, offset_index);
1523		prev = entry;
1524
1525		if (offset < entry->offset)
1526			n = n->rb_left;
1527		else if (offset > entry->offset)
1528			n = n->rb_right;
1529		else
1530			break;
1531	}
1532
1533	if (bitmap_only) {
1534		if (!entry)
1535			return NULL;
1536		if (entry->bitmap)
1537			return entry;
1538
1539		/*
1540		 * A bitmap entry and an extent entry may share the same offset;
1541		 * in that case, the bitmap entry comes after the extent entry.
1542		 */
1543		n = rb_next(n);
1544		if (!n)
1545			return NULL;
1546		entry = rb_entry(n, struct btrfs_free_space, offset_index);
1547		if (entry->offset != offset)
1548			return NULL;
1549
1550		WARN_ON(!entry->bitmap);
1551		return entry;
1552	} else if (entry) {
1553		if (entry->bitmap) {
1554			/*
1555			 * if previous extent entry covers the offset,
1556			 * we should return it instead of the bitmap entry
1557			 */
1558			n = rb_prev(&entry->offset_index);
1559			if (n) {
1560				prev = rb_entry(n, struct btrfs_free_space,
1561						offset_index);
1562				if (!prev->bitmap &&
1563				    prev->offset + prev->bytes > offset)
1564					entry = prev;
1565			}
1566		}
1567		return entry;
1568	}
1569
1570	if (!prev)
1571		return NULL;
1572
1573	/* find last entry before the 'offset' */
1574	entry = prev;
1575	if (entry->offset > offset) {
1576		n = rb_prev(&entry->offset_index);
1577		if (n) {
1578			entry = rb_entry(n, struct btrfs_free_space,
1579					offset_index);
1580			ASSERT(entry->offset <= offset);
1581		} else {
1582			if (fuzzy)
1583				return entry;
1584			else
1585				return NULL;
1586		}
1587	}
1588
1589	if (entry->bitmap) {
1590		n = rb_prev(&entry->offset_index);
1591		if (n) {
1592			prev = rb_entry(n, struct btrfs_free_space,
1593					offset_index);
1594			if (!prev->bitmap &&
1595			    prev->offset + prev->bytes > offset)
1596				return prev;
1597		}
1598		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1599			return entry;
1600	} else if (entry->offset + entry->bytes > offset)
1601		return entry;
1602
1603	if (!fuzzy)
1604		return NULL;
1605
1606	while (1) {
1607		if (entry->bitmap) {
1608			if (entry->offset + BITS_PER_BITMAP *
1609			    ctl->unit > offset)
1610				break;
1611		} else {
1612			if (entry->offset + entry->bytes > offset)
1613				break;
1614		}
1615
1616		n = rb_next(&entry->offset_index);
1617		if (!n)
1618			return NULL;
1619		entry = rb_entry(n, struct btrfs_free_space, offset_index);
1620	}
1621	return entry;
1622}
1623
1624static inline void
1625__unlink_free_space(struct btrfs_free_space_ctl *ctl,
1626		    struct btrfs_free_space *info)
1627{
1628	rb_erase(&info->offset_index, &ctl->free_space_offset);
1629	ctl->free_extents--;
1630}
1631
1632static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1633			      struct btrfs_free_space *info)
1634{
1635	__unlink_free_space(ctl, info);
1636	ctl->free_space -= info->bytes;
1637}
1638
1639static int link_free_space(struct btrfs_free_space_ctl *ctl,
1640			   struct btrfs_free_space *info)
1641{
1642	int ret = 0;
1643
1644	ASSERT(info->bytes || info->bitmap);
1645	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1646				 &info->offset_index, (info->bitmap != NULL));
1647	if (ret)
1648		return ret;
1649
1650	ctl->free_space += info->bytes;
1651	ctl->free_extents++;
1652	return ret;
1653}
1654
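    /*
     * Illustrative numbers, assuming a 4KiB ctl->unit: for a 1GiB block
     * group, max_bytes = 32KiB.  With one bitmap in use, bitmap_bytes =
     * (1 + 1) * 4KiB = 8KiB, leaving min(32KiB - 8KiB, 32KiB / 2) = 16KiB
     * for extent entries, so extents_thresh ends up as 16KiB divided by
     * sizeof(struct btrfs_free_space).
     */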
1655static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1656{
1657	struct btrfs_block_group_cache *block_group = ctl->private;
1658	u64 max_bytes;
1659	u64 bitmap_bytes;
1660	u64 extent_bytes;
1661	u64 size = block_group->key.offset;
1662	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
1663	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1664
1665	max_bitmaps = max_t(u64, max_bitmaps, 1);
1666
1667	ASSERT(ctl->total_bitmaps <= max_bitmaps);
1668
1669	/*
1670	 * The goal is to keep the total amount of memory used per 1GiB of space
1671	 * at or below 32KiB, so we need to adjust how much memory we allow to be
1672	 * used by extent-based free space tracking.
1673	 */
1674	if (size < SZ_1G)
1675		max_bytes = MAX_CACHE_BYTES_PER_GIG;
1676	else
1677		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
1678
1679	/*
1680	 * we want to account for 1 more bitmap than what we have so we can make
1681	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1682	 * we add more bitmaps.
1683	 */
1684	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
1685
1686	if (bitmap_bytes >= max_bytes) {
1687		ctl->extents_thresh = 0;
1688		return;
1689	}
1690
1691	/*
1692	 * We want the extent entry threshold to be at most 1/2 the max bytes
1693	 * we can have, or whatever is left after the bitmaps if that is smaller.
1694	 */
1695	extent_bytes = max_bytes - bitmap_bytes;
1696	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
1697
1698	ctl->extents_thresh =
1699		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
1700}
1701
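    /*
     * The __ variant clears bits and adjusts info->bytes but leaves
     * ctl->free_space untouched, for callers that account for the bytes
     * elsewhere; bitmap_clear_bits() below also subtracts from the global
     * free space counter.
     */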
1702static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1703				       struct btrfs_free_space *info,
1704				       u64 offset, u64 bytes)
1705{
1706	unsigned long start, count;
1707
1708	start = offset_to_bit(info->offset, ctl->unit, offset);
1709	count = bytes_to_bits(bytes, ctl->unit);
1710	ASSERT(start + count <= BITS_PER_BITMAP);
1711
1712	bitmap_clear(info->bitmap, start, count);
1713
1714	info->bytes -= bytes;
1715}
1716
1717static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1718			      struct btrfs_free_space *info, u64 offset,
1719			      u64 bytes)
1720{
1721	__bitmap_clear_bits(ctl, info, offset, bytes);
1722	ctl->free_space -= bytes;
1723}
1724
1725static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1726			    struct btrfs_free_space *info, u64 offset,
1727			    u64 bytes)
1728{
1729	unsigned long start, count;
1730
1731	start = offset_to_bit(info->offset, ctl->unit, offset);
1732	count = bytes_to_bits(bytes, ctl->unit);
1733	ASSERT(start + count <= BITS_PER_BITMAP);
1734
1735	bitmap_set(info->bitmap, start, count);
1736
1737	info->bytes += bytes;
1738	ctl->free_space += bytes;
1739}
1740
1741/*
1742 * If we cannot find a suitable extent, we use *bytes to record
1743 * the size of the largest free extent we did find.
1744 */
1745static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1746			 struct btrfs_free_space *bitmap_info, u64 *offset,
1747			 u64 *bytes, bool for_alloc)
1748{
1749	unsigned long found_bits = 0;
1750	unsigned long max_bits = 0;
1751	unsigned long bits, i;
1752	unsigned long next_zero;
1753	unsigned long extent_bits;
1754
1755	/*
1756	 * Skip searching the bitmap if we don't have a contiguous section that
1757	 * is large enough for this allocation.
1758	 */
1759	if (for_alloc &&
1760	    bitmap_info->max_extent_size &&
1761	    bitmap_info->max_extent_size < *bytes) {
1762		*bytes = bitmap_info->max_extent_size;
1763		return -1;
1764	}
1765
1766	i = offset_to_bit(bitmap_info->offset, ctl->unit,
1767			  max_t(u64, *offset, bitmap_info->offset));
1768	bits = bytes_to_bits(*bytes, ctl->unit);
1769
1770	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1771		if (for_alloc && bits == 1) {
1772			found_bits = 1;
1773			break;
1774		}
1775		next_zero = find_next_zero_bit(bitmap_info->bitmap,
1776					       BITS_PER_BITMAP, i);
1777		extent_bits = next_zero - i;
1778		if (extent_bits >= bits) {
1779			found_bits = extent_bits;
1780			break;
1781		} else if (extent_bits > max_bits) {
1782			max_bits = extent_bits;
1783		}
1784		i = next_zero;
1785	}
1786
1787	if (found_bits) {
1788		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1789		*bytes = (u64)(found_bits) * ctl->unit;
1790		return 0;
1791	}
1792
1793	*bytes = (u64)(max_bits) * ctl->unit;
1794	bitmap_info->max_extent_size = *bytes;
1795	return -1;
1796}
1797
1798/* Cache the size of the max extent in bytes */
1799static struct btrfs_free_space *
1800find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1801		unsigned long align, u64 *max_extent_size)
1802{
1803	struct btrfs_free_space *entry;
1804	struct rb_node *node;
1805	u64 tmp;
1806	u64 align_off;
1807	int ret;
1808
1809	if (!ctl->free_space_offset.rb_node)
1810		goto out;
1811
1812	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1813	if (!entry)
1814		goto out;
1815
1816	for (node = &entry->offset_index; node; node = rb_next(node)) {
1817		entry = rb_entry(node, struct btrfs_free_space, offset_index);
1818		if (entry->bytes < *bytes) {
1819			if (entry->bytes > *max_extent_size)
1820				*max_extent_size = entry->bytes;
1821			continue;
1822		}
1823
1824		/* make sure the space returned is big enough
1825		 * to match our requested alignment
1826		 */
1827		if (*bytes >= align) {
1828			tmp = entry->offset - ctl->start + align - 1;
1829			tmp = div64_u64(tmp, align);
1830			tmp = tmp * align + ctl->start;
1831			align_off = tmp - entry->offset;
1832		} else {
1833			align_off = 0;
1834			tmp = entry->offset;
1835		}
1836
1837		if (entry->bytes < *bytes + align_off) {
1838			if (entry->bytes > *max_extent_size)
1839				*max_extent_size = entry->bytes;
1840			continue;
1841		}
1842
1843		if (entry->bitmap) {
1844			u64 size = *bytes;
1845
1846			ret = search_bitmap(ctl, entry, &tmp, &size, true);
1847			if (!ret) {
1848				*offset = tmp;
1849				*bytes = size;
1850				return entry;
1851			} else if (size > *max_extent_size) {
1852				*max_extent_size = size;
1853			}
1854			continue;
1855		}
1856
1857		*offset = tmp;
1858		*bytes = entry->bytes - align_off;
1859		return entry;
1860	}
1861out:
1862	return NULL;
1863}
1864
1865static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1866			   struct btrfs_free_space *info, u64 offset)
1867{
1868	info->offset = offset_to_bitmap(ctl, offset);
1869	info->bytes = 0;
1870	INIT_LIST_HEAD(&info->list);
1871	link_free_space(ctl, info);
1872	ctl->total_bitmaps++;
1873
1874	ctl->op->recalc_thresholds(ctl);
1875}
1876
1877static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1878			struct btrfs_free_space *bitmap_info)
1879{
1880	unlink_free_space(ctl, bitmap_info);
1881	kfree(bitmap_info->bitmap);
1882	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1883	ctl->total_bitmaps--;
1884	ctl->op->recalc_thresholds(ctl);
1885}
1886
1887static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1888			      struct btrfs_free_space *bitmap_info,
1889			      u64 *offset, u64 *bytes)
1890{
1891	u64 end;
1892	u64 search_start, search_bytes;
1893	int ret;
1894
1895again:
1896	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1897
1898	/*
1899	 * We need to search for bits in this bitmap.  The extent may only be
1900	 * partially covered by this bitmap, thanks to how we add space, so we
1901	 * need to search for as much of it as we can, clear that amount, and
1902	 * then go searching for the next bit.
1903	 */
1904	search_start = *offset;
1905	search_bytes = ctl->unit;
1906	search_bytes = min(search_bytes, end - search_start + 1);
1907	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
1908			    false);
1909	if (ret < 0 || search_start != *offset)
1910		return -EINVAL;
1911
1912	/* We may have found more bits than what we need */
1913	search_bytes = min(search_bytes, *bytes);
1914
1915	/* Cannot clear past the end of the bitmap */
1916	search_bytes = min(search_bytes, end - search_start + 1);
1917
1918	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
1919	*offset += search_bytes;
1920	*bytes -= search_bytes;
1921
1922	if (*bytes) {
1923		struct rb_node *next = rb_next(&bitmap_info->offset_index);
1924		if (!bitmap_info->bytes)
1925			free_bitmap(ctl, bitmap_info);
1926
1927		/*
1928		 * no entry after this bitmap, but we still have bytes to
1929		 * remove, so something has gone wrong.
1930		 */
1931		if (!next)
1932			return -EINVAL;
1933
1934		bitmap_info = rb_entry(next, struct btrfs_free_space,
1935				       offset_index);
1936
1937		/*
1938		 * if the next entry isn't a bitmap we need to return to let the
1939		 * extent stuff do its work.
1940		 */
1941		if (!bitmap_info->bitmap)
1942			return -EAGAIN;
1943
1944		/*
1945		 * Ok the next item is a bitmap, but it may not actually hold
1946		 * the information for the rest of this free space stuff, so
1947		 * look for it, and if we don't find it return so we can try
1948		 * everything over again.
1949		 */
1950		search_start = *offset;
1951		search_bytes = ctl->unit;
1952		ret = search_bitmap(ctl, bitmap_info, &search_start,
1953				    &search_bytes, false);
1954		if (ret < 0 || search_start != *offset)
1955			return -EAGAIN;
1956
1957		goto again;
1958	} else if (!bitmap_info->bytes)
1959		free_bitmap(ctl, bitmap_info);
1960
1961	return 0;
1962}
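
The loop above never clears more than it should in one pass: the amount search_bitmap() found is clamped to the caller's remaining request and to the end of the current bitmap before any bits are cleared. The same clamps as a standalone helper (illustrative sketch, hypothetical names):

#include <stdint.h>

static uint64_t sketch_clamp(uint64_t found, uint64_t wanted,
			     uint64_t start, uint64_t end)
{
	uint64_t n = found;		/* what search_bitmap() reported */

	if (n > wanted)
		n = wanted;		/* what the caller still needs */
	if (n > end - start + 1)
		n = end - start + 1;	/* what fits in this bitmap */
	return n;
}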
1963
1964static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1965			       struct btrfs_free_space *info, u64 offset,
1966			       u64 bytes)
1967{
1968	u64 bytes_to_set = 0;
1969	u64 end;
1970
1971	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1972
1973	bytes_to_set = min(end - offset, bytes);
1974
1975	bitmap_set_bits(ctl, info, offset, bytes_to_set);
1976
1977	/*
1978	 * We set some bytes, we have no idea what the max extent size is
1979	 * anymore.
1980	 */
1981	info->max_extent_size = 0;
1982
1983	return bytes_to_set;
1984
1985}
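
Each bitmap entry tracks BITS_PER_BITMAP bits of ctl->unit bytes apiece, so add_bytes_to_bitmap() can only set bits up to the entry's end and returns how much it consumed. A sketch of the span arithmetic, assuming 4K pages and a 4K unit (so one bitmap covers 128M; illustration only):

#include <stdint.h>

#define SKETCH_BITS_PER_BITMAP	(4096 * 8UL)	/* PAGE_SIZE * 8 assumed */

/* Which bit of the bitmap starting at bitmap_start covers `offset`. */
static uint64_t sketch_offset_to_bit(uint64_t bitmap_start, uint64_t unit,
				     uint64_t offset)
{
	return (offset - bitmap_start) / unit;
}

/* Exclusive end of the byte range one bitmap entry can describe. */
static uint64_t sketch_bitmap_end(uint64_t bitmap_start, uint64_t unit)
{
	return bitmap_start + SKETCH_BITS_PER_BITMAP * unit;	/* +128M here */
}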
1986
1987static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1988		      struct btrfs_free_space *info)
1989{
1990	struct btrfs_block_group_cache *block_group = ctl->private;
1991	struct btrfs_fs_info *fs_info = block_group->fs_info;
1992	bool forced = false;
1993
1994#ifdef CONFIG_BTRFS_DEBUG
1995	if (btrfs_should_fragment_free_space(block_group))
1996		forced = true;
1997#endif
1998
1999	/*
2000	 * If we are below the extents threshold then we can add this as an
2001	 * extent, and don't have to deal with the bitmap
2002	 */
2003	if (!forced && ctl->free_extents < ctl->extents_thresh) {
2004		/*
2005		 * If this block group has some small extents we don't want to
2006		 * use up all of our free slots in the cache with them, we want
2007		 * to reserve them to larger extents, however if we have plenty
2008		 * of cache left then go ahead and add them, no sense in adding
2009		 * the overhead of a bitmap if we don't have to.
2010		 */
2011		if (info->bytes <= fs_info->sectorsize * 4) {
2012			if (ctl->free_extents * 2 <= ctl->extents_thresh)
2013				return false;
2014		} else {
2015			return false;
2016		}
2017	}
2018
2019	/*
2020	 * The original block groups from mkfs can be really small, like 8
2021	 * megabytes, so don't bother with a bitmap for those entries.  However
2022	 * some block groups can be smaller than what a bitmap would cover but
2023	 * are still large enough that they could overflow the 32k memory limit,
2024	 * so allow those block groups to still be allowed to have a bitmap
2025	 * entry.
2026	 */
2027	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
2028		return false;
2029
2030	return true;
2031}
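
use_bitmap() boils down to three rules: prefer plain extents while slots remain, let small extents spill to bitmaps once they would eat half the slots, and never create a bitmap for a block group smaller than half a bitmap's span. A userspace restatement of the decision (sketch only; the `forced` debug path is omitted):

#include <stdbool.h>
#include <stdint.h>

static bool sketch_use_bitmap(uint64_t free_extents, uint64_t extents_thresh,
			      uint64_t info_bytes, uint64_t sectorsize,
			      uint64_t bitmap_span, uint64_t bg_size)
{
	if (free_extents < extents_thresh) {
		if (info_bytes <= sectorsize * 4) {
			/* small extents may still take up to half the slots */
			if (free_extents * 2 <= extents_thresh)
				return false;
		} else {
			return false;	/* plenty of room: plain extent */
		}
	}
	if (bitmap_span / 2 > bg_size)
		return false;		/* tiny block group: skip the bitmap */
	return true;
}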
2032
2033static const struct btrfs_free_space_op free_space_op = {
2034	.recalc_thresholds	= recalculate_thresholds,
2035	.use_bitmap		= use_bitmap,
2036};
2037
2038static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2039			      struct btrfs_free_space *info)
2040{
2041	struct btrfs_free_space *bitmap_info;
2042	struct btrfs_block_group_cache *block_group = NULL;
2043	int added = 0;
2044	u64 bytes, offset, bytes_added;
2045	int ret;
2046
2047	bytes = info->bytes;
2048	offset = info->offset;
2049
2050	if (!ctl->op->use_bitmap(ctl, info))
2051		return 0;
2052
2053	if (ctl->op == &free_space_op)
2054		block_group = ctl->private;
2055again:
2056	/*
2057	 * Since we link bitmaps right into the cluster we need to see if we
2058	 * have a cluster here, and if so and it has our bitmap we need to add
2059	 * the free space to that bitmap.
2060	 */
2061	if (block_group && !list_empty(&block_group->cluster_list)) {
2062		struct btrfs_free_cluster *cluster;
2063		struct rb_node *node;
2064		struct btrfs_free_space *entry;
2065
2066		cluster = list_entry(block_group->cluster_list.next,
2067				     struct btrfs_free_cluster,
2068				     block_group_list);
2069		spin_lock(&cluster->lock);
2070		node = rb_first(&cluster->root);
2071		if (!node) {
2072			spin_unlock(&cluster->lock);
2073			goto no_cluster_bitmap;
2074		}
2075
2076		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2077		if (!entry->bitmap) {
2078			spin_unlock(&cluster->lock);
2079			goto no_cluster_bitmap;
2080		}
2081
2082		if (entry->offset == offset_to_bitmap(ctl, offset)) {
2083			bytes_added = add_bytes_to_bitmap(ctl, entry,
2084							  offset, bytes);
2085			bytes -= bytes_added;
2086			offset += bytes_added;
2087		}
2088		spin_unlock(&cluster->lock);
2089		if (!bytes) {
2090			ret = 1;
2091			goto out;
2092		}
2093	}
2094
2095no_cluster_bitmap:
2096	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2097					 1, 0);
2098	if (!bitmap_info) {
2099		ASSERT(added == 0);
2100		goto new_bitmap;
2101	}
2102
2103	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
2104	bytes -= bytes_added;
2105	offset += bytes_added;
2106	added = 0;
2107
2108	if (!bytes) {
2109		ret = 1;
2110		goto out;
2111	} else
2112		goto again;
2113
2114new_bitmap:
2115	if (info && info->bitmap) {
2116		add_new_bitmap(ctl, info, offset);
2117		added = 1;
2118		info = NULL;
2119		goto again;
2120	} else {
2121		spin_unlock(&ctl->tree_lock);
2122
2123		/* no pre-allocated info, allocate a new one */
2124		if (!info) {
2125			info = kmem_cache_zalloc(btrfs_free_space_cachep,
2126						 GFP_NOFS);
2127			if (!info) {
2128				spin_lock(&ctl->tree_lock);
2129				ret = -ENOMEM;
2130				goto out;
2131			}
2132		}
2133
2134		/* allocate the bitmap */
2135		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
2136		spin_lock(&ctl->tree_lock);
2137		if (!info->bitmap) {
2138			ret = -ENOMEM;
2139			goto out;
2140		}
2141		goto again;
2142	}
2143
2144out:
2145	if (info) {
2146		kfree(info->bitmap);	/* kfree(NULL) is a no-op */
2148		kmem_cache_free(btrfs_free_space_cachep, info);
2149	}
2150
2151	return ret;
2152}
2153
2154static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2155			  struct btrfs_free_space *info, bool update_stat)
2156{
2157	struct btrfs_free_space *left_info;
2158	struct btrfs_free_space *right_info;
2159	bool merged = false;
2160	u64 offset = info->offset;
2161	u64 bytes = info->bytes;
2162
2163	/*
2164	 * first we want to see if there is free space adjacent to the range we
2165	 * are adding, if there is remove that struct and add a new one to
2166	 * cover the entire range
2167	 */
2168	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2169	if (right_info && rb_prev(&right_info->offset_index))
2170		left_info = rb_entry(rb_prev(&right_info->offset_index),
2171				     struct btrfs_free_space, offset_index);
2172	else
2173		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2174
2175	if (right_info && !right_info->bitmap) {
2176		if (update_stat)
2177			unlink_free_space(ctl, right_info);
2178		else
2179			__unlink_free_space(ctl, right_info);
2180		info->bytes += right_info->bytes;
2181		kmem_cache_free(btrfs_free_space_cachep, right_info);
2182		merged = true;
2183	}
2184
2185	if (left_info && !left_info->bitmap &&
2186	    left_info->offset + left_info->bytes == offset) {
2187		if (update_stat)
2188			unlink_free_space(ctl, left_info);
2189		else
2190			__unlink_free_space(ctl, left_info);
2191		info->offset = left_info->offset;
2192		info->bytes += left_info->bytes;
2193		kmem_cache_free(btrfs_free_space_cachep, left_info);
2194		merged = true;
2195	}
2196
2197	return merged;
2198}
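
try_merge_free_space() coalesces the new range with a neighbour that ends exactly at its start or begins exactly at its end. The core of that, stripped of the rb-tree lookups (hypothetical struct, not the kernel's types):

#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t offset, bytes; };

static bool sketch_merge(struct range *new_r, const struct range *left,
			 const struct range *right)
{
	bool merged = false;

	if (right && new_r->offset + new_r->bytes == right->offset) {
		new_r->bytes += right->bytes;	/* absorb right neighbour */
		merged = true;
	}
	if (left && left->offset + left->bytes == new_r->offset) {
		new_r->offset = left->offset;	/* grow to the left */
		new_r->bytes += left->bytes;
		merged = true;
	}
	return merged;
}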
2199
2200static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2201				     struct btrfs_free_space *info,
2202				     bool update_stat)
2203{
2204	struct btrfs_free_space *bitmap;
2205	unsigned long i;
2206	unsigned long j;
2207	const u64 end = info->offset + info->bytes;
2208	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2209	u64 bytes;
2210
2211	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2212	if (!bitmap)
2213		return false;
2214
2215	i = offset_to_bit(bitmap->offset, ctl->unit, end);
2216	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2217	if (j == i)
2218		return false;
2219	bytes = (j - i) * ctl->unit;
2220	info->bytes += bytes;
2221
2222	if (update_stat)
2223		bitmap_clear_bits(ctl, bitmap, end, bytes);
2224	else
2225		__bitmap_clear_bits(ctl, bitmap, end, bytes);
2226
2227	if (!bitmap->bytes)
2228		free_bitmap(ctl, bitmap);
2229
2230	return true;
2231}
2232
2233static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2234				       struct btrfs_free_space *info,
2235				       bool update_stat)
2236{
2237	struct btrfs_free_space *bitmap;
2238	u64 bitmap_offset;
2239	unsigned long i;
2240	unsigned long j;
2241	unsigned long prev_j;
2242	u64 bytes;
2243
2244	bitmap_offset = offset_to_bitmap(ctl, info->offset);
2245	/* If we're on a boundary, try the previous logical bitmap. */
2246	if (bitmap_offset == info->offset) {
2247		if (info->offset == 0)
2248			return false;
2249		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2250	}
2251
2252	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2253	if (!bitmap)
2254		return false;
2255
2256	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2257	j = 0;
2258	prev_j = (unsigned long)-1;
2259	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2260		if (j > i)
2261			break;
2262		prev_j = j;
2263	}
2264	if (prev_j == i)
2265		return false;
2266
2267	if (prev_j == (unsigned long)-1)
2268		bytes = (i + 1) * ctl->unit;
2269	else
2270		bytes = (i - prev_j) * ctl->unit;
2271
2272	info->offset -= bytes;
2273	info->bytes += bytes;
2274
2275	if (update_stat)
2276		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2277	else
2278		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2279
2280	if (!bitmap->bytes)
2281		free_bitmap(ctl, bitmap);
2282
2283	return true;
2284}
2285
2286/*
2287 * We prefer always to allocate from extent entries, both for clustered and
2288 * non-clustered allocation requests. So when attempting to add a new extent
2289 * entry, try to see if there's adjacent free space in bitmap entries, and if
2290 * there is, migrate that space from the bitmaps to the extent.
2291 * This way we get better chances of satisfying space allocation requests
2292 * because we attempt to satisfy them based on a single cache entry, and never
2293 * on 2 or more entries - even if the entries represent a contiguous free space
2294 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2295 * ends).
2296 */
2297static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2298			      struct btrfs_free_space *info,
2299			      bool update_stat)
2300{
2301	/*
2302	 * Only work with disconnected entries, as we can change their offset,
2303	 * and must be extent entries.
2304	 */
2305	ASSERT(!info->bitmap);
2306	ASSERT(RB_EMPTY_NODE(&info->offset_index));
2307
2308	if (ctl->total_bitmaps > 0) {
2309		bool stole_end;
2310		bool stole_front = false;
2311
2312		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2313		if (ctl->total_bitmaps > 0)
2314			stole_front = steal_from_bitmap_to_front(ctl, info,
2315								 update_stat);
2316
2317		if (stole_end || stole_front)
2318			try_merge_free_space(ctl, info, update_stat);
2319	}
2320}
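
steal_from_bitmap_to_end() extends an extent entry by the run of set bits that starts exactly where the extent ends. A toy version over a byte-addressed bitmap (assumed layout; the kernel walks unsigned longs with find_next_zero_bit()):

#include <stdint.h>

static uint64_t sketch_steal_to_end(const uint8_t *bits, uint64_t nbits,
				    uint64_t i, uint64_t unit)
{
	uint64_t j = i;

	while (j < nbits && (bits[j / 8] & (1u << (j % 8))))
		j++;			/* walk the run of set bits */
	return (j - i) * unit;		/* 0 if the first bit is clear */
}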
2321
2322int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
2323			   struct btrfs_free_space_ctl *ctl,
2324			   u64 offset, u64 bytes)
2325{
2326	struct btrfs_free_space *info;
2327	int ret = 0;
2328
2329	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2330	if (!info)
2331		return -ENOMEM;
2332
2333	info->offset = offset;
2334	info->bytes = bytes;
2335	RB_CLEAR_NODE(&info->offset_index);
2336
2337	spin_lock(&ctl->tree_lock);
2338
2339	if (try_merge_free_space(ctl, info, true))
2340		goto link;
2341
2342	/*
2343	 * There was no extent directly to the left or right of this new
2344	 * extent then we know we're going to have to allocate a new extent, so
2345	 * before we do that see if we need to drop this into a bitmap
2346	 */
2347	ret = insert_into_bitmap(ctl, info);
2348	if (ret < 0) {
2349		goto out;
2350	} else if (ret) {
2351		ret = 0;
2352		goto out;
2353	}
2354link:
2355	/*
2356	 * Only steal free space from adjacent bitmaps if we're sure we're not
2357	 * going to add the new free space to existing bitmap entries - because
2358	 * that would mean unnecessary work that would be reverted. Therefore
2359	 * attempt to steal space from bitmaps if we're adding an extent entry.
2360	 */
2361	steal_from_bitmap(ctl, info, true);
2362
2363	ret = link_free_space(ctl, info);
2364	if (ret)
2365		kmem_cache_free(btrfs_free_space_cachep, info);
2366out:
2367	spin_unlock(&ctl->tree_lock);
2368
2369	if (ret) {
2370		btrfs_crit(fs_info, "unable to add free space: %d", ret);
2371		ASSERT(ret != -EEXIST);
2372	}
2373
2374	return ret;
2375}
2376
2377int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
2378			    u64 offset, u64 bytes)
2379{
2380	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2381	struct btrfs_free_space *info;
2382	int ret;
2383	bool re_search = false;
2384
2385	spin_lock(&ctl->tree_lock);
2386
2387again:
2388	ret = 0;
2389	if (!bytes)
2390		goto out_lock;
2391
2392	info = tree_search_offset(ctl, offset, 0, 0);
2393	if (!info) {
2394		/*
2395		 * oops, didn't find an extent that matched the space we wanted
2396		 * to remove, look for a bitmap instead
2397		 */
2398		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2399					  1, 0);
2400		if (!info) {
2401			/*
2402			 * If we found a partial bit of our free space in a
2403		 * bitmap but then couldn't find the other part, this may
2404			 * be a problem, so WARN about it.
2405			 */
2406			WARN_ON(re_search);
2407			goto out_lock;
2408		}
2409	}
2410
2411	re_search = false;
2412	if (!info->bitmap) {
2413		unlink_free_space(ctl, info);
2414		if (offset == info->offset) {
2415			u64 to_free = min(bytes, info->bytes);
2416
2417			info->bytes -= to_free;
2418			info->offset += to_free;
2419			if (info->bytes) {
2420				ret = link_free_space(ctl, info);
2421				WARN_ON(ret);
2422			} else {
2423				kmem_cache_free(btrfs_free_space_cachep, info);
2424			}
2425
2426			offset += to_free;
2427			bytes -= to_free;
2428			goto again;
2429		} else {
2430			u64 old_end = info->bytes + info->offset;
2431
2432			info->bytes = offset - info->offset;
2433			ret = link_free_space(ctl, info);
2434			WARN_ON(ret);
2435			if (ret)
2436				goto out_lock;
2437
2438			/* Not enough bytes in this entry to satisfy us */
2439			if (old_end < offset + bytes) {
2440				bytes -= old_end - offset;
2441				offset = old_end;
2442				goto again;
2443			} else if (old_end == offset + bytes) {
2444				/* all done */
2445				goto out_lock;
2446			}
2447			spin_unlock(&ctl->tree_lock);
2448
2449			ret = btrfs_add_free_space(block_group, offset + bytes,
2450						   old_end - (offset + bytes));
2451			WARN_ON(ret);
2452			goto out;
2453		}
2454	}
2455
2456	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2457	if (ret == -EAGAIN) {
2458		re_search = true;
2459		goto again;
2460	}
2461out_lock:
2462	spin_unlock(&ctl->tree_lock);
2463out:
2464	return ret;
2465}
2466
2467void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2468			   u64 bytes)
2469{
2470	struct btrfs_fs_info *fs_info = block_group->fs_info;
2471	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2472	struct btrfs_free_space *info;
2473	struct rb_node *n;
2474	int count = 0;
2475
2476	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2477		info = rb_entry(n, struct btrfs_free_space, offset_index);
2478		if (info->bytes >= bytes && !block_group->ro)
2479			count++;
2480		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2481			   info->offset, info->bytes,
2482			   (info->bitmap) ? "yes" : "no");
2483	}
2484	btrfs_info(fs_info, "block group has cluster?: %s",
2485	       list_empty(&block_group->cluster_list) ? "no" : "yes");
2486	btrfs_info(fs_info,
2487		   "%d entries of free space at or bigger than %llu bytes", count, bytes);
2488}
2489
2490void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2491{
2492	struct btrfs_fs_info *fs_info = block_group->fs_info;
2493	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2494
2495	spin_lock_init(&ctl->tree_lock);
2496	ctl->unit = fs_info->sectorsize;
2497	ctl->start = block_group->key.objectid;
2498	ctl->private = block_group;
2499	ctl->op = &free_space_op;
2500	INIT_LIST_HEAD(&ctl->trimming_ranges);
2501	mutex_init(&ctl->cache_writeout_mutex);
2502
2503	/*
2504	 * we only want to have 32k of ram per block group for keeping
2505	 * track of free space, and if we pass 1/2 of that we want to
2506	 * start converting things over to using bitmaps
2507	 */
2508	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2509}
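
For a sense of scale: half of the 32K budget is 16384 bytes, and with struct btrfs_free_space assumed at roughly 72 bytes on 64-bit (the real size depends on kernel config; this is an illustration, not a guarantee), that yields a couple hundred extent entries before bitmaps kick in:

#include <stdio.h>

int main(void)
{
	unsigned long budget = 32768 / 2;	/* SZ_32K / 2 */
	unsigned long entry_size = 72;		/* assumed, for illustration */

	printf("extents_thresh ~ %lu entries\n", budget / entry_size); /* ~227 */
	return 0;
}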
2510
2511/*
2512 * for a given cluster, put all of its extents back into the free
2513 * space cache.  If the block group passed doesn't match the block group
2514 * pointed to by the cluster, someone else raced in and freed the
2515 * cluster already.  In that case, we just return without changing anything
2516 */
2517static int
2518__btrfs_return_cluster_to_free_space(
2519			     struct btrfs_block_group_cache *block_group,
2520			     struct btrfs_free_cluster *cluster)
2521{
2522	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2523	struct btrfs_free_space *entry;
2524	struct rb_node *node;
2525
2526	spin_lock(&cluster->lock);
2527	if (cluster->block_group != block_group)
2528		goto out;
2529
2530	cluster->block_group = NULL;
2531	cluster->window_start = 0;
2532	list_del_init(&cluster->block_group_list);
2533
2534	node = rb_first(&cluster->root);
2535	while (node) {
2536		bool bitmap;
2537
2538		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2539		node = rb_next(&entry->offset_index);
2540		rb_erase(&entry->offset_index, &cluster->root);
2541		RB_CLEAR_NODE(&entry->offset_index);
2542
2543		bitmap = (entry->bitmap != NULL);
2544		if (!bitmap) {
2545			try_merge_free_space(ctl, entry, false);
2546			steal_from_bitmap(ctl, entry, false);
2547		}
2548		tree_insert_offset(&ctl->free_space_offset,
2549				   entry->offset, &entry->offset_index, bitmap);
2550	}
2551	cluster->root = RB_ROOT;
2552
2553out:
2554	spin_unlock(&cluster->lock);
2555	btrfs_put_block_group(block_group);
2556	return 0;
2557}
2558
2559static void __btrfs_remove_free_space_cache_locked(
2560				struct btrfs_free_space_ctl *ctl)
2561{
2562	struct btrfs_free_space *info;
2563	struct rb_node *node;
2564
2565	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2566		info = rb_entry(node, struct btrfs_free_space, offset_index);
2567		if (!info->bitmap) {
2568			unlink_free_space(ctl, info);
2569			kmem_cache_free(btrfs_free_space_cachep, info);
2570		} else {
2571			free_bitmap(ctl, info);
2572		}
2573
2574		cond_resched_lock(&ctl->tree_lock);
2575	}
2576}
2577
2578void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2579{
2580	spin_lock(&ctl->tree_lock);
2581	__btrfs_remove_free_space_cache_locked(ctl);
2582	spin_unlock(&ctl->tree_lock);
2583}
2584
2585void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2586{
2587	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2588	struct btrfs_free_cluster *cluster;
2589	struct list_head *head;
2590
2591	spin_lock(&ctl->tree_lock);
2592	while ((head = block_group->cluster_list.next) !=
2593	       &block_group->cluster_list) {
2594		cluster = list_entry(head, struct btrfs_free_cluster,
2595				     block_group_list);
2596
2597		WARN_ON(cluster->block_group != block_group);
2598		__btrfs_return_cluster_to_free_space(block_group, cluster);
2599
2600		cond_resched_lock(&ctl->tree_lock);
2601	}
2602	__btrfs_remove_free_space_cache_locked(ctl);
2603	spin_unlock(&ctl->tree_lock);
2604
2605}
2606
2607u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2608			       u64 offset, u64 bytes, u64 empty_size,
2609			       u64 *max_extent_size)
2610{
2611	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2612	struct btrfs_free_space *entry = NULL;
2613	u64 bytes_search = bytes + empty_size;
2614	u64 ret = 0;
2615	u64 align_gap = 0;
2616	u64 align_gap_len = 0;
2617
2618	spin_lock(&ctl->tree_lock);
2619	entry = find_free_space(ctl, &offset, &bytes_search,
2620				block_group->full_stripe_len, max_extent_size);
2621	if (!entry)
2622		goto out;
2623
2624	ret = offset;
2625	if (entry->bitmap) {
2626		bitmap_clear_bits(ctl, entry, offset, bytes);
2627		if (!entry->bytes)
2628			free_bitmap(ctl, entry);
2629	} else {
2630		unlink_free_space(ctl, entry);
2631		align_gap_len = offset - entry->offset;
2632		align_gap = entry->offset;
2633
2634		entry->offset = offset + bytes;
2635		WARN_ON(entry->bytes < bytes + align_gap_len);
2636
2637		entry->bytes -= bytes + align_gap_len;
2638		if (!entry->bytes)
2639			kmem_cache_free(btrfs_free_space_cachep, entry);
2640		else
2641			link_free_space(ctl, entry);
2642	}
2643out:
2644	spin_unlock(&ctl->tree_lock);
2645
2646	if (align_gap_len)
2647		__btrfs_add_free_space(block_group->fs_info, ctl,
2648				       align_gap, align_gap_len);
2649	return ret;
2650}
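
When the returned offset was aligned up, btrfs_find_space_for_alloc() carves the allocation out of the middle of the entry: the head gap goes back into the tree after the lock is dropped and the tail stays linked. The bookkeeping in isolation (hypothetical struct, sketch only):

#include <stdint.h>

struct carve { uint64_t gap_start, gap_len, tail_start, tail_len; };

static struct carve sketch_carve(uint64_t entry_off, uint64_t entry_len,
				 uint64_t alloc_off, uint64_t alloc_len)
{
	struct carve c;

	c.gap_start  = entry_off;		/* re-added after unlock */
	c.gap_len    = alloc_off - entry_off;
	c.tail_start = alloc_off + alloc_len;	/* entry keeps the tail */
	c.tail_len   = entry_len - alloc_len - c.gap_len;
	return c;
}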
2651
2652/*
2653 * given a cluster, put all of its extents back into the free space
2654 * cache.  If a block group is passed, this function will only free
2655 * a cluster that belongs to the passed block group.
2656 *
2657 * Otherwise, it'll get a reference on the block group pointed to by the
2658 * cluster and remove the cluster from it.
2659 */
2660int btrfs_return_cluster_to_free_space(
2661			       struct btrfs_block_group_cache *block_group,
2662			       struct btrfs_free_cluster *cluster)
2663{
2664	struct btrfs_free_space_ctl *ctl;
2665	int ret;
2666
2667	/* first, get a safe pointer to the block group */
2668	spin_lock(&cluster->lock);
2669	if (!block_group) {
2670		block_group = cluster->block_group;
2671		if (!block_group) {
2672			spin_unlock(&cluster->lock);
2673			return 0;
2674		}
2675	} else if (cluster->block_group != block_group) {
2676		/* someone else has already freed it, don't redo their work */
2677		spin_unlock(&cluster->lock);
2678		return 0;
2679	}
2680	atomic_inc(&block_group->count);
2681	spin_unlock(&cluster->lock);
2682
2683	ctl = block_group->free_space_ctl;
2684
2685	/* now return any extents the cluster had on it */
2686	spin_lock(&ctl->tree_lock);
2687	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2688	spin_unlock(&ctl->tree_lock);
2689
2690	/* finally drop our ref */
2691	btrfs_put_block_group(block_group);
2692	return ret;
2693}
2694
2695static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2696				   struct btrfs_free_cluster *cluster,
2697				   struct btrfs_free_space *entry,
2698				   u64 bytes, u64 min_start,
2699				   u64 *max_extent_size)
2700{
2701	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2702	int err;
2703	u64 search_start = min_start;
2704	u64 search_bytes = bytes;
2705	u64 ret = 0;
2709
2710	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
2711	if (err) {
2712		if (search_bytes > *max_extent_size)
2713			*max_extent_size = search_bytes;
2714		return 0;
2715	}
2716
2717	ret = search_start;
2718	__bitmap_clear_bits(ctl, entry, ret, bytes);
2719
2720	return ret;
2721}
2722
2723/*
2724 * given a cluster, try to allocate 'bytes' from it, returns 0
2725 * if it couldn't find anything suitably large, or a logical disk offset
2726 * if things worked out
2727 */
2728u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2729			     struct btrfs_free_cluster *cluster, u64 bytes,
2730			     u64 min_start, u64 *max_extent_size)
2731{
2732	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2733	struct btrfs_free_space *entry = NULL;
2734	struct rb_node *node;
2735	u64 ret = 0;
2736
2737	spin_lock(&cluster->lock);
2738	if (bytes > cluster->max_size)
2739		goto out;
2740
2741	if (cluster->block_group != block_group)
2742		goto out;
2743
2744	node = rb_first(&cluster->root);
2745	if (!node)
2746		goto out;
2747
2748	entry = rb_entry(node, struct btrfs_free_space, offset_index);
2749	while (1) {
2750		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
2751			*max_extent_size = entry->bytes;
2752
2753		if (entry->bytes < bytes ||
2754		    (!entry->bitmap && entry->offset < min_start)) {
2755			node = rb_next(&entry->offset_index);
2756			if (!node)
2757				break;
2758			entry = rb_entry(node, struct btrfs_free_space,
2759					 offset_index);
2760			continue;
2761		}
2762
2763		if (entry->bitmap) {
2764			ret = btrfs_alloc_from_bitmap(block_group,
2765						      cluster, entry, bytes,
2766						      cluster->window_start,
2767						      max_extent_size);
2768			if (ret == 0) {
2769				node = rb_next(&entry->offset_index);
2770				if (!node)
2771					break;
2772				entry = rb_entry(node, struct btrfs_free_space,
2773						 offset_index);
2774				continue;
2775			}
2776			cluster->window_start += bytes;
2777		} else {
2778			ret = entry->offset;
2779
2780			entry->offset += bytes;
2781			entry->bytes -= bytes;
2782		}
2783
2784		if (entry->bytes == 0)
2785			rb_erase(&entry->offset_index, &cluster->root);
2786		break;
2787	}
2788out:
2789	spin_unlock(&cluster->lock);
2790
2791	if (!ret)
2792		return 0;
2793
2794	spin_lock(&ctl->tree_lock);
2795
2796	ctl->free_space -= bytes;
2797	if (entry->bytes == 0) {
2798		ctl->free_extents--;
2799		if (entry->bitmap) {
2800			kfree(entry->bitmap);
2801			ctl->total_bitmaps--;
2802			ctl->op->recalc_thresholds(ctl);
2803		}
2804		kmem_cache_free(btrfs_free_space_cachep, entry);
2805	}
2806
2807	spin_unlock(&ctl->tree_lock);
2808
2809	return ret;
2810}
2811
2812static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2813				struct btrfs_free_space *entry,
2814				struct btrfs_free_cluster *cluster,
2815				u64 offset, u64 bytes,
2816				u64 cont1_bytes, u64 min_bytes)
2817{
2818	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2819	unsigned long next_zero;
2820	unsigned long i;
2821	unsigned long want_bits;
2822	unsigned long min_bits;
2823	unsigned long found_bits;
2824	unsigned long max_bits = 0;
2825	unsigned long start = 0;
2826	unsigned long total_found = 0;
2827	int ret;
2828
2829	i = offset_to_bit(entry->offset, ctl->unit,
2830			  max_t(u64, offset, entry->offset));
2831	want_bits = bytes_to_bits(bytes, ctl->unit);
2832	min_bits = bytes_to_bits(min_bytes, ctl->unit);
2833
2834	/*
2835	 * Don't bother looking for a cluster in this bitmap if it's heavily
2836	 * fragmented.
2837	 */
2838	if (entry->max_extent_size &&
2839	    entry->max_extent_size < cont1_bytes)
2840		return -ENOSPC;
2841again:
2842	found_bits = 0;
2843	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2844		next_zero = find_next_zero_bit(entry->bitmap,
2845					       BITS_PER_BITMAP, i);
2846		if (next_zero - i >= min_bits) {
2847			found_bits = next_zero - i;
2848			if (found_bits > max_bits)
2849				max_bits = found_bits;
2850			break;
2851		}
2852		if (next_zero - i > max_bits)
2853			max_bits = next_zero - i;
2854		i = next_zero;
2855	}
2856
2857	if (!found_bits) {
2858		entry->max_extent_size = (u64)max_bits * ctl->unit;
2859		return -ENOSPC;
2860	}
2861
2862	if (!total_found) {
2863		start = i;
2864		cluster->max_size = 0;
2865	}
2866
2867	total_found += found_bits;
2868
2869	if (cluster->max_size < found_bits * ctl->unit)
2870		cluster->max_size = found_bits * ctl->unit;
2871
2872	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2873		i = next_zero + 1;
2874		goto again;
2875	}
2876
2877	cluster->window_start = start * ctl->unit + entry->offset;
2878	rb_erase(&entry->offset_index, &ctl->free_space_offset);
2879	ret = tree_insert_offset(&cluster->root, entry->offset,
2880				 &entry->offset_index, 1);
2881	ASSERT(!ret); /* -EEXIST; Logic error */
2882
2883	trace_btrfs_setup_cluster(block_group, cluster,
2884				  total_found * ctl->unit, 1);
2885	return 0;
2886}
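
btrfs_bitmap_cluster() accumulates runs of set bits of at least min_bits until the total reaches want_bits and the largest run covers cont1_bytes. A toy scan over a byte-addressed bitmap with the same shape (illustrative only, not the kernel's bit helpers):

#include <stdbool.h>
#include <stdint.h>

static bool sketch_window(const uint8_t *bits, uint64_t nbits,
			  uint64_t want_bits, uint64_t min_bits,
			  uint64_t cont1_bits)
{
	uint64_t total = 0, run = 0, max_run = 0;

	for (uint64_t i = 0; i <= nbits; i++) {
		bool set = i < nbits && (bits[i / 8] & (1u << (i % 8)));

		if (set) {
			run++;
			continue;
		}
		if (run >= min_bits)
			total += run;	/* runs below min_bits don't count */
		if (run > max_run)
			max_run = run;	/* cluster->max_size analogue */
		run = 0;
		if (total >= want_bits && max_run >= cont1_bits)
			return true;
	}
	return false;
}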
2887
2888/*
2889 * This searches the block group for just extents to fill the cluster with.
2890 * Try to find a cluster with at least bytes total bytes, at least one
2891 * extent of cont1_bytes, and other clusters of at least min_bytes.
2892 */
2893static noinline int
2894setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2895			struct btrfs_free_cluster *cluster,
2896			struct list_head *bitmaps, u64 offset, u64 bytes,
2897			u64 cont1_bytes, u64 min_bytes)
2898{
2899	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2900	struct btrfs_free_space *first = NULL;
2901	struct btrfs_free_space *entry = NULL;
2902	struct btrfs_free_space *last;
2903	struct rb_node *node;
2904	u64 window_free;
2905	u64 max_extent;
2906	u64 total_size = 0;
2907
2908	entry = tree_search_offset(ctl, offset, 0, 1);
2909	if (!entry)
2910		return -ENOSPC;
2911
2912	/*
2913	 * We don't want bitmaps, so just move along until we find a normal
2914	 * extent entry.
2915	 */
2916	while (entry->bitmap || entry->bytes < min_bytes) {
2917		if (entry->bitmap && list_empty(&entry->list))
2918			list_add_tail(&entry->list, bitmaps);
2919		node = rb_next(&entry->offset_index);
2920		if (!node)
2921			return -ENOSPC;
2922		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2923	}
2924
2925	window_free = entry->bytes;
2926	max_extent = entry->bytes;
2927	first = entry;
2928	last = entry;
2929
2930	for (node = rb_next(&entry->offset_index); node;
2931	     node = rb_next(&entry->offset_index)) {
2932		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2933
2934		if (entry->bitmap) {
2935			if (list_empty(&entry->list))
2936				list_add_tail(&entry->list, bitmaps);
2937			continue;
2938		}
2939
2940		if (entry->bytes < min_bytes)
2941			continue;
2942
2943		last = entry;
2944		window_free += entry->bytes;
2945		if (entry->bytes > max_extent)
2946			max_extent = entry->bytes;
2947	}
2948
2949	if (window_free < bytes || max_extent < cont1_bytes)
2950		return -ENOSPC;
2951
2952	cluster->window_start = first->offset;
2953
2954	node = &first->offset_index;
2955
2956	/*
2957	 * now we've found our entries, pull them out of the free space
2958	 * cache and put them into the cluster rbtree
2959	 */
2960	do {
2961		int ret;
2962
2963		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2964		node = rb_next(&entry->offset_index);
2965		if (entry->bitmap || entry->bytes < min_bytes)
2966			continue;
2967
2968		rb_erase(&entry->offset_index, &ctl->free_space_offset);
2969		ret = tree_insert_offset(&cluster->root, entry->offset,
2970					 &entry->offset_index, 0);
2971		total_size += entry->bytes;
2972		ASSERT(!ret); /* -EEXIST; Logic error */
2973	} while (node && entry != last);
2974
2975	cluster->max_size = max_extent;
2976	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2977	return 0;
2978}
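
The extent walk above is, at heart, a sum-and-max over qualifying entries: ignore anything under min_bytes, total the rest into window_free, remember the largest, and accept only if both thresholds clear. Condensed into a sketch (an array stands in for the rb-tree):

#include <stdbool.h>
#include <stdint.h>

static bool sketch_window_ok(const uint64_t *sizes, int n, uint64_t min_bytes,
			     uint64_t bytes, uint64_t cont1_bytes)
{
	uint64_t window_free = 0, max_extent = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (sizes[i] < min_bytes)
			continue;		/* too small to bother with */
		window_free += sizes[i];
		if (sizes[i] > max_extent)
			max_extent = sizes[i];
	}
	return window_free >= bytes && max_extent >= cont1_bytes;
}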
2979
2980/*
2981 * This specifically looks for bitmaps that may work in the cluster, we assume
2982 * that we have already failed to find extents that will work.
2983 */
2984static noinline int
2985setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2986		     struct btrfs_free_cluster *cluster,
2987		     struct list_head *bitmaps, u64 offset, u64 bytes,
2988		     u64 cont1_bytes, u64 min_bytes)
2989{
2990	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2991	struct btrfs_free_space *entry = NULL;
2992	int ret = -ENOSPC;
2993	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2994
2995	if (ctl->total_bitmaps == 0)
2996		return -ENOSPC;
2997
2998	/*
2999	 * The bitmap that covers offset won't be in the list unless offset
3000	 * is just its start offset.
3001	 */
3002	if (!list_empty(bitmaps))
3003		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3004
3005	if (!entry || entry->offset != bitmap_offset) {
3006		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3007		if (entry && list_empty(&entry->list))
3008			list_add(&entry->list, bitmaps);
3009	}
3010
3011	list_for_each_entry(entry, bitmaps, list) {
3012		if (entry->bytes < bytes)
3013			continue;
3014		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3015					   bytes, cont1_bytes, min_bytes);
3016		if (!ret)
3017			return 0;
3018	}
3019
3020	/*
3021	 * The bitmaps list has all the bitmaps that record free space
3022	 * starting after offset, so no more search is required.
3023	 */
3024	return -ENOSPC;
3025}
3026
3027/*
3028 * here we try to find a cluster of blocks in a block group.  The goal
3029 * is to find at least bytes+empty_size.
3030 * We might not find them all in one contiguous area.
3031 *
3032 * returns zero and sets up cluster if things worked out, otherwise
3033 * it returns -ENOSPC
3034 */
3035int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
3036			     struct btrfs_block_group_cache *block_group,
3037			     struct btrfs_free_cluster *cluster,
3038			     u64 offset, u64 bytes, u64 empty_size)
3039{
3040	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3041	struct btrfs_free_space *entry, *tmp;
3042	LIST_HEAD(bitmaps);
3043	u64 min_bytes;
3044	u64 cont1_bytes;
3045	int ret;
3046
3047	/*
3048	 * Choose the minimum extent size we'll require for this
3049	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
3050	 * For metadata, allow allocates with smaller extents.  For
3051	 * data, keep it dense.
3052	 */
3053	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3054		cont1_bytes = min_bytes = bytes + empty_size;
3055	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3056		cont1_bytes = bytes;
3057		min_bytes = fs_info->sectorsize;
3058	} else {
3059		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3060		min_bytes = fs_info->sectorsize;
3061	}
3062
3063	spin_lock(&ctl->tree_lock);
3064
3065	/*
3066	 * If we know we don't have enough space to make a cluster don't even
3067	 * bother doing all the work to try and find one.
3068	 */
3069	if (ctl->free_space < bytes) {
3070		spin_unlock(&ctl->tree_lock);
3071		return -ENOSPC;
3072	}
3073
3074	spin_lock(&cluster->lock);
3075
3076	/* someone already found a cluster, hooray */
3077	if (cluster->block_group) {
3078		ret = 0;
3079		goto out;
3080	}
3081
3082	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3083				 min_bytes);
3084
3085	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3086				      bytes + empty_size,
3087				      cont1_bytes, min_bytes);
3088	if (ret)
3089		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3090					   offset, bytes + empty_size,
3091					   cont1_bytes, min_bytes);
3092
3093	/* Clear our temporary list */
3094	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3095		list_del_init(&entry->list);
3096
3097	if (!ret) {
3098		atomic_inc(&block_group->count);
3099		list_add_tail(&cluster->block_group_list,
3100			      &block_group->cluster_list);
3101		cluster->block_group = block_group;
3102	} else {
3103		trace_btrfs_failed_cluster_setup(block_group);
3104	}
3105out:
3106	spin_unlock(&cluster->lock);
3107	spin_unlock(&ctl->tree_lock);
3108
3109	return ret;
3110}
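
The sizing policy at the top of btrfs_find_space_cluster() restated as a small helper (sectorsize would be 4K in the common case; sketch only, hypothetical names):

#include <stdbool.h>
#include <stdint.h>

struct policy { uint64_t cont1_bytes, min_bytes; };

static struct policy sketch_policy(bool ssd_spread, bool metadata,
				   uint64_t bytes, uint64_t empty_size,
				   uint64_t sectorsize)
{
	struct policy p;
	uint64_t quarter = (bytes + empty_size) >> 2;

	if (ssd_spread) {			/* no fragmentation at all */
		p.cont1_bytes = p.min_bytes = bytes + empty_size;
	} else if (metadata) {			/* smaller fill-ins are fine */
		p.cont1_bytes = bytes;
		p.min_bytes = sectorsize;
	} else {				/* data: keep it dense */
		p.cont1_bytes = bytes > quarter ? bytes : quarter;
		p.min_bytes = sectorsize;
	}
	return p;
}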
3111
3112/*
3113 * simple code to zero out a cluster
3114 */
3115void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3116{
3117	spin_lock_init(&cluster->lock);
3118	spin_lock_init(&cluster->refill_lock);
3119	cluster->root = RB_ROOT;
3120	cluster->max_size = 0;
3121	cluster->fragmented = false;
3122	INIT_LIST_HEAD(&cluster->block_group_list);
3123	cluster->block_group = NULL;
3124}
3125
3126static int do_trimming(struct btrfs_block_group_cache *block_group,
3127		       u64 *total_trimmed, u64 start, u64 bytes,
3128		       u64 reserved_start, u64 reserved_bytes,
3129		       struct btrfs_trim_range *trim_entry)
3130{
3131	struct btrfs_space_info *space_info = block_group->space_info;
3132	struct btrfs_fs_info *fs_info = block_group->fs_info;
3133	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3134	int ret;
3135	int update = 0;
3136	u64 trimmed = 0;
3137
3138	spin_lock(&space_info->lock);
3139	spin_lock(&block_group->lock);
3140	if (!block_group->ro) {
3141		block_group->reserved += reserved_bytes;
3142		space_info->bytes_reserved += reserved_bytes;
3143		update = 1;
3144	}
3145	spin_unlock(&block_group->lock);
3146	spin_unlock(&space_info->lock);
3147
3148	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3149	if (!ret)
3150		*total_trimmed += trimmed;
3151
3152	mutex_lock(&ctl->cache_writeout_mutex);
3153	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
3154	list_del(&trim_entry->list);
3155	mutex_unlock(&ctl->cache_writeout_mutex);
3156
3157	if (update) {
3158		spin_lock(&space_info->lock);
3159		spin_lock(&block_group->lock);
3160		if (block_group->ro)
3161			space_info->bytes_readonly += reserved_bytes;
3162		block_group->reserved -= reserved_bytes;
3163		space_info->bytes_reserved -= reserved_bytes;
3164		spin_unlock(&space_info->lock);
3165		spin_unlock(&block_group->lock);
3166	}
3167
3168	return ret;
3169}
3170
3171static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3172			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3173{
3174	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3175	struct btrfs_free_space *entry;
3176	struct rb_node *node;
3177	int ret = 0;
3178	u64 extent_start;
3179	u64 extent_bytes;
3180	u64 bytes;
3181
3182	while (start < end) {
3183		struct btrfs_trim_range trim_entry;
3184
3185		mutex_lock(&ctl->cache_writeout_mutex);
3186		spin_lock(&ctl->tree_lock);
3187
3188		if (ctl->free_space < minlen) {
3189			spin_unlock(&ctl->tree_lock);
3190			mutex_unlock(&ctl->cache_writeout_mutex);
3191			break;
3192		}
3193
3194		entry = tree_search_offset(ctl, start, 0, 1);
3195		if (!entry) {
3196			spin_unlock(&ctl->tree_lock);
3197			mutex_unlock(&ctl->cache_writeout_mutex);
3198			break;
3199		}
3200
3201		/* skip bitmaps */
3202		while (entry->bitmap) {
3203			node = rb_next(&entry->offset_index);
3204			if (!node) {
3205				spin_unlock(&ctl->tree_lock);
3206				mutex_unlock(&ctl->cache_writeout_mutex);
3207				goto out;
3208			}
3209			entry = rb_entry(node, struct btrfs_free_space,
3210					 offset_index);
3211		}
3212
3213		if (entry->offset >= end) {
3214			spin_unlock(&ctl->tree_lock);
3215			mutex_unlock(&ctl->cache_writeout_mutex);
3216			break;
3217		}
3218
3219		extent_start = entry->offset;
3220		extent_bytes = entry->bytes;
3221		start = max(start, extent_start);
3222		bytes = min(extent_start + extent_bytes, end) - start;
3223		if (bytes < minlen) {
3224			spin_unlock(&ctl->tree_lock);
3225			mutex_unlock(&ctl->cache_writeout_mutex);
3226			goto next;
3227		}
3228
3229		unlink_free_space(ctl, entry);
3230		kmem_cache_free(btrfs_free_space_cachep, entry);
3231
3232		spin_unlock(&ctl->tree_lock);
3233		trim_entry.start = extent_start;
3234		trim_entry.bytes = extent_bytes;
3235		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3236		mutex_unlock(&ctl->cache_writeout_mutex);
3237
3238		ret = do_trimming(block_group, total_trimmed, start, bytes,
3239				  extent_start, extent_bytes, &trim_entry);
3240		if (ret)
3241			break;
3242next:
3243		start += bytes;
3244
3245		if (fatal_signal_pending(current)) {
3246			ret = -ERESTARTSYS;
3247			break;
3248		}
3249
3250		cond_resched();
3251	}
3252out:
3253	return ret;
3254}
3255
3256static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3257			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3258{
3259	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3260	struct btrfs_free_space *entry;
3261	int ret = 0;
3262	int ret2;
3263	u64 bytes;
3264	u64 offset = offset_to_bitmap(ctl, start);
3265
3266	while (offset < end) {
3267		bool next_bitmap = false;
3268		struct btrfs_trim_range trim_entry;
3269
3270		mutex_lock(&ctl->cache_writeout_mutex);
3271		spin_lock(&ctl->tree_lock);
3272
3273		if (ctl->free_space < minlen) {
3274			spin_unlock(&ctl->tree_lock);
3275			mutex_unlock(&ctl->cache_writeout_mutex);
3276			break;
3277		}
3278
3279		entry = tree_search_offset(ctl, offset, 1, 0);
3280		if (!entry) {
3281			spin_unlock(&ctl->tree_lock);
3282			mutex_unlock(&ctl->cache_writeout_mutex);
3283			next_bitmap = true;
3284			goto next;
3285		}
3286
3287		bytes = minlen;
3288		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3289		if (ret2 || start >= end) {
3290			spin_unlock(&ctl->tree_lock);
3291			mutex_unlock(&ctl->cache_writeout_mutex);
3292			next_bitmap = true;
3293			goto next;
3294		}
3295
3296		bytes = min(bytes, end - start);
3297		if (bytes < minlen) {
3298			spin_unlock(&ctl->tree_lock);
3299			mutex_unlock(&ctl->cache_writeout_mutex);
3300			goto next;
3301		}
3302
3303		bitmap_clear_bits(ctl, entry, start, bytes);
3304		if (entry->bytes == 0)
3305			free_bitmap(ctl, entry);
3306
3307		spin_unlock(&ctl->tree_lock);
3308		trim_entry.start = start;
3309		trim_entry.bytes = bytes;
3310		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3311		mutex_unlock(&ctl->cache_writeout_mutex);
3312
3313		ret = do_trimming(block_group, total_trimmed, start, bytes,
3314				  start, bytes, &trim_entry);
3315		if (ret)
3316			break;
3317next:
3318		if (next_bitmap) {
3319			offset += BITS_PER_BITMAP * ctl->unit;
3320		} else {
3321			start += bytes;
3322			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
3323				offset += BITS_PER_BITMAP * ctl->unit;
3324		}
3325
3326		if (fatal_signal_pending(current)) {
3327			ret = -ERESTARTSYS;
3328			break;
3329		}
3330
3331		cond_resched();
3332	}
3333
3334	return ret;
3335}
3336
3337void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
3338{
3339	atomic_inc(&cache->trimming);
3340}
3341
3342void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
3343{
3344	struct btrfs_fs_info *fs_info = block_group->fs_info;
3345	struct extent_map_tree *em_tree;
3346	struct extent_map *em;
3347	bool cleanup;
3348
3349	spin_lock(&block_group->lock);
3350	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
3351		   block_group->removed);
3352	spin_unlock(&block_group->lock);
3353
3354	if (cleanup) {
3355		mutex_lock(&fs_info->chunk_mutex);
3356		em_tree = &fs_info->mapping_tree.map_tree;
3357		write_lock(&em_tree->lock);
3358		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3359					   1);
3360		BUG_ON(!em); /* logic error, can't happen */
3361		/*
3362		 * remove_extent_mapping() will delete us from the pinned_chunks
3363		 * list, which is protected by the chunk mutex.
3364		 */
3365		remove_extent_mapping(em_tree, em);
3366		write_unlock(&em_tree->lock);
3367		mutex_unlock(&fs_info->chunk_mutex);
3368
3369		/* once for us and once for the tree */
3370		free_extent_map(em);
3371		free_extent_map(em);
3372
3373		/*
3374		 * We've left one free space entry and other tasks trimming
3375		 * this block group have left 1 entry each one. Free them.
3376		 */
3377		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
3378	}
3379}
3380
3381int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3382			   u64 *trimmed, u64 start, u64 end, u64 minlen)
3383{
3384	int ret;
3385
3386	*trimmed = 0;
3387
3388	spin_lock(&block_group->lock);
3389	if (block_group->removed) {
3390		spin_unlock(&block_group->lock);
3391		return 0;
3392	}
3393	btrfs_get_block_group_trimming(block_group);
3394	spin_unlock(&block_group->lock);
3395
3396	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3397	if (ret)
3398		goto out;
3399
3400	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3401out:
3402	btrfs_put_block_group_trimming(block_group);
3403	return ret;
3404}
3405
3406/*
3407 * Find the left-most item in the cache tree, and then return the
3408 * smallest inode number in the item.
3409 *
3410 * Note: the returned inode number may not be the smallest one in
3411 * the tree, if the left-most item is a bitmap.
3412 */
3413u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
3414{
3415	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
3416	struct btrfs_free_space *entry = NULL;
3417	u64 ino = 0;
3418
3419	spin_lock(&ctl->tree_lock);
3420
3421	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
3422		goto out;
3423
3424	entry = rb_entry(rb_first(&ctl->free_space_offset),
3425			 struct btrfs_free_space, offset_index);
3426
3427	if (!entry->bitmap) {
3428		ino = entry->offset;
3429
3430		unlink_free_space(ctl, entry);
3431		entry->offset++;
3432		entry->bytes--;
3433		if (!entry->bytes)
3434			kmem_cache_free(btrfs_free_space_cachep, entry);
3435		else
3436			link_free_space(ctl, entry);
3437	} else {
3438		u64 offset = 0;
3439		u64 count = 1;
3440		int ret;
3441
3442		ret = search_bitmap(ctl, entry, &offset, &count, true);
3443		/* Logic error; Should be empty if it can't find anything */
3444		ASSERT(!ret);
3445
3446		ino = offset;
3447		bitmap_clear_bits(ctl, entry, offset, 1);
3448		if (entry->bytes == 0)
3449			free_bitmap(ctl, entry);
3450	}
3451out:
3452	spin_unlock(&ctl->tree_lock);
3453
3454	return ino;
3455}
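
The extent-entry branch above hands out the lowest cached inode number by shrinking the entry from the front; reduced to its essence (hypothetical struct, sketch only):

#include <stdint.h>

struct ino_range { uint64_t first, count; };

static uint64_t sketch_take_ino(struct ino_range *r)
{
	if (!r->count)
		return 0;	/* cache empty */
	r->count--;
	return r->first++;	/* lowest number out, range shrinks from front */
}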
3456
3457struct inode *lookup_free_ino_inode(struct btrfs_root *root,
3458				    struct btrfs_path *path)
3459{
3460	struct inode *inode = NULL;
3461
3462	spin_lock(&root->ino_cache_lock);
3463	if (root->ino_cache_inode)
3464		inode = igrab(root->ino_cache_inode);
3465	spin_unlock(&root->ino_cache_lock);
3466	if (inode)
3467		return inode;
3468
3469	inode = __lookup_free_space_inode(root, path, 0);
3470	if (IS_ERR(inode))
3471		return inode;
3472
3473	spin_lock(&root->ino_cache_lock);
3474	if (!btrfs_fs_closing(root->fs_info))
3475		root->ino_cache_inode = igrab(inode);
3476	spin_unlock(&root->ino_cache_lock);
3477
3478	return inode;
3479}
3480
3481int create_free_ino_inode(struct btrfs_root *root,
3482			  struct btrfs_trans_handle *trans,
3483			  struct btrfs_path *path)
3484{
3485	return __create_free_space_inode(root, trans, path,
3486					 BTRFS_FREE_INO_OBJECTID, 0);
3487}
3488
3489int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3490{
3491	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3492	struct btrfs_path *path;
3493	struct inode *inode;
3494	int ret = 0;
3495	u64 root_gen = btrfs_root_generation(&root->root_item);
3496
3497	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3498		return 0;
3499
3500	/*
3501	 * If we're unmounting then just return, since this does a search on the
3502	 * normal root and not the commit root and we could deadlock.
3503	 */
3504	if (btrfs_fs_closing(fs_info))
3505		return 0;
3506
3507	path = btrfs_alloc_path();
3508	if (!path)
3509		return 0;
3510
3511	inode = lookup_free_ino_inode(root, path);
3512	if (IS_ERR(inode))
3513		goto out;
3514
3515	if (root_gen != BTRFS_I(inode)->generation)
3516		goto out_put;
3517
3518	ret = __load_free_space_cache(root, inode, ctl, path, 0);
3519
3520	if (ret < 0)
3521		btrfs_err(fs_info,
3522			"failed to load free ino cache for root %llu",
3523			root->root_key.objectid);
3524out_put:
3525	iput(inode);
3526out:
3527	btrfs_free_path(path);
3528	return ret;
3529}
3530
3531int btrfs_write_out_ino_cache(struct btrfs_root *root,
3532			      struct btrfs_trans_handle *trans,
3533			      struct btrfs_path *path,
3534			      struct inode *inode)
3535{
3536	struct btrfs_fs_info *fs_info = root->fs_info;
3537	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3538	int ret;
3539	struct btrfs_io_ctl io_ctl;
3540	bool release_metadata = true;
3541
3542	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3543		return 0;
3544
3545	memset(&io_ctl, 0, sizeof(io_ctl));
3546	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
3547				      trans, path, 0);
3548	if (!ret) {
3549		/*
3550		 * At this point writepages() didn't error out, so our metadata
3551		 * reservation is released when the writeback finishes, at
3552		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
3553		 * with or without an error.
3554		 */
3555		release_metadata = false;
3556		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
3557	}
3558
3559	if (ret) {
3560		if (release_metadata)
3561			btrfs_delalloc_release_metadata(inode, inode->i_size);
3562#ifdef DEBUG
3563		btrfs_err(fs_info,
3564			  "failed to write free ino cache for root %llu",
3565			  root->root_key.objectid);
3566#endif
3567	}
3568
3569	return ret;
3570}
3571
3572#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3573/*
3574 * Use this if you need to make a bitmap or extent entry specifically, it
3575 * doesn't do any of the merging that add_free_space does, this acts a lot like
3576 * how the free space cache loading stuff works, so you can get really weird
3577 * configurations.
3578 */
3579int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
3580			      u64 offset, u64 bytes, bool bitmap)
3581{
3582	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3583	struct btrfs_free_space *info = NULL, *bitmap_info;
3584	void *map = NULL;
3585	u64 bytes_added;
3586	int ret;
3587
3588again:
3589	if (!info) {
3590		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
3591		if (!info)
3592			return -ENOMEM;
3593	}
3594
3595	if (!bitmap) {
3596		spin_lock(&ctl->tree_lock);
3597		info->offset = offset;
3598		info->bytes = bytes;
3599		info->max_extent_size = 0;
3600		ret = link_free_space(ctl, info);
3601		spin_unlock(&ctl->tree_lock);
3602		if (ret)
3603			kmem_cache_free(btrfs_free_space_cachep, info);
3604		return ret;
3605	}
3606
3607	if (!map) {
3608		map = kzalloc(PAGE_SIZE, GFP_NOFS);
3609		if (!map) {
3610			kmem_cache_free(btrfs_free_space_cachep, info);
3611			return -ENOMEM;
3612		}
3613	}
3614
3615	spin_lock(&ctl->tree_lock);
3616	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3617					 1, 0);
3618	if (!bitmap_info) {
3619		info->bitmap = map;
3620		map = NULL;
3621		add_new_bitmap(ctl, info, offset);
3622		bitmap_info = info;
3623		info = NULL;
3624	}
3625
3626	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
3627
3628	bytes -= bytes_added;
3629	offset += bytes_added;
3630	spin_unlock(&ctl->tree_lock);
3631
3632	if (bytes)
3633		goto again;
3634
3635	if (info)
3636		kmem_cache_free(btrfs_free_space_cachep, info);
3637	kfree(map);	/* kfree(NULL) is a no-op */
3639	return 0;
3640}
3641
3642/*
3643 * Checks to see if the given range is in the free space cache.  This is really
3644 * just used to check the absence of space, so if there is free space in the
3645 * range at all we will return 1.
3646 */
3647int test_check_exists(struct btrfs_block_group_cache *cache,
3648		      u64 offset, u64 bytes)
3649{
3650	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3651	struct btrfs_free_space *info;
3652	int ret = 0;
3653
3654	spin_lock(&ctl->tree_lock);
3655	info = tree_search_offset(ctl, offset, 0, 0);
3656	if (!info) {
3657		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3658					  1, 0);
3659		if (!info)
3660			goto out;
3661	}
3662
3663have_info:
3664	if (info->bitmap) {
3665		u64 bit_off, bit_bytes;
3666		struct rb_node *n;
3667		struct btrfs_free_space *tmp;
3668
3669		bit_off = offset;
3670		bit_bytes = ctl->unit;
3671		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
3672		if (!ret) {
3673			if (bit_off == offset) {
3674				ret = 1;
3675				goto out;
3676			} else if (bit_off > offset &&
3677				   offset + bytes > bit_off) {
3678				ret = 1;
3679				goto out;
3680			}
3681		}
3682
3683		n = rb_prev(&info->offset_index);
3684		while (n) {
3685			tmp = rb_entry(n, struct btrfs_free_space,
3686				       offset_index);
3687			if (tmp->offset + tmp->bytes < offset)
3688				break;
3689			if (offset + bytes < tmp->offset) {
3690				n = rb_prev(&tmp->offset_index);
3691				continue;
3692			}
3693			info = tmp;
3694			goto have_info;
3695		}
3696
3697		n = rb_next(&info->offset_index);
3698		while (n) {
3699			tmp = rb_entry(n, struct btrfs_free_space,
3700				       offset_index);
3701			if (offset + bytes < tmp->offset)
3702				break;
3703			if (tmp->offset + tmp->bytes < offset) {
3704				n = rb_next(&tmp->offset_index);
3705				continue;
3706			}
3707			info = tmp;
3708			goto have_info;
3709		}
3710
3711		ret = 0;
3712		goto out;
3713	}
3714
3715	if (info->offset == offset) {
3716		ret = 1;
3717		goto out;
3718	}
3719
3720	if (offset > info->offset && offset < info->offset + info->bytes)
3721		ret = 1;
3722out:
3723	spin_unlock(&ctl->tree_lock);
3724	return ret;
3725}
3726#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
v3.1
   1/*
   2 * Copyright (C) 2008 Red Hat.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/pagemap.h>
  20#include <linux/sched.h>
  21#include <linux/slab.h>
  22#include <linux/math64.h>
 
  23#include "ctree.h"
  24#include "free-space-cache.h"
  25#include "transaction.h"
  26#include "disk-io.h"
  27#include "extent_io.h"
  28#include "inode-map.h"
 
  29
  30#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
  31#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
 
 
 
 
 
 
  32
  33static int link_free_space(struct btrfs_free_space_ctl *ctl,
  34			   struct btrfs_free_space *info);
 
 
 
 
 
 
  35
  36static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
  37					       struct btrfs_path *path,
  38					       u64 offset)
  39{
 
  40	struct btrfs_key key;
  41	struct btrfs_key location;
  42	struct btrfs_disk_key disk_key;
  43	struct btrfs_free_space_header *header;
  44	struct extent_buffer *leaf;
  45	struct inode *inode = NULL;
  46	int ret;
  47
  48	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  49	key.offset = offset;
  50	key.type = 0;
  51
  52	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  53	if (ret < 0)
  54		return ERR_PTR(ret);
  55	if (ret > 0) {
  56		btrfs_release_path(path);
  57		return ERR_PTR(-ENOENT);
  58	}
  59
  60	leaf = path->nodes[0];
  61	header = btrfs_item_ptr(leaf, path->slots[0],
  62				struct btrfs_free_space_header);
  63	btrfs_free_space_key(leaf, header, &disk_key);
  64	btrfs_disk_key_to_cpu(&location, &disk_key);
  65	btrfs_release_path(path);
  66
  67	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
  68	if (!inode)
  69		return ERR_PTR(-ENOENT);
  70	if (IS_ERR(inode))
  71		return inode;
  72	if (is_bad_inode(inode)) {
  73		iput(inode);
  74		return ERR_PTR(-ENOENT);
  75	}
  76
  77	inode->i_mapping->flags &= ~__GFP_FS;
 
 
  78
  79	return inode;
  80}
  81
  82struct inode *lookup_free_space_inode(struct btrfs_root *root,
  83				      struct btrfs_block_group_cache
  84				      *block_group, struct btrfs_path *path)
  85{
  86	struct inode *inode = NULL;
 
 
  87
  88	spin_lock(&block_group->lock);
  89	if (block_group->inode)
  90		inode = igrab(block_group->inode);
  91	spin_unlock(&block_group->lock);
  92	if (inode)
  93		return inode;
  94
  95	inode = __lookup_free_space_inode(root, path,
  96					  block_group->key.objectid);
  97	if (IS_ERR(inode))
  98		return inode;
  99
 100	spin_lock(&block_group->lock);
 101	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
 102		printk(KERN_INFO "Old style space inode found, converting.\n");
 103		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
 104		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 105	}
 106
 107	if (!btrfs_fs_closing(root->fs_info)) {
 108		block_group->inode = igrab(inode);
 109		block_group->iref = 1;
 110	}
 111	spin_unlock(&block_group->lock);
 112
 113	return inode;
 114}
 115
 116int __create_free_space_inode(struct btrfs_root *root,
 117			      struct btrfs_trans_handle *trans,
 118			      struct btrfs_path *path, u64 ino, u64 offset)
 119{
 120	struct btrfs_key key;
 121	struct btrfs_disk_key disk_key;
 122	struct btrfs_free_space_header *header;
 123	struct btrfs_inode_item *inode_item;
 124	struct extent_buffer *leaf;
 125	int ret;
 126
 127	ret = btrfs_insert_empty_inode(trans, root, path, ino);
 128	if (ret)
 129		return ret;
 130
 131	leaf = path->nodes[0];
 132	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 133				    struct btrfs_inode_item);
 134	btrfs_item_key(leaf, &disk_key, path->slots[0]);
 135	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
 136			     sizeof(*inode_item));
 137	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
 138	btrfs_set_inode_size(leaf, inode_item, 0);
 139	btrfs_set_inode_nbytes(leaf, inode_item, 0);
 140	btrfs_set_inode_uid(leaf, inode_item, 0);
 141	btrfs_set_inode_gid(leaf, inode_item, 0);
 142	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
 143	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
 144			      BTRFS_INODE_PREALLOC);
 145	btrfs_set_inode_nlink(leaf, inode_item, 1);
 146	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
 147	btrfs_set_inode_block_group(leaf, inode_item, offset);
 148	btrfs_mark_buffer_dirty(leaf);
 149	btrfs_release_path(path);
 150
 151	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 152	key.offset = offset;
 153	key.type = 0;
 154
 155	ret = btrfs_insert_empty_item(trans, root, path, &key,
 156				      sizeof(struct btrfs_free_space_header));
 157	if (ret < 0) {
 158		btrfs_release_path(path);
 159		return ret;
 160	}
 161	leaf = path->nodes[0];
 162	header = btrfs_item_ptr(leaf, path->slots[0],
 163				struct btrfs_free_space_header);
 164	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
 165	btrfs_set_free_space_key(leaf, header, &disk_key);
 166	btrfs_mark_buffer_dirty(leaf);
 167	btrfs_release_path(path);
 168
 169	return 0;
 170}
 171
 172int create_free_space_inode(struct btrfs_root *root,
 173			    struct btrfs_trans_handle *trans,
 174			    struct btrfs_block_group_cache *block_group,
 175			    struct btrfs_path *path)
 176{
 177	int ret;
 178	u64 ino;
 179
 180	ret = btrfs_find_free_objectid(root, &ino);
 181	if (ret < 0)
 182		return ret;
 183
 184	return __create_free_space_inode(root, trans, path, ino,
 185					 block_group->key.objectid);
 186}
 187
 188int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 189				    struct btrfs_trans_handle *trans,
 190				    struct btrfs_path *path,
 191				    struct inode *inode)
 192{
 193	struct btrfs_block_rsv *rsv;
 194	loff_t oldsize;
 195	int ret = 0;
 196
 197	rsv = trans->block_rsv;
 198	trans->block_rsv = root->orphan_block_rsv;
 199	ret = btrfs_block_rsv_check(trans, root,
 200				    root->orphan_block_rsv,
 201				    0, 5);
 202	if (ret)
 203		return ret;
 204
 205	oldsize = i_size_read(inode);
 206	btrfs_i_size_write(inode, 0);
 207	truncate_pagecache(inode, oldsize, 0);
 208
 209	/*
 210	 * We don't need an orphan item because truncating the free space cache
 211	 * will never be split across transactions.
 212	 */
 213	ret = btrfs_truncate_inode_items(trans, root, inode,
 214					 0, BTRFS_EXTENT_DATA_KEY);
 215
 216	trans->block_rsv = rsv;
 217	if (ret) {
 218		WARN_ON(1);
 219		return ret;
 220	}
 221
 222	ret = btrfs_update_inode(trans, root, inode);
 223	return ret;
 224}
 225
 226static int readahead_cache(struct inode *inode)
 227{
 228	struct file_ra_state *ra;
 229	unsigned long last_index;
 230
 231	ra = kzalloc(sizeof(*ra), GFP_NOFS);
 232	if (!ra)
 233		return -ENOMEM;
 234
 235	file_ra_state_init(ra, inode->i_mapping);
 236	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 237
 238	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
 239
 240	kfree(ra);
 241
 242	return 0;
 243}
 244
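/*
 * Read one block group's on-disk cache back into ctl.  The layout this
 * walks: page 0 starts with a bogus crc and a generation stamp, the
 * entry pages hold packed btrfs_free_space_entry records, and each
 * bitmap entry's payload occupies a whole page after the entry pages.
 * Returns 1 when the cache was loaded; anything <= 0 means it could
 * not be used.
 */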
 245int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 246			    struct btrfs_free_space_ctl *ctl,
 247			    struct btrfs_path *path, u64 offset)
 248{
 249	struct btrfs_free_space_header *header;
 250	struct extent_buffer *leaf;
 251	struct page *page;
 252	struct btrfs_key key;
 253	struct list_head bitmaps;
 254	u64 num_entries;
 255	u64 num_bitmaps;
 256	u64 generation;
 257	pgoff_t index = 0;
 258	int ret = 0;
 259
 260	INIT_LIST_HEAD(&bitmaps);
 261
 262	/* Nothing in the space cache, goodbye */
 263	if (!i_size_read(inode))
 264		goto out;
 265
 266	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 267	key.offset = offset;
 268	key.type = 0;
 269
 270	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 271	if (ret < 0)
 272		goto out;
 273	else if (ret > 0) {
 274		btrfs_release_path(path);
 275		ret = 0;
 276		goto out;
 277	}
 278
 279	ret = -1;
 280
 281	leaf = path->nodes[0];
 282	header = btrfs_item_ptr(leaf, path->slots[0],
 283				struct btrfs_free_space_header);
 284	num_entries = btrfs_free_space_entries(leaf, header);
 285	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
 286	generation = btrfs_free_space_generation(leaf, header);
 287	btrfs_release_path(path);
 288
 289	if (BTRFS_I(inode)->generation != generation) {
 290		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
 291		       " not match free space cache generation (%llu)\n",
 292		       (unsigned long long)BTRFS_I(inode)->generation,
 293		       (unsigned long long)generation);
 294		goto out;
 295	}
 296
 297	if (!num_entries)
 298		goto out;
 299
 300	ret = readahead_cache(inode);
 301	if (ret)
 302		goto out;
 303
 304	while (1) {
 305		struct btrfs_free_space_entry *entry;
 306		struct btrfs_free_space *e;
 307		void *addr;
 308		unsigned long offset = 0;
 309		int need_loop = 0;
 310
 311		if (!num_entries && !num_bitmaps)
 312			break;
 313
 314		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 315		if (!page)
 316			goto free_cache;
 317
 318		if (!PageUptodate(page)) {
 319			btrfs_readpage(NULL, page);
 320			lock_page(page);
 321			if (!PageUptodate(page)) {
 322				unlock_page(page);
 323				page_cache_release(page);
 324				printk(KERN_ERR "btrfs: error reading free "
 325				       "space cache\n");
 326				goto free_cache;
 327			}
 328		}
 329		addr = kmap(page);
 330
 331		if (index == 0) {
 332			u64 *gen;
 333
 334			/*
 335			 * We put a bogus crc in the front of the first page in
 336			 * case old kernels try to mount a fs with the new
 337			 * format to make sure they discard the cache.
 338			 */
 339			addr += sizeof(u64);
 340			offset += sizeof(u64);
 341
 342			gen = addr;
 343			if (*gen != BTRFS_I(inode)->generation) {
 344				printk(KERN_ERR "btrfs: space cache generation"
 345				       " (%llu) does not match inode (%llu)\n",
 346				       (unsigned long long)*gen,
 347				       (unsigned long long)
 348				       BTRFS_I(inode)->generation);
 349				kunmap(page);
 350				unlock_page(page);
 351				page_cache_release(page);
 352				goto free_cache;
 353			}
 354			addr += sizeof(u64);
 355			offset += sizeof(u64);
 356		}
 357		entry = addr;
 358
 359		while (1) {
 360			if (!num_entries)
 361				break;
 362
 363			need_loop = 1;
 364			e = kmem_cache_zalloc(btrfs_free_space_cachep,
 365					      GFP_NOFS);
 366			if (!e) {
 367				kunmap(page);
 368				unlock_page(page);
 369				page_cache_release(page);
 370				goto free_cache;
 371			}
 372
 373			e->offset = le64_to_cpu(entry->offset);
 374			e->bytes = le64_to_cpu(entry->bytes);
 375			if (!e->bytes) {
 376				kunmap(page);
 377				kmem_cache_free(btrfs_free_space_cachep, e);
 378				unlock_page(page);
 379				page_cache_release(page);
 380				goto free_cache;
 381			}
 382
 383			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
 384				spin_lock(&ctl->tree_lock);
 385				ret = link_free_space(ctl, e);
 386				spin_unlock(&ctl->tree_lock);
 387				if (ret) {
 388					printk(KERN_ERR "Duplicate entries in "
 389					       "free space cache, dumping\n");
 390					kunmap(page);
 391					unlock_page(page);
 392					page_cache_release(page);
 393					goto free_cache;
 394				}
 395			} else {
 396				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
 397				if (!e->bitmap) {
 398					kunmap(page);
 399					kmem_cache_free(
 400						btrfs_free_space_cachep, e);
 401					unlock_page(page);
 402					page_cache_release(page);
 403					goto free_cache;
 404				}
 405				spin_lock(&ctl->tree_lock);
 406				ret = link_free_space(ctl, e);
 407				ctl->total_bitmaps++;
 408				ctl->op->recalc_thresholds(ctl);
 409				spin_unlock(&ctl->tree_lock);
 410				if (ret) {
 411					printk(KERN_ERR "Duplicate entries in "
 412					       "free space cache, dumping\n");
 413					kunmap(page);
 414					unlock_page(page);
 415					page_cache_release(page);
 416					goto free_cache;
 417				}
 418				list_add_tail(&e->list, &bitmaps);
 419			}
 420
 421			num_entries--;
 422			offset += sizeof(struct btrfs_free_space_entry);
 423			if (offset + sizeof(struct btrfs_free_space_entry) >=
 424			    PAGE_CACHE_SIZE)
 425				break;
 426			entry++;
 427		}
 428
 429		/*
  430		 * We read an entry out of this page, so we need to move on to the
 431		 * next page.
 432		 */
 433		if (need_loop) {
 434			kunmap(page);
 435			goto next;
 436		}
 437
 438		/*
  439		 * The bitmap pages are stored after all the entry pages, in the
  440		 * same order their bitmap entries were added to the cache.
 441		 */
 442		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
 443		list_del_init(&e->list);
 444		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
 445		kunmap(page);
 446		num_bitmaps--;
 447next:
 448		unlock_page(page);
 449		page_cache_release(page);
 450		index++;
 451	}
 452
 453	ret = 1;
 454out:
 455	return ret;
 456free_cache:
 457	__btrfs_remove_free_space_cache(ctl);
 458	goto out;
 459}
 460
 461int load_free_space_cache(struct btrfs_fs_info *fs_info,
 462			  struct btrfs_block_group_cache *block_group)
 463{
 464	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 465	struct btrfs_root *root = fs_info->tree_root;
 466	struct inode *inode;
 467	struct btrfs_path *path;
 468	int ret;
 469	bool matched;
 470	u64 used = btrfs_block_group_used(&block_group->item);
 471
 472	/*
 473	 * If we're unmounting then just return, since this does a search on the
 474	 * normal root and not the commit root and we could deadlock.
 475	 */
 476	if (btrfs_fs_closing(fs_info))
 477		return 0;
 478
 479	/*
 480	 * If this block group has been marked to be cleared for one reason or
 481	 * another then we can't trust the on disk cache, so just return.
 482	 */
 483	spin_lock(&block_group->lock);
 484	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
 485		spin_unlock(&block_group->lock);
 486		return 0;
 487	}
 488	spin_unlock(&block_group->lock);
 489
 490	path = btrfs_alloc_path();
 491	if (!path)
 492		return 0;
 493
 494	inode = lookup_free_space_inode(root, block_group, path);
 495	if (IS_ERR(inode)) {
 496		btrfs_free_path(path);
 497		return 0;
 498	}
 499
 500	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
 501				      path, block_group->key.objectid);
 502	btrfs_free_path(path);
 503	if (ret <= 0)
 504		goto out;
 505
 506	spin_lock(&ctl->tree_lock);
 507	matched = (ctl->free_space == (block_group->key.offset - used -
 508				       block_group->bytes_super));
 509	spin_unlock(&ctl->tree_lock);
 510
 511	if (!matched) {
 512		__btrfs_remove_free_space_cache(ctl);
  513		printk(KERN_ERR "block group %llu has the wrong amount of free "
  514		       "space\n", block_group->key.objectid);
 515		ret = -1;
 516	}
 517out:
 518	if (ret < 0) {
 519		/* This cache is bogus, make sure it gets cleared */
 520		spin_lock(&block_group->lock);
 521		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 522		spin_unlock(&block_group->lock);
 523		ret = 0;
 524
 525		printk(KERN_ERR "btrfs: failed to load free space cache "
 526		       "for block group %llu\n", block_group->key.objectid);
 527	}
 528
 529	iput(inode);
 530	return ret;
 531}
 532
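/*
 * Serialize ctl's in-memory free space through the cache inode's pages:
 * stamp page 0 with a bogus crc and the transaction id, pack extent
 * entries (including cluster entries and any still-pinned extents in
 * the block group's range), then append one full page per bitmap.
 * Returns 1 on success, 0 when nothing usable could be written, and -1
 * on error.
 */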
 533int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 534			    struct btrfs_free_space_ctl *ctl,
 535			    struct btrfs_block_group_cache *block_group,
 536			    struct btrfs_trans_handle *trans,
 537			    struct btrfs_path *path, u64 offset)
 538{
 539	struct btrfs_free_space_header *header;
 540	struct extent_buffer *leaf;
 541	struct rb_node *node;
 542	struct list_head *pos, *n;
 543	struct page **pages;
 544	struct page *page;
 545	struct extent_state *cached_state = NULL;
 546	struct btrfs_free_cluster *cluster = NULL;
 547	struct extent_io_tree *unpin = NULL;
 548	struct list_head bitmap_list;
 549	struct btrfs_key key;
 550	u64 start, end, len;
 551	u64 bytes = 0;
 552	u32 crc = ~(u32)0;
 553	int index = 0, num_pages = 0;
 554	int entries = 0;
 555	int bitmaps = 0;
 556	int ret = -1;
 557	bool next_page = false;
 558	bool out_of_space = false;
 559
 560	INIT_LIST_HEAD(&bitmap_list);
 561
 562	node = rb_first(&ctl->free_space_offset);
 563	if (!node)
 564		return 0;
 565
 566	if (!i_size_read(inode))
 567		return -1;
 568
 569	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
 570		PAGE_CACHE_SHIFT;
 571
 572	filemap_write_and_wait(inode->i_mapping);
 573	btrfs_wait_ordered_range(inode, inode->i_size &
 574				 ~(root->sectorsize - 1), (u64)-1);
 575
 576	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
 577	if (!pages)
 578		return -1;
 579
 580	/* Get the cluster for this block_group if it exists */
 581	if (block_group && !list_empty(&block_group->cluster_list))
 582		cluster = list_entry(block_group->cluster_list.next,
 583				     struct btrfs_free_cluster,
 584				     block_group_list);
 585
 586	/*
 587	 * We shouldn't have switched the pinned extents yet so this is the
 588	 * right one
 589	 */
 590	unpin = root->fs_info->pinned_extents;
 591
 592	/*
 593	 * Lock all pages first so we can lock the extent safely.
 594	 *
 595	 * NOTE: Because we hold the ref the entire time we're going to write to
  596	 * the page, find_get_page should never fail, so we don't do a check
 597	 * after find_get_page at this point.  Just putting this here so people
 598	 * know and don't freak out.
 599	 */
 600	while (index < num_pages) {
 601		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 602		if (!page) {
 603			int i;
 604
 605			for (i = 0; i < num_pages; i++) {
 606				unlock_page(pages[i]);
 607				page_cache_release(pages[i]);
 608			}
 609			goto out;
 610		}
 611		pages[index] = page;
 612		index++;
 613	}
 614
 615	index = 0;
 616	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 617			 0, &cached_state, GFP_NOFS);
 618
 619	/*
 620	 * When searching for pinned extents, we need to start at our start
 621	 * offset.
 622	 */
 623	if (block_group)
 624		start = block_group->key.objectid;
 625
 626	/* Write out the extent entries */
 627	do {
 628		struct btrfs_free_space_entry *entry;
 629		void *addr, *orig;
 630		unsigned long offset = 0;
 631
 632		next_page = false;
 633
 634		if (index >= num_pages) {
 635			out_of_space = true;
 636			break;
 637		}
 638
 639		page = pages[index];
 640
 641		orig = addr = kmap(page);
 642		if (index == 0) {
 643			u64 *gen;
 644
 645			/*
 646			 * We're going to put in a bogus crc for this page to
 647			 * make sure that old kernels who aren't aware of this
 648			 * format will be sure to discard the cache.
 649			 */
 650			addr += sizeof(u64);
 651			offset += sizeof(u64);
 652
 653			gen = addr;
 654			*gen = trans->transid;
 655			addr += sizeof(u64);
 656			offset += sizeof(u64);
 657		}
 658		entry = addr;
 659
 660		memset(addr, 0, PAGE_CACHE_SIZE - offset);
 661		while (node && !next_page) {
 662			struct btrfs_free_space *e;
 663
 664			e = rb_entry(node, struct btrfs_free_space, offset_index);
 665			entries++;
 666
 667			entry->offset = cpu_to_le64(e->offset);
 668			entry->bytes = cpu_to_le64(e->bytes);
 669			if (e->bitmap) {
 670				entry->type = BTRFS_FREE_SPACE_BITMAP;
 671				list_add_tail(&e->list, &bitmap_list);
 672				bitmaps++;
 673			} else {
 674				entry->type = BTRFS_FREE_SPACE_EXTENT;
 675			}
 676			node = rb_next(node);
 677			if (!node && cluster) {
 678				node = rb_first(&cluster->root);
 679				cluster = NULL;
 680			}
 681			offset += sizeof(struct btrfs_free_space_entry);
 682			if (offset + sizeof(struct btrfs_free_space_entry) >=
 683			    PAGE_CACHE_SIZE)
 684				next_page = true;
 685			entry++;
 686		}
 687
 688		/*
 689		 * We want to add any pinned extents to our free space cache
 690		 * so we don't leak the space
 691		 */
 692		while (block_group && !next_page &&
 693		       (start < block_group->key.objectid +
 694			block_group->key.offset)) {
 695			ret = find_first_extent_bit(unpin, start, &start, &end,
 696						    EXTENT_DIRTY);
 697			if (ret) {
 698				ret = 0;
 699				break;
 700			}
 701
 702			/* This pinned extent is out of our range */
 703			if (start >= block_group->key.objectid +
 704			    block_group->key.offset)
 705				break;
 706
 707			len = block_group->key.objectid +
 708				block_group->key.offset - start;
 709			len = min(len, end + 1 - start);
 710
 711			entries++;
 712			entry->offset = cpu_to_le64(start);
 713			entry->bytes = cpu_to_le64(len);
 714			entry->type = BTRFS_FREE_SPACE_EXTENT;
 715
 716			start = end + 1;
 717			offset += sizeof(struct btrfs_free_space_entry);
 718			if (offset + sizeof(struct btrfs_free_space_entry) >=
 719			    PAGE_CACHE_SIZE)
 720				next_page = true;
 721			entry++;
 722		}
 723
 724		/* Generate bogus crc value */
 725		if (index == 0) {
 726			u32 *tmp;
 727			crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
 728					      PAGE_CACHE_SIZE - sizeof(u64));
 729			btrfs_csum_final(crc, (char *)&crc);
 730			crc++;
 731			tmp = orig;
 732			*tmp = crc;
 733		}
 734
 735		kunmap(page);
 736
 737		bytes += PAGE_CACHE_SIZE;
 738
 739		index++;
 740	} while (node || next_page);
 741
 742	/* Write out the bitmaps */
 743	list_for_each_safe(pos, n, &bitmap_list) {
 744		void *addr;
 745		struct btrfs_free_space *entry =
 746			list_entry(pos, struct btrfs_free_space, list);
 747
 748		if (index >= num_pages) {
 749			out_of_space = true;
 750			break;
 751		}
 752		page = pages[index];
 753
 754		addr = kmap(page);
 755		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
 756		kunmap(page);
 757		bytes += PAGE_CACHE_SIZE;
 758
 759		list_del_init(&entry->list);
 760		index++;
 761	}
 762
 763	if (out_of_space) {
 764		btrfs_drop_pages(pages, num_pages);
 765		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 766				     i_size_read(inode) - 1, &cached_state,
 767				     GFP_NOFS);
 768		ret = 0;
 769		goto out;
 770	}
 771
 772	/* Zero out the rest of the pages just to make sure */
 773	while (index < num_pages) {
 774		void *addr;
 775
 776		page = pages[index];
 777		addr = kmap(page);
 778		memset(addr, 0, PAGE_CACHE_SIZE);
 779		kunmap(page);
 780		bytes += PAGE_CACHE_SIZE;
 781		index++;
 782	}
 783
 784	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
 785					    bytes, &cached_state);
 786	btrfs_drop_pages(pages, num_pages);
 787	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 788			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 789
 790	if (ret) {
 791		ret = 0;
 792		goto out;
 793	}
 794
 795	BTRFS_I(inode)->generation = trans->transid;
 796
 797	filemap_write_and_wait(inode->i_mapping);
 798
 799	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 800	key.offset = offset;
 801	key.type = 0;
 802
 803	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
 804	if (ret < 0) {
 805		ret = -1;
 806		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
 807				 EXTENT_DIRTY | EXTENT_DELALLOC |
 808				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
 809		goto out;
 810	}
 811	leaf = path->nodes[0];
 812	if (ret > 0) {
 813		struct btrfs_key found_key;
 814		BUG_ON(!path->slots[0]);
 815		path->slots[0]--;
 816		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 817		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
 818		    found_key.offset != offset) {
 819			ret = -1;
 820			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
 821					 EXTENT_DIRTY | EXTENT_DELALLOC |
 822					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
 823					 GFP_NOFS);
 824			btrfs_release_path(path);
 825			goto out;
 826		}
 827	}
 828	header = btrfs_item_ptr(leaf, path->slots[0],
 829				struct btrfs_free_space_header);
 830	btrfs_set_free_space_entries(leaf, header, entries);
 831	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
 832	btrfs_set_free_space_generation(leaf, header, trans->transid);
 833	btrfs_mark_buffer_dirty(leaf);
 834	btrfs_release_path(path);
 835
 836	ret = 1;
 837
 838out:
 839	kfree(pages);
 840	if (ret != 1) {
 841		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
 842		BTRFS_I(inode)->generation = 0;
 843	}
 844	btrfs_update_inode(trans, root, inode);
 845	return ret;
 846}
 847
 848int btrfs_write_out_cache(struct btrfs_root *root,
 849			  struct btrfs_trans_handle *trans,
 850			  struct btrfs_block_group_cache *block_group,
 851			  struct btrfs_path *path)
 852{
 853	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 854	struct inode *inode;
 855	int ret = 0;
 856
 857	root = root->fs_info->tree_root;
 858
 859	spin_lock(&block_group->lock);
 860	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
 861		spin_unlock(&block_group->lock);
 862		return 0;
 863	}
 864	spin_unlock(&block_group->lock);
 865
 866	inode = lookup_free_space_inode(root, block_group, path);
 867	if (IS_ERR(inode))
 868		return 0;
 869
 870	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
 871				      path, block_group->key.objectid);
 872	if (ret < 0) {
 873		spin_lock(&block_group->lock);
 874		block_group->disk_cache_state = BTRFS_DC_ERROR;
 875		spin_unlock(&block_group->lock);
 876		ret = 0;
 877
  878		printk(KERN_ERR "btrfs: failed to write free space cache "
 879		       "for block group %llu\n", block_group->key.objectid);
 880	}
 881
 882	iput(inode);
 883	return ret;
 884}
 885
 886static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
 887					  u64 offset)
 888{
 889	BUG_ON(offset < bitmap_start);
 890	offset -= bitmap_start;
 891	return (unsigned long)(div_u64(offset, unit));
 892}
 893
 894static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
 895{
 896	return (unsigned long)(div_u64(bytes, unit));
 897}
 898
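/*
 * Round an offset down to the start of the bitmap region that covers it.
 * Each bitmap spans BITS_PER_BITMAP * ctl->unit bytes from ctl->start,
 * e.g. 32768 bits * 4KiB = 128MiB per bitmap with 4KiB pages and sectors.
 */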
 899static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 900				   u64 offset)
 901{
 902	u64 bitmap_start;
 903	u64 bytes_per_bitmap;
 904
 905	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
 906	bitmap_start = offset - ctl->start;
 907	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
 908	bitmap_start *= bytes_per_bitmap;
 909	bitmap_start += ctl->start;
 910
 911	return bitmap_start;
 912}
 913
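/*
 * Link a free space entry into the offset-sorted rbtree.  An extent entry
 * and a bitmap entry may share an offset (the extent sorts first); two
 * entries of the same kind at one offset are a bug and return -EEXIST.
 */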
 914static int tree_insert_offset(struct rb_root *root, u64 offset,
 915			      struct rb_node *node, int bitmap)
 916{
 917	struct rb_node **p = &root->rb_node;
 918	struct rb_node *parent = NULL;
 919	struct btrfs_free_space *info;
 920
 921	while (*p) {
 922		parent = *p;
 923		info = rb_entry(parent, struct btrfs_free_space, offset_index);
 924
 925		if (offset < info->offset) {
 926			p = &(*p)->rb_left;
 927		} else if (offset > info->offset) {
 928			p = &(*p)->rb_right;
 929		} else {
 930			/*
 931			 * we could have a bitmap entry and an extent entry
 932			 * share the same offset.  If this is the case, we want
 933			 * the extent entry to always be found first if we do a
 934			 * linear search through the tree, since we want to have
 935			 * the quickest allocation time, and allocating from an
 936			 * extent is faster than allocating from a bitmap.  So
 937			 * if we're inserting a bitmap and we find an entry at
 938			 * this offset, we want to go right, or after this entry
 939			 * logically.  If we are inserting an extent and we've
 940			 * found a bitmap, we want to go left, or before
 941			 * logically.
 942			 */
 943			if (bitmap) {
 944				if (info->bitmap) {
 945					WARN_ON_ONCE(1);
 946					return -EEXIST;
 947				}
 948				p = &(*p)->rb_right;
 949			} else {
 950				if (!info->bitmap) {
 951					WARN_ON_ONCE(1);
 952					return -EEXIST;
 953				}
 954				p = &(*p)->rb_left;
 955			}
 956		}
 957	}
 958
 959	rb_link_node(node, parent, p);
 960	rb_insert_color(node, root);
 961
 962	return 0;
 963}
 964
 965/*
 966 * searches the tree for the given offset.
 967 *
 968 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 969 * want a section that has at least bytes size and comes at or after the given
 970 * offset.
 971 */
 972static struct btrfs_free_space *
 973tree_search_offset(struct btrfs_free_space_ctl *ctl,
 974		   u64 offset, int bitmap_only, int fuzzy)
 975{
 976	struct rb_node *n = ctl->free_space_offset.rb_node;
 977	struct btrfs_free_space *entry, *prev = NULL;
 978
 979	/* find entry that is closest to the 'offset' */
 980	while (1) {
 981		if (!n) {
 982			entry = NULL;
 983			break;
 984		}
 985
 986		entry = rb_entry(n, struct btrfs_free_space, offset_index);
 987		prev = entry;
 988
 989		if (offset < entry->offset)
 990			n = n->rb_left;
 991		else if (offset > entry->offset)
 992			n = n->rb_right;
 993		else
 994			break;
 995	}
 996
 997	if (bitmap_only) {
 998		if (!entry)
 999			return NULL;
1000		if (entry->bitmap)
1001			return entry;
1002
1003		/*
1004		 * bitmap entry and extent entry may share same offset,
1005		 * in that case, bitmap entry comes after extent entry.
1006		 */
1007		n = rb_next(n);
1008		if (!n)
1009			return NULL;
1010		entry = rb_entry(n, struct btrfs_free_space, offset_index);
1011		if (entry->offset != offset)
1012			return NULL;
1013
1014		WARN_ON(!entry->bitmap);
1015		return entry;
1016	} else if (entry) {
1017		if (entry->bitmap) {
1018			/*
1019			 * if previous extent entry covers the offset,
1020			 * we should return it instead of the bitmap entry
1021			 */
1022			n = &entry->offset_index;
1023			while (1) {
1024				n = rb_prev(n);
1025				if (!n)
1026					break;
1027				prev = rb_entry(n, struct btrfs_free_space,
1028						offset_index);
1029				if (!prev->bitmap) {
1030					if (prev->offset + prev->bytes > offset)
1031						entry = prev;
1032					break;
1033				}
1034			}
1035		}
1036		return entry;
1037	}
1038
1039	if (!prev)
1040		return NULL;
1041
1042	/* find last entry before the 'offset' */
1043	entry = prev;
1044	if (entry->offset > offset) {
1045		n = rb_prev(&entry->offset_index);
1046		if (n) {
1047			entry = rb_entry(n, struct btrfs_free_space,
1048					offset_index);
1049			BUG_ON(entry->offset > offset);
1050		} else {
1051			if (fuzzy)
1052				return entry;
1053			else
1054				return NULL;
1055		}
1056	}
1057
1058	if (entry->bitmap) {
1059		n = &entry->offset_index;
1060		while (1) {
1061			n = rb_prev(n);
1062			if (!n)
1063				break;
1064			prev = rb_entry(n, struct btrfs_free_space,
1065					offset_index);
1066			if (!prev->bitmap) {
1067				if (prev->offset + prev->bytes > offset)
1068					return prev;
1069				break;
1070			}
1071		}
1072		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1073			return entry;
1074	} else if (entry->offset + entry->bytes > offset)
1075		return entry;
1076
1077	if (!fuzzy)
1078		return NULL;
1079
1080	while (1) {
1081		if (entry->bitmap) {
1082			if (entry->offset + BITS_PER_BITMAP *
1083			    ctl->unit > offset)
1084				break;
1085		} else {
1086			if (entry->offset + entry->bytes > offset)
1087				break;
1088		}
1089
1090		n = rb_next(&entry->offset_index);
1091		if (!n)
1092			return NULL;
1093		entry = rb_entry(n, struct btrfs_free_space, offset_index);
1094	}
1095	return entry;
1096}
1097
1098static inline void
1099__unlink_free_space(struct btrfs_free_space_ctl *ctl,
1100		    struct btrfs_free_space *info)
1101{
1102	rb_erase(&info->offset_index, &ctl->free_space_offset);
1103	ctl->free_extents--;
1104}
1105
1106static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1107			      struct btrfs_free_space *info)
1108{
1109	__unlink_free_space(ctl, info);
1110	ctl->free_space -= info->bytes;
1111}
1112
1113static int link_free_space(struct btrfs_free_space_ctl *ctl,
1114			   struct btrfs_free_space *info)
1115{
1116	int ret = 0;
1117
1118	BUG_ON(!info->bitmap && !info->bytes);
1119	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1120				 &info->offset_index, (info->bitmap != NULL));
1121	if (ret)
1122		return ret;
1123
1124	ctl->free_space += info->bytes;
1125	ctl->free_extents++;
1126	return ret;
1127}
1128
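/*
 * Recompute ctl->extents_thresh, the number of plain extent entries this
 * block group may hold before new free space gets folded into bitmaps,
 * so the cache stays within MAX_CACHE_BYTES_PER_GIG of memory per 1GiB.
 */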
1129static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1130{
1131	struct btrfs_block_group_cache *block_group = ctl->private;
1132	u64 max_bytes;
1133	u64 bitmap_bytes;
1134	u64 extent_bytes;
1135	u64 size = block_group->key.offset;
1136	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
1137	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1138
1139	BUG_ON(ctl->total_bitmaps > max_bitmaps);
1140
1141	/*
1142	 * The goal is to keep the total amount of memory used per 1gb of space
1143	 * at or below 32k, so we need to adjust how much memory we allow to be
1144	 * used by extent based free space tracking
1145	 */
1146	if (size < 1024 * 1024 * 1024)
1147		max_bytes = MAX_CACHE_BYTES_PER_GIG;
1148	else
1149		max_bytes = MAX_CACHE_BYTES_PER_GIG *
1150			div64_u64(size, 1024 * 1024 * 1024);
1151
1152	/*
1153	 * we want to account for 1 more bitmap than what we have so we can make
1154	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1155	 * we add more bitmaps.
1156	 */
1157	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
1158
1159	if (bitmap_bytes >= max_bytes) {
1160		ctl->extents_thresh = 0;
1161		return;
1162	}
1163
1164	/*
 1165	 * we want the extent entry threshold to always be at most 1/2 the max
1166	 * bytes we can have, or whatever is less than that.
1167	 */
1168	extent_bytes = max_bytes - bitmap_bytes;
1169	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
1170
1171	ctl->extents_thresh =
1172		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
1173}
1174
1175static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1176				       struct btrfs_free_space *info,
1177				       u64 offset, u64 bytes)
1178{
1179	unsigned long start, count;
1180
1181	start = offset_to_bit(info->offset, ctl->unit, offset);
1182	count = bytes_to_bits(bytes, ctl->unit);
1183	BUG_ON(start + count > BITS_PER_BITMAP);
1184
1185	bitmap_clear(info->bitmap, start, count);
1186
1187	info->bytes -= bytes;
1188}
1189
1190static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1191			      struct btrfs_free_space *info, u64 offset,
1192			      u64 bytes)
1193{
1194	__bitmap_clear_bits(ctl, info, offset, bytes);
1195	ctl->free_space -= bytes;
1196}
1197
1198static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1199			    struct btrfs_free_space *info, u64 offset,
1200			    u64 bytes)
1201{
1202	unsigned long start, count;
1203
1204	start = offset_to_bit(info->offset, ctl->unit, offset);
1205	count = bytes_to_bits(bytes, ctl->unit);
1206	BUG_ON(start + count > BITS_PER_BITMAP);
1207
1208	bitmap_set(info->bitmap, start, count);
1209
1210	info->bytes += bytes;
1211	ctl->free_space += bytes;
1212}
1213
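/*
 * Find the first run of set bits in bitmap_info that is at least *bytes
 * long, starting at or after *offset.  On success the run's start and
 * full length are stored back through *offset and *bytes and 0 is
 * returned; -1 means no large enough run exists.
 */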
1214static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1215			 struct btrfs_free_space *bitmap_info, u64 *offset,
1216			 u64 *bytes)
1217{
1218	unsigned long found_bits = 0;
1219	unsigned long bits, i;
1220	unsigned long next_zero;
1221
1222	i = offset_to_bit(bitmap_info->offset, ctl->unit,
1223			  max_t(u64, *offset, bitmap_info->offset));
1224	bits = bytes_to_bits(*bytes, ctl->unit);
1225
1226	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
1227	     i < BITS_PER_BITMAP;
1228	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
1229		next_zero = find_next_zero_bit(bitmap_info->bitmap,
1230					       BITS_PER_BITMAP, i);
1231		if ((next_zero - i) >= bits) {
1232			found_bits = next_zero - i;
1233			break;
1234		}
1235		i = next_zero;
1236	}
1237
1238	if (found_bits) {
1239		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1240		*bytes = (u64)(found_bits) * ctl->unit;
1241		return 0;
1242	}
1243
1244	return -1;
1245}
1246
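/*
 * Find the first extent or bitmap entry with at least *bytes of space,
 * scanning the offset index from the bitmap region containing *offset,
 * and update *offset and *bytes to describe the space that was found.
 */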
1247static struct btrfs_free_space *
1248find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1249{
1250	struct btrfs_free_space *entry;
1251	struct rb_node *node;
1252	int ret;
1253
1254	if (!ctl->free_space_offset.rb_node)
1255		return NULL;
1256
1257	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1258	if (!entry)
1259		return NULL;
1260
1261	for (node = &entry->offset_index; node; node = rb_next(node)) {
1262		entry = rb_entry(node, struct btrfs_free_space, offset_index);
1263		if (entry->bytes < *bytes)
1264			continue;
1265
1266		if (entry->bitmap) {
1267			ret = search_bitmap(ctl, entry, offset, bytes);
1268			if (!ret)
1269				return entry;
1270			continue;
1271		}
1272
1273		*offset = entry->offset;
1274		*bytes = entry->bytes;
1275		return entry;
1276	}
1277
1278	return NULL;
1279}
1280
1281static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1282			   struct btrfs_free_space *info, u64 offset)
1283{
1284	info->offset = offset_to_bitmap(ctl, offset);
1285	info->bytes = 0;
1286	link_free_space(ctl, info);
1287	ctl->total_bitmaps++;
1288
1289	ctl->op->recalc_thresholds(ctl);
1290}
1291
1292static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1293			struct btrfs_free_space *bitmap_info)
1294{
1295	unlink_free_space(ctl, bitmap_info);
1296	kfree(bitmap_info->bitmap);
1297	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1298	ctl->total_bitmaps--;
1299	ctl->op->recalc_thresholds(ctl);
1300}
1301
1302static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1303			      struct btrfs_free_space *bitmap_info,
1304			      u64 *offset, u64 *bytes)
1305{
1306	u64 end;
1307	u64 search_start, search_bytes;
1308	int ret;
1309
1310again:
1311	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1312
1313	/*
1314	 * XXX - this can go away after a few releases.
1315	 *
1316	 * since the only user of btrfs_remove_free_space is the tree logging
1317	 * stuff, and the only way to test that is under crash conditions, we
1318	 * want to have this debug stuff here just in case somethings not
 1319	 * want to have this debug stuff here just in case something's not
 1320	 * working.  Search the bitmap for the space we are trying to use to
 1321	 * make sure it's actually there.  If it's not there then we need to stop
1322	 */
1323	search_start = *offset;
1324	search_bytes = *bytes;
1325	search_bytes = min(search_bytes, end - search_start + 1);
1326	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1327	BUG_ON(ret < 0 || search_start != *offset);
1328
1329	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
1330		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
1331		*bytes -= end - *offset + 1;
1332		*offset = end + 1;
1333	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
1334		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
1335		*bytes = 0;
1336	}
1337
1338	if (*bytes) {
1339		struct rb_node *next = rb_next(&bitmap_info->offset_index);
1340		if (!bitmap_info->bytes)
1341			free_bitmap(ctl, bitmap_info);
1342
1343		/*
1344		 * no entry after this bitmap, but we still have bytes to
1345		 * remove, so something has gone wrong.
1346		 */
1347		if (!next)
1348			return -EINVAL;
1349
1350		bitmap_info = rb_entry(next, struct btrfs_free_space,
1351				       offset_index);
1352
1353		/*
1354		 * if the next entry isn't a bitmap we need to return to let the
1355		 * extent stuff do its work.
1356		 */
1357		if (!bitmap_info->bitmap)
1358			return -EAGAIN;
1359
1360		/*
1361		 * Ok the next item is a bitmap, but it may not actually hold
1362		 * the information for the rest of this free space stuff, so
1363		 * look for it, and if we don't find it return so we can try
1364		 * everything over again.
1365		 */
1366		search_start = *offset;
1367		search_bytes = *bytes;
1368		ret = search_bitmap(ctl, bitmap_info, &search_start,
1369				    &search_bytes);
1370		if (ret < 0 || search_start != *offset)
1371			return -EAGAIN;
1372
1373		goto again;
1374	} else if (!bitmap_info->bytes)
1375		free_bitmap(ctl, bitmap_info);
1376
1377	return 0;
1378}
1379
1380static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1381			       struct btrfs_free_space *info, u64 offset,
1382			       u64 bytes)
1383{
1384	u64 bytes_to_set = 0;
1385	u64 end;
1386
1387	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1388
1389	bytes_to_set = min(end - offset, bytes);
1390
1391	bitmap_set_bits(ctl, info, offset, bytes_to_set);
1392
1393	return bytes_to_set;
1394
1395}
1396
1397static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1398		      struct btrfs_free_space *info)
1399{
1400	struct btrfs_block_group_cache *block_group = ctl->private;
1401
1402	/*
1403	 * If we are below the extents threshold then we can add this as an
1404	 * extent, and don't have to deal with the bitmap
1405	 */
1406	if (ctl->free_extents < ctl->extents_thresh) {
1407		/*
1408		 * If this block group has some small extents we don't want to
 1409		 * use up all of our free slots in the cache with them; we want
 1410		 * to reserve them for larger extents.  However, if we have plenty
 1411		 * of cache left then go ahead and add them, no sense in adding
1412		 * the overhead of a bitmap if we don't have to.
1413		 */
1414		if (info->bytes <= block_group->sectorsize * 4) {
1415			if (ctl->free_extents * 2 <= ctl->extents_thresh)
1416				return false;
1417		} else {
1418			return false;
1419		}
1420	}
1421
1422	/*
 1423	 * some block groups are smaller than the range a single bitmap covers,
 1424	 * so don't even bother to create a bitmap for those
1425	 */
1426	if (BITS_PER_BITMAP * block_group->sectorsize >
1427	    block_group->key.offset)
1428		return false;
1429
1430	return true;
1431}
1432
1433static struct btrfs_free_space_op free_space_op = {
1434	.recalc_thresholds	= recalculate_thresholds,
1435	.use_bitmap		= use_bitmap,
1436};
1437
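/*
 * Try to record info's range in bitmaps, preferring a cluster's bitmap,
 * then an existing one, then a freshly allocated one.  Returns 1 once the
 * whole range is in bitmaps (info is consumed), 0 if the caller should
 * keep the range as an extent entry, or -ENOMEM.
 */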
1438static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1439			      struct btrfs_free_space *info)
1440{
1441	struct btrfs_free_space *bitmap_info;
1442	struct btrfs_block_group_cache *block_group = NULL;
1443	int added = 0;
1444	u64 bytes, offset, bytes_added;
1445	int ret;
1446
1447	bytes = info->bytes;
1448	offset = info->offset;
1449
1450	if (!ctl->op->use_bitmap(ctl, info))
1451		return 0;
1452
1453	if (ctl->op == &free_space_op)
1454		block_group = ctl->private;
1455again:
1456	/*
1457	 * Since we link bitmaps right into the cluster we need to see if we
1458	 * have a cluster here, and if so and it has our bitmap we need to add
1459	 * the free space to that bitmap.
1460	 */
1461	if (block_group && !list_empty(&block_group->cluster_list)) {
1462		struct btrfs_free_cluster *cluster;
1463		struct rb_node *node;
1464		struct btrfs_free_space *entry;
1465
1466		cluster = list_entry(block_group->cluster_list.next,
1467				     struct btrfs_free_cluster,
1468				     block_group_list);
1469		spin_lock(&cluster->lock);
1470		node = rb_first(&cluster->root);
1471		if (!node) {
1472			spin_unlock(&cluster->lock);
1473			goto no_cluster_bitmap;
1474		}
1475
1476		entry = rb_entry(node, struct btrfs_free_space, offset_index);
1477		if (!entry->bitmap) {
1478			spin_unlock(&cluster->lock);
1479			goto no_cluster_bitmap;
1480		}
1481
1482		if (entry->offset == offset_to_bitmap(ctl, offset)) {
1483			bytes_added = add_bytes_to_bitmap(ctl, entry,
1484							  offset, bytes);
1485			bytes -= bytes_added;
1486			offset += bytes_added;
1487		}
1488		spin_unlock(&cluster->lock);
1489		if (!bytes) {
1490			ret = 1;
1491			goto out;
1492		}
1493	}
1494
1495no_cluster_bitmap:
1496	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1497					 1, 0);
1498	if (!bitmap_info) {
1499		BUG_ON(added);
1500		goto new_bitmap;
1501	}
1502
1503	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1504	bytes -= bytes_added;
1505	offset += bytes_added;
1506	added = 0;
1507
1508	if (!bytes) {
1509		ret = 1;
1510		goto out;
1511	} else
1512		goto again;
1513
1514new_bitmap:
1515	if (info && info->bitmap) {
1516		add_new_bitmap(ctl, info, offset);
1517		added = 1;
1518		info = NULL;
1519		goto again;
1520	} else {
1521		spin_unlock(&ctl->tree_lock);
1522
1523		/* no pre-allocated info, allocate a new one */
1524		if (!info) {
1525			info = kmem_cache_zalloc(btrfs_free_space_cachep,
1526						 GFP_NOFS);
1527			if (!info) {
1528				spin_lock(&ctl->tree_lock);
1529				ret = -ENOMEM;
1530				goto out;
1531			}
1532		}
1533
1534		/* allocate the bitmap */
1535		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1536		spin_lock(&ctl->tree_lock);
1537		if (!info->bitmap) {
1538			ret = -ENOMEM;
1539			goto out;
1540		}
1541		goto again;
1542	}
1543
1544out:
1545	if (info) {
1546		if (info->bitmap)
1547			kfree(info->bitmap);
1548		kmem_cache_free(btrfs_free_space_cachep, info);
1549	}
1550
1551	return ret;
1552}
1553
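/*
 * Absorb any free extents immediately adjacent to info's range into it,
 * unlinking the old neighbours.  update_stat controls whether
 * ctl->free_space is adjusted as they are removed; returns true if
 * anything was merged.
 */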
1554static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1555			  struct btrfs_free_space *info, bool update_stat)
1556{
1557	struct btrfs_free_space *left_info;
1558	struct btrfs_free_space *right_info;
1559	bool merged = false;
1560	u64 offset = info->offset;
1561	u64 bytes = info->bytes;
1562
1563	/*
1564	 * first we want to see if there is free space adjacent to the range we
 1565	 * are adding; if there is, remove that struct and add a new one to
1566	 * cover the entire range
1567	 */
1568	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1569	if (right_info && rb_prev(&right_info->offset_index))
1570		left_info = rb_entry(rb_prev(&right_info->offset_index),
1571				     struct btrfs_free_space, offset_index);
1572	else
1573		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1574
1575	if (right_info && !right_info->bitmap) {
1576		if (update_stat)
1577			unlink_free_space(ctl, right_info);
1578		else
1579			__unlink_free_space(ctl, right_info);
1580		info->bytes += right_info->bytes;
1581		kmem_cache_free(btrfs_free_space_cachep, right_info);
1582		merged = true;
1583	}
1584
1585	if (left_info && !left_info->bitmap &&
1586	    left_info->offset + left_info->bytes == offset) {
1587		if (update_stat)
1588			unlink_free_space(ctl, left_info);
1589		else
1590			__unlink_free_space(ctl, left_info);
1591		info->offset = left_info->offset;
1592		info->bytes += left_info->bytes;
1593		kmem_cache_free(btrfs_free_space_cachep, left_info);
1594		merged = true;
1595	}
1596
1597	return merged;
1598}
1599
1600int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1601			   u64 offset, u64 bytes)
1602{
1603	struct btrfs_free_space *info;
1604	int ret = 0;
1605
1606	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1607	if (!info)
1608		return -ENOMEM;
1609
1610	info->offset = offset;
1611	info->bytes = bytes;
1612
1613	spin_lock(&ctl->tree_lock);
1614
1615	if (try_merge_free_space(ctl, info, true))
1616		goto link;
1617
1618	/*
1619	 * There was no extent directly to the left or right of this new
 1620	 * extent, so we know we're going to have to allocate a new extent;
 1621	 * before we do that, see if we need to drop this into a bitmap
1622	 */
1623	ret = insert_into_bitmap(ctl, info);
1624	if (ret < 0) {
1625		goto out;
1626	} else if (ret) {
1627		ret = 0;
1628		goto out;
1629	}
1630link:
1631	ret = link_free_space(ctl, info);
1632	if (ret)
1633		kmem_cache_free(btrfs_free_space_cachep, info);
1634out:
1635	spin_unlock(&ctl->tree_lock);
1636
1637	if (ret) {
 1638		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
1639		BUG_ON(ret == -EEXIST);
1640	}
1641
1642	return ret;
1643}
1644
1645int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1646			    u64 offset, u64 bytes)
1647{
1648	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1649	struct btrfs_free_space *info;
1650	struct btrfs_free_space *next_info = NULL;
1651	int ret = 0;
1652
1653	spin_lock(&ctl->tree_lock);
1654
1655again:
1656	info = tree_search_offset(ctl, offset, 0, 0);
1657	if (!info) {
1658		/*
 1659		 * oops, we didn't find an extent that matched the space we wanted
 1660		 * to remove; look for a bitmap instead
1661		 */
1662		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1663					  1, 0);
1664		if (!info) {
1665			WARN_ON(1);
1666			goto out_lock;
1667		}
1668	}
1669
1670	if (info->bytes < bytes && rb_next(&info->offset_index)) {
1671		u64 end;
1672		next_info = rb_entry(rb_next(&info->offset_index),
1673					     struct btrfs_free_space,
1674					     offset_index);
1675
1676		if (next_info->bitmap)
1677			end = next_info->offset +
1678			      BITS_PER_BITMAP * ctl->unit - 1;
1679		else
1680			end = next_info->offset + next_info->bytes;
1681
1682		if (next_info->bytes < bytes ||
1683		    next_info->offset > offset || offset > end) {
1684			printk(KERN_CRIT "Found free space at %llu, size %llu,"
1685			      " trying to use %llu\n",
1686			      (unsigned long long)info->offset,
1687			      (unsigned long long)info->bytes,
1688			      (unsigned long long)bytes);
1689			WARN_ON(1);
1690			ret = -EINVAL;
1691			goto out_lock;
1692		}
1693
1694		info = next_info;
1695	}
1696
1697	if (info->bytes == bytes) {
1698		unlink_free_space(ctl, info);
1699		if (info->bitmap) {
1700			kfree(info->bitmap);
1701			ctl->total_bitmaps--;
1702		}
1703		kmem_cache_free(btrfs_free_space_cachep, info);
1704		goto out_lock;
1705	}
1706
1707	if (!info->bitmap && info->offset == offset) {
1708		unlink_free_space(ctl, info);
1709		info->offset += bytes;
1710		info->bytes -= bytes;
1711		link_free_space(ctl, info);
1712		goto out_lock;
1713	}
1714
1715	if (!info->bitmap && info->offset <= offset &&
1716	    info->offset + info->bytes >= offset + bytes) {
1717		u64 old_start = info->offset;
1718		/*
1719		 * we're freeing space in the middle of the info,
1720		 * this can happen during tree log replay
1721		 *
1722		 * first unlink the old info and then
1723		 * insert it again after the hole we're creating
1724		 */
1725		unlink_free_space(ctl, info);
1726		if (offset + bytes < info->offset + info->bytes) {
1727			u64 old_end = info->offset + info->bytes;
1728
1729			info->offset = offset + bytes;
1730			info->bytes = old_end - info->offset;
1731			ret = link_free_space(ctl, info);
1732			WARN_ON(ret);
1733			if (ret)
1734				goto out_lock;
1735		} else {
1736			/* the hole we're creating ends at the end
1737			 * of the info struct, just free the info
1738			 */
1739			kmem_cache_free(btrfs_free_space_cachep, info);
1740		}
1741		spin_unlock(&ctl->tree_lock);
1742
1743		/* step two, insert a new info struct to cover
1744		 * anything before the hole
1745		 */
1746		ret = btrfs_add_free_space(block_group, old_start,
1747					   offset - old_start);
1748		WARN_ON(ret);
1749		goto out;
1750	}
1751
1752	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1753	if (ret == -EAGAIN)
1754		goto again;
1755	BUG_ON(ret);
1756out_lock:
1757	spin_unlock(&ctl->tree_lock);
1758out:
1759	return ret;
1760}
1761
1762void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1763			   u64 bytes)
1764{
1765	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1766	struct btrfs_free_space *info;
1767	struct rb_node *n;
1768	int count = 0;
1769
1770	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1771		info = rb_entry(n, struct btrfs_free_space, offset_index);
1772		if (info->bytes >= bytes)
1773			count++;
1774		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
1775		       (unsigned long long)info->offset,
1776		       (unsigned long long)info->bytes,
1777		       (info->bitmap) ? "yes" : "no");
1778	}
1779	printk(KERN_INFO "block group has cluster?: %s\n",
1780	       list_empty(&block_group->cluster_list) ? "no" : "yes");
 1781	printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
 1782	       count, (unsigned long long)bytes);
1783}
1784
1785void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1786{
1787	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1788
1789	spin_lock_init(&ctl->tree_lock);
1790	ctl->unit = block_group->sectorsize;
1791	ctl->start = block_group->key.objectid;
1792	ctl->private = block_group;
1793	ctl->op = &free_space_op;
1794
1795	/*
1796	 * we only want to have 32k of ram per block group for keeping
1797	 * track of free space, and if we pass 1/2 of that we want to
1798	 * start converting things over to using bitmaps
1799	 */
1800	ctl->extents_thresh = ((1024 * 32) / 2) /
1801				sizeof(struct btrfs_free_space);
1802}
1803
1804/*
1805 * for a given cluster, put all of its extents back into the free
1806 * space cache.  If the block group passed doesn't match the block group
1807 * pointed to by the cluster, someone else raced in and freed the
1808 * cluster already.  In that case, we just return without changing anything
1809 */
1810static int
1811__btrfs_return_cluster_to_free_space(
1812			     struct btrfs_block_group_cache *block_group,
1813			     struct btrfs_free_cluster *cluster)
1814{
1815	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1816	struct btrfs_free_space *entry;
1817	struct rb_node *node;
1818
1819	spin_lock(&cluster->lock);
1820	if (cluster->block_group != block_group)
1821		goto out;
1822
1823	cluster->block_group = NULL;
1824	cluster->window_start = 0;
1825	list_del_init(&cluster->block_group_list);
1826
1827	node = rb_first(&cluster->root);
1828	while (node) {
1829		bool bitmap;
1830
1831		entry = rb_entry(node, struct btrfs_free_space, offset_index);
1832		node = rb_next(&entry->offset_index);
1833		rb_erase(&entry->offset_index, &cluster->root);
1834
1835		bitmap = (entry->bitmap != NULL);
1836		if (!bitmap)
1837			try_merge_free_space(ctl, entry, false);
1838		tree_insert_offset(&ctl->free_space_offset,
1839				   entry->offset, &entry->offset_index, bitmap);
1840	}
1841	cluster->root = RB_ROOT;
1842
1843out:
1844	spin_unlock(&cluster->lock);
1845	btrfs_put_block_group(block_group);
1846	return 0;
1847}
1848
1849void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1850{
1851	struct btrfs_free_space *info;
1852	struct rb_node *node;
1853
1854	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1855		info = rb_entry(node, struct btrfs_free_space, offset_index);
1856		if (!info->bitmap) {
1857			unlink_free_space(ctl, info);
1858			kmem_cache_free(btrfs_free_space_cachep, info);
1859		} else {
1860			free_bitmap(ctl, info);
1861		}
1862		if (need_resched()) {
1863			spin_unlock(&ctl->tree_lock);
1864			cond_resched();
1865			spin_lock(&ctl->tree_lock);
1866		}
1867	}
1868}
1869
1870void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
1871{
1872	spin_lock(&ctl->tree_lock);
1873	__btrfs_remove_free_space_cache_locked(ctl);
1874	spin_unlock(&ctl->tree_lock);
1875}
1876
1877void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1878{
1879	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1880	struct btrfs_free_cluster *cluster;
1881	struct list_head *head;
1882
1883	spin_lock(&ctl->tree_lock);
1884	while ((head = block_group->cluster_list.next) !=
1885	       &block_group->cluster_list) {
1886		cluster = list_entry(head, struct btrfs_free_cluster,
1887				     block_group_list);
1888
1889		WARN_ON(cluster->block_group != block_group);
1890		__btrfs_return_cluster_to_free_space(block_group, cluster);
1891		if (need_resched()) {
1892			spin_unlock(&ctl->tree_lock);
1893			cond_resched();
1894			spin_lock(&ctl->tree_lock);
1895		}
1896	}
1897	__btrfs_remove_free_space_cache_locked(ctl);
1898	spin_unlock(&ctl->tree_lock);
1899
1900}
1901
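/*
 * Allocate 'bytes' from the block group (searching for bytes + empty_size
 * of contiguous space), trimming the chosen extent or bitmap in place.
 * Returns the start offset of the allocation, or 0 if nothing fit.
 */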
1902u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
1903			       u64 offset, u64 bytes, u64 empty_size)
1904{
1905	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1906	struct btrfs_free_space *entry = NULL;
1907	u64 bytes_search = bytes + empty_size;
1908	u64 ret = 0;
1909
1910	spin_lock(&ctl->tree_lock);
1911	entry = find_free_space(ctl, &offset, &bytes_search);
1912	if (!entry)
1913		goto out;
1914
1915	ret = offset;
1916	if (entry->bitmap) {
1917		bitmap_clear_bits(ctl, entry, offset, bytes);
1918		if (!entry->bytes)
1919			free_bitmap(ctl, entry);
1920	} else {
1921		unlink_free_space(ctl, entry);
1922		entry->offset += bytes;
1923		entry->bytes -= bytes;
1924		if (!entry->bytes)
1925			kmem_cache_free(btrfs_free_space_cachep, entry);
1926		else
1927			link_free_space(ctl, entry);
1928	}
1929
1930out:
1931	spin_unlock(&ctl->tree_lock);
1932
1933	return ret;
1934}
1935
1936/*
1937 * given a cluster, put all of its extents back into the free space
1938 * cache.  If a block group is passed, this function will only free
1939 * a cluster that belongs to the passed block group.
1940 *
1941 * Otherwise, it'll get a reference on the block group pointed to by the
1942 * cluster and remove the cluster from it.
1943 */
1944int btrfs_return_cluster_to_free_space(
1945			       struct btrfs_block_group_cache *block_group,
1946			       struct btrfs_free_cluster *cluster)
1947{
1948	struct btrfs_free_space_ctl *ctl;
1949	int ret;
1950
1951	/* first, get a safe pointer to the block group */
1952	spin_lock(&cluster->lock);
1953	if (!block_group) {
1954		block_group = cluster->block_group;
1955		if (!block_group) {
1956			spin_unlock(&cluster->lock);
1957			return 0;
1958		}
1959	} else if (cluster->block_group != block_group) {
1960		/* someone else has already freed it don't redo their work */
1961		spin_unlock(&cluster->lock);
1962		return 0;
1963	}
1964	atomic_inc(&block_group->count);
1965	spin_unlock(&cluster->lock);
1966
1967	ctl = block_group->free_space_ctl;
1968
1969	/* now return any extents the cluster had on it */
1970	spin_lock(&ctl->tree_lock);
1971	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
1972	spin_unlock(&ctl->tree_lock);
1973
1974	/* finally drop our ref */
1975	btrfs_put_block_group(block_group);
1976	return ret;
1977}
1978
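/*
 * Carve 'bytes' out of a bitmap entry that is attached to a cluster.
 * Returns the start offset of the allocation, or 0 if the bitmap holds
 * no run that large at or after min_start.
 */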
1979static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1980				   struct btrfs_free_cluster *cluster,
1981				   struct btrfs_free_space *entry,
1982				   u64 bytes, u64 min_start)
1983{
1984	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1985	int err;
1986	u64 search_start = min_start;
1987	u64 search_bytes = bytes;
1988	u64 ret = 0;
1992
1993	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
1994	if (err)
1995		return 0;
1996
1997	ret = search_start;
1998	__bitmap_clear_bits(ctl, entry, ret, bytes);
1999
2000	return ret;
2001}
2002
2003/*
2004 * given a cluster, try to allocate 'bytes' from it, returns 0
2005 * if it couldn't find anything suitably large, or a logical disk offset
2006 * if things worked out
2007 */
2008u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2009			     struct btrfs_free_cluster *cluster, u64 bytes,
2010			     u64 min_start)
2011{
2012	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2013	struct btrfs_free_space *entry = NULL;
2014	struct rb_node *node;
2015	u64 ret = 0;
2016
2017	spin_lock(&cluster->lock);
2018	if (bytes > cluster->max_size)
2019		goto out;
2020
2021	if (cluster->block_group != block_group)
2022		goto out;
2023
2024	node = rb_first(&cluster->root);
2025	if (!node)
2026		goto out;
2027
2028	entry = rb_entry(node, struct btrfs_free_space, offset_index);
2029	while (1) {
2030		if (entry->bytes < bytes ||
2031		    (!entry->bitmap && entry->offset < min_start)) {
2032			node = rb_next(&entry->offset_index);
2033			if (!node)
2034				break;
2035			entry = rb_entry(node, struct btrfs_free_space,
2036					 offset_index);
2037			continue;
2038		}
2039
2040		if (entry->bitmap) {
2041			ret = btrfs_alloc_from_bitmap(block_group,
2042						      cluster, entry, bytes,
2043						      min_start);
2044			if (ret == 0) {
2045				node = rb_next(&entry->offset_index);
2046				if (!node)
2047					break;
2048				entry = rb_entry(node, struct btrfs_free_space,
2049						 offset_index);
2050				continue;
2051			}
2052		} else {
2053			ret = entry->offset;
2054
2055			entry->offset += bytes;
2056			entry->bytes -= bytes;
2057		}
2058
2059		if (entry->bytes == 0)
2060			rb_erase(&entry->offset_index, &cluster->root);
2061		break;
2062	}
2063out:
2064	spin_unlock(&cluster->lock);
2065
2066	if (!ret)
2067		return 0;
2068
2069	spin_lock(&ctl->tree_lock);
2070
2071	ctl->free_space -= bytes;
2072	if (entry->bytes == 0) {
2073		ctl->free_extents--;
2074		if (entry->bitmap) {
2075			kfree(entry->bitmap);
2076			ctl->total_bitmaps--;
2077			ctl->op->recalc_thresholds(ctl);
2078		}
2079		kmem_cache_free(btrfs_free_space_cachep, entry);
2080	}
2081
2082	spin_unlock(&ctl->tree_lock);
2083
2084	return ret;
2085}
2086
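/*
 * Try to build the cluster from a single bitmap entry: scan the bitmap for
 * runs of free bits until at least min_bytes worth sit inside a reasonably
 * dense window, then move the entry from the free space tree into the
 * cluster's rbtree.  Returns 0 on success, -ENOSPC if this bitmap can't
 * satisfy the request.
 */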
2087static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2088				struct btrfs_free_space *entry,
2089				struct btrfs_free_cluster *cluster,
2090				u64 offset, u64 bytes, u64 min_bytes)
2091{
2092	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2093	unsigned long next_zero;
2094	unsigned long i;
2095	unsigned long search_bits;
2096	unsigned long total_bits;
2097	unsigned long found_bits;
2098	unsigned long start = 0;
2099	unsigned long total_found = 0;
2100	int ret;
2101	bool found = false;
2102
2103	i = offset_to_bit(entry->offset, block_group->sectorsize,
2104			  max_t(u64, offset, entry->offset));
2105	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
2106	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
2107
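	/*
	 * Walk the bitmap collecting runs of at least search_bits free
	 * bits.  If the window measured from the first accepted run grows
	 * past total_bits * 2 before total_found reaches total_bits, the
	 * free space is too sparse for a good cluster: reset the counters
	 * and start a fresh window from the current position.
	 */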
2108again:
2109	found_bits = 0;
2110	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
2111	     i < BITS_PER_BITMAP;
2112	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
2113		next_zero = find_next_zero_bit(entry->bitmap,
2114					       BITS_PER_BITMAP, i);
2115		if (next_zero - i >= search_bits) {
2116			found_bits = next_zero - i;
2117			break;
2118		}
2119		i = next_zero;
2120	}
2121
2122	if (!found_bits)
2123		return -ENOSPC;
2124
2125	if (!found) {
2126		start = i;
2127		found = true;
2128	}
2129
2130	total_found += found_bits;
2131
2132	if (cluster->max_size < found_bits * block_group->sectorsize)
2133		cluster->max_size = found_bits * block_group->sectorsize;
2134
2135	if (total_found < total_bits) {
2136		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
2137		if (i - start > total_bits * 2) {
2138			total_found = 0;
2139			cluster->max_size = 0;
2140			found = false;
2141		}
2142		goto again;
2143	}
2144
2145	cluster->window_start = start * block_group->sectorsize +
2146		entry->offset;
2147	rb_erase(&entry->offset_index, &ctl->free_space_offset);
2148	ret = tree_insert_offset(&cluster->root, entry->offset,
2149				 &entry->offset_index, 1);
2150	BUG_ON(ret);
2151
2152	return 0;
2153}
2154
2155/*
2156 * This searches the block group for just extents to fill the cluster with.
2157 */
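/*
 * The window heuristic: starting from the first extent entry at or after
 * @offset, widen the window entry by entry until it holds at least
 * min_bytes of free space.  A gap larger than max_gap (128K) between
 * neighbouring entries, or a window wider than min_bytes * 2, restarts
 * the window, which keeps the resulting cluster dense on disk.
 */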
2158static noinline int
2159setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2160			struct btrfs_free_cluster *cluster,
2161			struct list_head *bitmaps, u64 offset, u64 bytes,
2162			u64 min_bytes)
2163{
2164	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2165	struct btrfs_free_space *first = NULL;
2166	struct btrfs_free_space *entry = NULL;
2167	struct btrfs_free_space *prev = NULL;
2168	struct btrfs_free_space *last;
2169	struct rb_node *node;
2170	u64 window_start;
2171	u64 window_free;
2172	u64 max_extent;
2173	u64 max_gap = 128 * 1024;
2174
2175	entry = tree_search_offset(ctl, offset, 0, 1);
2176	if (!entry)
2177		return -ENOSPC;
2178
2179	/*
2180	 * We don't want bitmaps, so just move along until we find a normal
2181	 * extent entry.
2182	 */
2183	while (entry->bitmap) {
2184		if (list_empty(&entry->list))
2185			list_add_tail(&entry->list, bitmaps);
2186		node = rb_next(&entry->offset_index);
2187		if (!node)
2188			return -ENOSPC;
2189		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2190	}
2191
2192	window_start = entry->offset;
2193	window_free = entry->bytes;
2194	max_extent = entry->bytes;
2195	first = entry;
2196	last = entry;
2197	prev = entry;
2198
2199	while (window_free <= min_bytes) {
2200		node = rb_next(&entry->offset_index);
2201		if (!node)
2202			return -ENOSPC;
2203		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2204
2205		if (entry->bitmap) {
2206			if (list_empty(&entry->list))
2207				list_add_tail(&entry->list, bitmaps);
2208			continue;
2209		}
2210
2211		/*
2212		 * we haven't filled the empty size and the window is
2213		 * very large.  Reset and try again.
2214		 */
2215		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
2216		    entry->offset - window_start > (min_bytes * 2)) {
2217			first = entry;
2218			window_start = entry->offset;
2219			window_free = entry->bytes;
2220			last = entry;
2221			max_extent = entry->bytes;
2222		} else {
2223			last = entry;
2224			window_free += entry->bytes;
2225			if (entry->bytes > max_extent)
2226				max_extent = entry->bytes;
2227		}
2228		prev = entry;
2229	}
2230
2231	cluster->window_start = first->offset;
2232
2233	node = &first->offset_index;
2234
2235	/*
2236	 * now we've found our entries, pull them out of the free space
2237	 * cache and put them into the cluster rbtree
2238	 */
2239	do {
2240		int ret;
2241
2242		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2243		node = rb_next(&entry->offset_index);
2244		if (entry->bitmap)
2245			continue;
2246
2247		rb_erase(&entry->offset_index, &ctl->free_space_offset);
2248		ret = tree_insert_offset(&cluster->root, entry->offset,
2249					 &entry->offset_index, 0);
2250		BUG_ON(ret);
2251	} while (node && entry != last);
2252
2253	cluster->max_size = max_extent;
2254
2255	return 0;
2256}
2257
2258/*
2259 * This specifically looks for bitmaps that may work in the cluster; we assume
2260 * that we have already failed to find extents that will work.
2261 */
2262static noinline int
2263setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2264		     struct btrfs_free_cluster *cluster,
2265		     struct list_head *bitmaps, u64 offset, u64 bytes,
2266		     u64 min_bytes)
2267{
2268	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2269	struct btrfs_free_space *entry;
2270	struct rb_node *node;
2271	int ret = -ENOSPC;
2272
2273	if (ctl->total_bitmaps == 0)
2274		return -ENOSPC;
2275
2276	/*
2277	 * First check our cached list of bitmaps and see if there is an entry
2278	 * here that will work.
2279	 */
2280	list_for_each_entry(entry, bitmaps, list) {
2281		if (entry->bytes < min_bytes)
2282			continue;
2283		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2284					   bytes, min_bytes);
2285		if (!ret)
2286			return 0;
2287	}
2288
2289	/*
2290	 * If we do have entries on our list and we are here, then none of them
2291	 * worked, so get the entry that comes after the last entry on this
2292	 * list and start the search from there.
2293	 */
2294	if (!list_empty(bitmaps)) {
2295		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2296				   list);
2297		node = rb_next(&entry->offset_index);
2298		if (!node)
2299			return -ENOSPC;
2300		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2301		goto search;
2302	}
2303
2304	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2305	if (!entry)
2306		return -ENOSPC;
2307
2308search:
2309	node = &entry->offset_index;
2310	do {
2311		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2312		node = rb_next(&entry->offset_index);
2313		if (!entry->bitmap)
2314			continue;
2315		if (entry->bytes < min_bytes)
2316			continue;
2317		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2318					   bytes, min_bytes);
2319	} while (ret && node);
2320
2321	return ret;
2322}
2323
2324/*
2325 * Here we try to find a cluster of blocks in a block group.  The goal
2326 * is to find at least bytes free and up to empty_size + bytes free.
2327 * We might not find them all in one contiguous area.
2328 *
2329 * Returns zero and sets up the cluster if things worked out, otherwise
2330 * it returns -ENOSPC.
2331 */
2332int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2333			     struct btrfs_root *root,
2334			     struct btrfs_block_group_cache *block_group,
2335			     struct btrfs_free_cluster *cluster,
2336			     u64 offset, u64 bytes, u64 empty_size)
2337{
2338	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2339	struct list_head bitmaps;
2340	struct btrfs_free_space *entry, *tmp;
2341	u64 min_bytes;
2342	int ret;
2343
2344	/* for metadata, allow allocations with more holes */
2345	if (btrfs_test_opt(root, SSD_SPREAD)) {
2346		min_bytes = bytes + empty_size;
2347	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2348		/*
2349		 * we want to do larger allocations when we are
2350		 * flushing out the delayed refs; it helps prevent
2351		 * making more work as we go along.
2352		 */
2353		if (trans->transaction->delayed_refs.flushing)
2354			min_bytes = max(bytes, (bytes + empty_size) >> 1);
2355		else
2356			min_bytes = max(bytes, (bytes + empty_size) >> 4);
2357	} else
2358		min_bytes = max(bytes, (bytes + empty_size) >> 2);
2359
2360	spin_lock(&ctl->tree_lock);
2361
2362	/*
2363	 * If we know we don't have enough space to make a cluster, don't even
2364	 * bother doing all the work to try and find one.
2365	 */
2366	if (ctl->free_space < min_bytes) {
2367		spin_unlock(&ctl->tree_lock);
2368		return -ENOSPC;
2369	}
2370
2371	spin_lock(&cluster->lock);
2372
2373	/* someone already found a cluster, hooray */
2374	if (cluster->block_group) {
2375		ret = 0;
2376		goto out;
2377	}
2378
2379	INIT_LIST_HEAD(&bitmaps);
2380	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2381				      bytes, min_bytes);
2382	if (ret)
2383		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2384					   offset, bytes, min_bytes);
2385
2386	/* Clear our temporary list */
2387	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2388		list_del_init(&entry->list);
2389
2390	if (!ret) {
2391		atomic_inc(&block_group->count);
2392		list_add_tail(&cluster->block_group_list,
2393			      &block_group->cluster_list);
2394		cluster->block_group = block_group;
2395	}
2396out:
2397	spin_unlock(&cluster->lock);
2398	spin_unlock(&ctl->tree_lock);
2399
2400	return ret;
2401}
2402
2403/*
2404 * simple code to zero out a cluster
2405 */
2406void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2407{
2408	spin_lock_init(&cluster->lock);
2409	spin_lock_init(&cluster->refill_lock);
2410	cluster->root = RB_ROOT;
2411	cluster->max_size = 0;
2412	INIT_LIST_HEAD(&cluster->block_group_list);
2413	cluster->block_group = NULL;
2414}
2415
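/*
 * Trim (discard) a block group's free space within [start, end), skipping
 * free regions smaller than @minlen; the number of bytes actually discarded
 * comes back in @trimmed.  This is the per-block-group worker behind the
 * filesystem-wide trim (FITRIM) path, and it bails out with -ERESTARTSYS
 * if a fatal signal shows up mid-walk.
 */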
2416int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2417			   u64 *trimmed, u64 start, u64 end, u64 minlen)
2418{
2419	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2420	struct btrfs_free_space *entry = NULL;
2421	struct btrfs_fs_info *fs_info = block_group->fs_info;
2422	u64 bytes = 0;
2423	u64 actually_trimmed;
2424	int ret = 0;
2425
2426	*trimmed = 0;
2427
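	/*
	 * Walk the free space entries from @start.  An extent entry is
	 * unlinked whole and its range (clamped to @end) becomes the trim
	 * candidate; a bitmap entry is searched one free run at a time.
	 * Each candidate is removed from the cache before the discard and
	 * re-added afterwards, so allocators never hand out space that is
	 * in the middle of being trimmed.
	 */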
2428	while (start < end) {
2429		spin_lock(&ctl->tree_lock);
2430
2431		if (ctl->free_space < minlen) {
2432			spin_unlock(&ctl->tree_lock);
2433			break;
2434		}
2435
2436		entry = tree_search_offset(ctl, start, 0, 1);
2437		if (!entry)
2438			entry = tree_search_offset(ctl,
2439						   offset_to_bitmap(ctl, start),
2440						   1, 1);
2441
2442		if (!entry || entry->offset >= end) {
2443			spin_unlock(&ctl->tree_lock);
2444			break;
2445		}
2446
2447		if (entry->bitmap) {
2448			ret = search_bitmap(ctl, entry, &start, &bytes);
2449			if (!ret) {
2450				if (start >= end) {
2451					spin_unlock(&ctl->tree_lock);
2452					break;
2453				}
2454				bytes = min(bytes, end - start);
2455				bitmap_clear_bits(ctl, entry, start, bytes);
2456				if (entry->bytes == 0)
2457					free_bitmap(ctl, entry);
2458			} else {
2459				start = entry->offset + BITS_PER_BITMAP *
2460					block_group->sectorsize;
2461				spin_unlock(&ctl->tree_lock);
2462				ret = 0;
2463				continue;
2464			}
2465		} else {
2466			start = entry->offset;
2467			bytes = min(entry->bytes, end - start);
2468			unlink_free_space(ctl, entry);
2469			kmem_cache_free(btrfs_free_space_cachep, entry);
2470		}
2471
2472		spin_unlock(&ctl->tree_lock);
2473
2474		if (bytes >= minlen) {
2475			int update_ret;
2476			update_ret = btrfs_update_reserved_bytes(block_group,
2477								 bytes, 1, 1);
2478
2479			ret = btrfs_error_discard_extent(fs_info->extent_root,
2480							 start,
2481							 bytes,
2482							 &actually_trimmed);
2483
2484			btrfs_add_free_space(block_group, start, bytes);
2485			if (!update_ret)
2486				btrfs_update_reserved_bytes(block_group,
2487							    bytes, 0, 1);
2488
2489			if (ret)
2490				break;
2491			*trimmed += actually_trimmed;
2492		}
2493		start += bytes;
2494		bytes = 0;
2495
2496		if (fatal_signal_pending(current)) {
2497			ret = -ERESTARTSYS;
2498			break;
2499		}
2500
2501		cond_resched();
2502	}
2503
2504	return ret;
2505}
2506
2507/*
2508 * Find the left-most item in the cache tree, and then return the
2509 * smallest inode number in the item.
2510 *
2511 * Note: the returned inode number may not be the smallest one in
2512 * the tree, if the left-most item is a bitmap.
2513 */
2514u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2515{
2516	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2517	struct btrfs_free_space *entry = NULL;
2518	u64 ino = 0;
2519
2520	spin_lock(&ctl->tree_lock);
2521
2522	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2523		goto out;
2524
2525	entry = rb_entry(rb_first(&ctl->free_space_offset),
2526			 struct btrfs_free_space, offset_index);
2527
2528	if (!entry->bitmap) {
2529		ino = entry->offset;
2530
2531		unlink_free_space(ctl, entry);
2532		entry->offset++;
2533		entry->bytes--;
2534		if (!entry->bytes)
2535			kmem_cache_free(btrfs_free_space_cachep, entry);
2536		else
2537			link_free_space(ctl, entry);
2538	} else {
2539		u64 offset = 0;
2540		u64 count = 1;
2541		int ret;
2542
2543		ret = search_bitmap(ctl, entry, &offset, &count);
2544		BUG_ON(ret);
2545
2546		ino = offset;
2547		bitmap_clear_bits(ctl, entry, offset, 1);
2548		if (entry->bytes == 0)
2549			free_bitmap(ctl, entry);
2550	}
2551out:
2552	spin_unlock(&ctl->tree_lock);
2553
2554	return ino;
2555}
2556
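/*
 * Return the inode backing this root's free inode number cache.  The first
 * lookup stashes a reference in root->cache_inode (unless the fs is
 * closing), so later calls cost only an igrab().
 */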
2557struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2558				    struct btrfs_path *path)
2559{
2560	struct inode *inode = NULL;
2561
2562	spin_lock(&root->cache_lock);
2563	if (root->cache_inode)
2564		inode = igrab(root->cache_inode);
2565	spin_unlock(&root->cache_lock);
2566	if (inode)
2567		return inode;
2568
2569	inode = __lookup_free_space_inode(root, path, 0);
2570	if (IS_ERR(inode))
2571		return inode;
2572
2573	spin_lock(&root->cache_lock);
2574	if (!btrfs_fs_closing(root->fs_info))
2575		root->cache_inode = igrab(inode);
2576	spin_unlock(&root->cache_lock);
2577
2578	return inode;
2579}
2580
2581int create_free_ino_inode(struct btrfs_root *root,
2582			  struct btrfs_trans_handle *trans,
2583			  struct btrfs_path *path)
2584{
2585	return __create_free_space_inode(root, trans, path,
2586					 BTRFS_FREE_INO_OBJECTID, 0);
2587}
2588
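/*
 * Populate the in-memory free inode number cache from its on-disk inode.
 * The cached data is only trusted when the inode's generation matches the
 * root generation it was written under; on a mismatch it is quietly
 * ignored and 0 is returned.
 */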
2589int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2590{
2591	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2592	struct btrfs_path *path;
2593	struct inode *inode;
2594	int ret = 0;
2595	u64 root_gen = btrfs_root_generation(&root->root_item);
2596
2597	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2598		return 0;
2599
2600	/*
2601	 * If we're unmounting then just return, since this does a search on
2602	 * the normal root rather than the commit root, and we could deadlock.
2603	 */
2604	if (btrfs_fs_closing(fs_info))
2605		return 0;
2606
2607	path = btrfs_alloc_path();
2608	if (!path)
2609		return 0;
2610
2611	inode = lookup_free_ino_inode(root, path);
2612	if (IS_ERR(inode))
2613		goto out;
2614
2615	if (root_gen != BTRFS_I(inode)->generation)
2616		goto out_put;
2617
2618	ret = __load_free_space_cache(root, inode, ctl, path, 0);
2619
2620	if (ret < 0)
2621		printk(KERN_ERR "btrfs: failed to load free ino cache for "
2622		       "root %llu\n", root->root_key.objectid);
2623out_put:
2624	iput(inode);
2625out:
2626	btrfs_free_path(path);
2627	return ret;
2628}
2629
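/*
 * Write the in-memory free inode number cache back to disk.  A missing
 * cache inode is not an error and returns 0, mirroring the load side:
 * this cache is strictly an optimization.
 */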
2630int btrfs_write_out_ino_cache(struct btrfs_root *root,
2631			      struct btrfs_trans_handle *trans,
2632			      struct btrfs_path *path)
2633{
2634	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2635	struct inode *inode;
2636	int ret;
2637
2638	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2639		return 0;
2640
2641	inode = lookup_free_ino_inode(root, path);
2642	if (IS_ERR(inode))
2643		return 0;
2644
2645	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2646	if (ret < 0)
2647		printk(KERN_ERR "btrfs: failed to write free ino cache "
2648		       "for root %llu\n", root->root_key.objectid);
2649
2650	iput(inode);
2651	return ret;
2652}