   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/rbtree.h>
   9#include <linux/mm.h>
  10#include "ctree.h"
  11#include "disk-io.h"
  12#include "transaction.h"
  13#include "print-tree.h"
  14#include "locking.h"
  15#include "volumes.h"
  16#include "qgroup.h"
  17
  18static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
  19		      *root, struct btrfs_path *path, int level);
  20static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  21		      const struct btrfs_key *ins_key, struct btrfs_path *path,
  22		      int data_size, int extend);
  23static int push_node_left(struct btrfs_trans_handle *trans,
  24			  struct extent_buffer *dst,
  25			  struct extent_buffer *src, int empty);
  26static int balance_node_right(struct btrfs_trans_handle *trans,
  27			      struct extent_buffer *dst_buf,
  28			      struct extent_buffer *src_buf);
  29static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
  30		    int level, int slot);
  31
  32static const struct btrfs_csums {
  33	u16		size;
  34	const char	*name;
  35} btrfs_csums[] = {
  36	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
  37};
  38
  39int btrfs_super_csum_size(const struct btrfs_super_block *s)
  40{
  41	u16 t = btrfs_super_csum_type(s);
  42	/*
  43	 * csum type is validated at mount time
  44	 */
  45	return btrfs_csums[t].size;
  46}
  47
  48const char *btrfs_super_csum_name(u16 csum_type)
  49{
  50	/* csum type is validated at mount time */
  51	return btrfs_csums[csum_type].name;
  52}
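
/*
 * Editor's sketch (not part of the original file): how the two csum
 * helpers above combine.  example_report_csum() is a hypothetical name;
 * only btrfs_super_csum_type(), btrfs_super_csum_size() and
 * btrfs_super_csum_name() come from the surrounding code.
 */
static inline void example_report_csum(const struct btrfs_super_block *s)
{
	u16 type = btrfs_super_csum_type(s);

	/* both helpers index the btrfs_csums[] table defined above */
	pr_info("checksum %s, %d bytes each\n",
		btrfs_super_csum_name(type), btrfs_super_csum_size(s));
}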
  53
  54struct btrfs_path *btrfs_alloc_path(void)
  55{
  56	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
  57}
  58
  59/*
  60 * set all locked nodes in the path to blocking locks.  This should
  61 * be done before scheduling
  62 */
  63noinline void btrfs_set_path_blocking(struct btrfs_path *p)
  64{
  65	int i;
  66	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
  67		if (!p->nodes[i] || !p->locks[i])
  68			continue;
  69		/*
  70		 * If we currently have a spinning reader or writer lock this
  71		 * will bump the count of blocking holders and drop the
  72		 * spinlock.
  73		 */
  74		if (p->locks[i] == BTRFS_READ_LOCK) {
  75			btrfs_set_lock_blocking_read(p->nodes[i]);
  76			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
  77		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
  78			btrfs_set_lock_blocking_write(p->nodes[i]);
  79			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
  80		}
  81	}
  82}
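
/*
 * Editor's sketch: the usual pattern around btrfs_set_path_blocking().
 * Spinning locks must be converted to blocking ones before anything
 * that may schedule, such as a GFP_NOFS allocation.  The helper name
 * example_alloc_with_path() is hypothetical.
 */
static inline void *example_alloc_with_path(struct btrfs_path *p, size_t len)
{
	/* drop to blocking locks so the allocation may sleep safely */
	btrfs_set_path_blocking(p);
	return kmalloc(len, GFP_NOFS);
}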
  83
  84/* this also releases the path */
  85void btrfs_free_path(struct btrfs_path *p)
  86{
  87	if (!p)
  88		return;
  89	btrfs_release_path(p);
  90	kmem_cache_free(btrfs_path_cachep, p);
  91}
  92
  93/*
  94 * path release drops references on the extent buffers in the path
  95 * and it drops any locks held by this path
  96 *
  97 * It is safe to call this on paths that have no locks or extent buffers held.
  98 */
  99noinline void btrfs_release_path(struct btrfs_path *p)
 100{
 101	int i;
 102
 103	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 104		p->slots[i] = 0;
 105		if (!p->nodes[i])
 106			continue;
 107		if (p->locks[i]) {
 108			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
 109			p->locks[i] = 0;
 110		}
 111		free_extent_buffer(p->nodes[i]);
 112		p->nodes[i] = NULL;
 113	}
 114}
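
/*
 * Editor's sketch: the usual path lifecycle.  btrfs_search_slot() is
 * declared in ctree.h and defined later in this file; the minimal error
 * handling here is illustrative only.
 */
static int example_lookup(struct btrfs_root *root, const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read-only search: no transaction, no extra space, no COW */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

	/* btrfs_free_path() drops locks and references, then frees */
	btrfs_free_path(path);
	return ret;
}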
 115
 116/*
 117 * safely gets a reference on the root node of a tree.  A lock
 118 * is not taken, so a concurrent writer may put a different node
 119 * at the root of the tree.  See btrfs_lock_root_node for the
 120 * looping required.
 121 *
 122 * The extent buffer returned by this has a reference taken, so
 123 * it won't disappear.  It may stop being the root of the tree
 124 * at any time because there are no locks held.
 125 */
 126struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
 127{
 128	struct extent_buffer *eb;
 129
 130	while (1) {
 131		rcu_read_lock();
 132		eb = rcu_dereference(root->node);
 133
 134		/*
 135		 * RCU really hurts here, we could free up the root node because
 136		 * it was COWed but we may not get the new root node yet so do
 137		 * the inc_not_zero dance and if it doesn't work then
 138		 * synchronize_rcu and try again.
 139		 */
 140		if (atomic_inc_not_zero(&eb->refs)) {
 141			rcu_read_unlock();
 142			break;
 143		}
 144		rcu_read_unlock();
 145		synchronize_rcu();
 146	}
 147	return eb;
 148}
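
/*
 * Editor's sketch: btrfs_root_node() pins the buffer but does not lock
 * it, so the result may stop being the root at any moment; callers use
 * it for best-effort reads and must drop the reference afterwards.
 * example_root_level() is a hypothetical name.
 */
static inline int example_root_level(struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_root_node(root);
	int level = btrfs_header_level(eb);

	/* drop the reference taken by btrfs_root_node() */
	free_extent_buffer(eb);
	return level;
}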
 149
 150/* loop around taking references on and locking the root node of the
 151 * tree until you end up with a lock on the root.  A locked buffer
 152 * is returned, with a reference held.
 153 */
 154struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
 155{
 156	struct extent_buffer *eb;
 157
 158	while (1) {
 159		eb = btrfs_root_node(root);
 160		btrfs_tree_lock(eb);
 161		if (eb == root->node)
 162			break;
 163		btrfs_tree_unlock(eb);
 164		free_extent_buffer(eb);
 165	}
 166	return eb;
 167}
 168
 169/* loop around taking references on and read locking the root node of
 170 * the tree until you end up with a read lock on the root.  A read
 171 * locked buffer is returned, with a reference held.
 172 */
 173struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
 174{
 175	struct extent_buffer *eb;
 176
 177	while (1) {
 178		eb = btrfs_root_node(root);
 179		btrfs_tree_read_lock(eb);
 180		if (eb == root->node)
 181			break;
 182		btrfs_tree_read_unlock(eb);
 183		free_extent_buffer(eb);
 184	}
 185	return eb;
 186}
 187
 188/* cowonly roots (everything that is not a reference counted cow
 189 * subvolume) just get put onto a simple dirty list.  transaction.c walks
 190 * this list to make sure they get properly updated on disk.
 191 */
 192static void add_root_to_dirty_list(struct btrfs_root *root)
 193{
 194	struct btrfs_fs_info *fs_info = root->fs_info;
 195
 196	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
 197	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
 198		return;
 199
 200	spin_lock(&fs_info->trans_lock);
 201	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
 202		/* Want the extent tree to be the last on the list */
 203		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
 204			list_move_tail(&root->dirty_list,
 205				       &fs_info->dirty_cowonly_roots);
 206		else
 207			list_move(&root->dirty_list,
 208				  &fs_info->dirty_cowonly_roots);
 209	}
 210	spin_unlock(&fs_info->trans_lock);
 211}
 212
 213/*
 214 * used by snapshot creation to make a copy of a root for a tree with
 215 * a given objectid.  The buffer with the new root node is returned in
 216 * cow_ret, and this func returns zero on success or a negative error code.
 217 */
 218int btrfs_copy_root(struct btrfs_trans_handle *trans,
 219		      struct btrfs_root *root,
 220		      struct extent_buffer *buf,
 221		      struct extent_buffer **cow_ret, u64 new_root_objectid)
 222{
 223	struct btrfs_fs_info *fs_info = root->fs_info;
 224	struct extent_buffer *cow;
 225	int ret = 0;
 226	int level;
 227	struct btrfs_disk_key disk_key;
 228
 229	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 230		trans->transid != fs_info->running_transaction->transid);
 231	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 232		trans->transid != root->last_trans);
 233
 234	level = btrfs_header_level(buf);
 235	if (level == 0)
 236		btrfs_item_key(buf, &disk_key, 0);
 237	else
 238		btrfs_node_key(buf, &disk_key, 0);
 239
 240	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
 241			&disk_key, level, buf->start, 0);
 242	if (IS_ERR(cow))
 243		return PTR_ERR(cow);
 244
 245	copy_extent_buffer_full(cow, buf);
 246	btrfs_set_header_bytenr(cow, cow->start);
 247	btrfs_set_header_generation(cow, trans->transid);
 248	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
 249	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
 250				     BTRFS_HEADER_FLAG_RELOC);
 251	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
 252		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
 253	else
 254		btrfs_set_header_owner(cow, new_root_objectid);
 255
 256	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
 257
 258	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 259	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
 260		ret = btrfs_inc_ref(trans, root, cow, 1);
 261	else
 262		ret = btrfs_inc_ref(trans, root, cow, 0);
 263
 264	if (ret)
 265		return ret;
 266
 267	btrfs_mark_buffer_dirty(cow);
 268	*cow_ret = cow;
 269	return 0;
 270}
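
/*
 * Editor's sketch: how snapshot creation in transaction.c drives
 * btrfs_copy_root().  The lock/unlock choreography loosely mirrors the
 * real caller; example_snapshot_root() and its objectid parameter are
 * hypothetical.
 */
static int example_snapshot_root(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct extent_buffer *old;
	struct extent_buffer *cow = NULL;
	int ret;

	old = btrfs_lock_root_node(root);
	btrfs_set_lock_blocking_write(old);
	ret = btrfs_copy_root(trans, root, old, &cow, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret)
		return ret;

	/* the copy comes back locked and dirty; release the sketch's hold */
	btrfs_tree_unlock(cow);
	free_extent_buffer(cow);
	return 0;
}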
 271
 272enum mod_log_op {
 273	MOD_LOG_KEY_REPLACE,
 274	MOD_LOG_KEY_ADD,
 275	MOD_LOG_KEY_REMOVE,
 276	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
 277	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
 278	MOD_LOG_MOVE_KEYS,
 279	MOD_LOG_ROOT_REPLACE,
 280};
 281
 282struct tree_mod_root {
 283	u64 logical;
 284	u8 level;
 285};
 286
 287struct tree_mod_elem {
 288	struct rb_node node;
 289	u64 logical;
 290	u64 seq;
 291	enum mod_log_op op;
 292
 293	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
 294	int slot;
 295
 296	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
 297	u64 generation;
 298
 299	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
 300	struct btrfs_disk_key key;
 301	u64 blockptr;
 302
 303	/* this is used for op == MOD_LOG_MOVE_KEYS */
 304	struct {
 305		int dst_slot;
 306		int nr_items;
 307	} move;
 308
 309	/* this is used for op == MOD_LOG_ROOT_REPLACE */
 310	struct tree_mod_root old_root;
 311};
 312
 313/*
 314 * Pull a new tree mod seq number for our operation.
 315 */
 316static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
 317{
 318	return atomic64_inc_return(&fs_info->tree_mod_seq);
 319}
 320
 321/*
 322 * This adds a new blocker to the tree mod log's blocker list if the @elem
 323 * passed does not already have a sequence number set. So when a caller expects
 324 * to record tree modifications, it should make sure that elem->seq is set
 325 * to zero before calling btrfs_get_tree_mod_seq.
 326 * Returns a fresh, unused tree log modification sequence number, even if no new
 327 * blocker was added.
 328 */
 329u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
 330			   struct seq_list *elem)
 331{
 332	write_lock(&fs_info->tree_mod_log_lock);
 333	spin_lock(&fs_info->tree_mod_seq_lock);
 334	if (!elem->seq) {
 335		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
 336		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
 337	}
 338	spin_unlock(&fs_info->tree_mod_seq_lock);
 339	write_unlock(&fs_info->tree_mod_log_lock);
 340
 341	return elem->seq;
 342}
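
/*
 * Editor's sketch: the blocker protocol described above.  SEQ_LIST_INIT
 * is assumed to come from ctree.h (it leaves elem.seq at zero);
 * everything else is from the two functions surrounding this sketch.
 */
static u64 example_tree_mod_blocker(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 seq;

	/* elem.seq == 0, so a fresh blocker gets registered */
	seq = btrfs_get_tree_mod_seq(fs_info, &elem);

	/* ... walk old tree versions, passing 'seq' as time_seq ... */

	/* drop the blocker so old log entries can be reclaimed again */
	btrfs_put_tree_mod_seq(fs_info, &elem);
	return seq;
}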
 343
 344void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 345			    struct seq_list *elem)
 346{
 347	struct rb_root *tm_root;
 348	struct rb_node *node;
 349	struct rb_node *next;
 350	struct seq_list *cur_elem;
 351	struct tree_mod_elem *tm;
 352	u64 min_seq = (u64)-1;
 353	u64 seq_putting = elem->seq;
 354
 355	if (!seq_putting)
 356		return;
 357
 358	spin_lock(&fs_info->tree_mod_seq_lock);
 359	list_del(&elem->list);
 360	elem->seq = 0;
 361
 362	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
 363		if (cur_elem->seq < min_seq) {
 364			if (seq_putting > cur_elem->seq) {
 365				/*
 366				 * blocker with lower sequence number exists, we
 367				 * cannot remove anything from the log
 368				 */
 369				spin_unlock(&fs_info->tree_mod_seq_lock);
 370				return;
 371			}
 372			min_seq = cur_elem->seq;
 373		}
 374	}
 375	spin_unlock(&fs_info->tree_mod_seq_lock);
 376
 377	/*
 378	 * anything that's lower than the lowest existing (read: blocked)
 379	 * sequence number can be removed from the tree.
 380	 */
 381	write_lock(&fs_info->tree_mod_log_lock);
 382	tm_root = &fs_info->tree_mod_log;
 383	for (node = rb_first(tm_root); node; node = next) {
 384		next = rb_next(node);
 385		tm = rb_entry(node, struct tree_mod_elem, node);
 386		if (tm->seq > min_seq)
 387			continue;
 388		rb_erase(node, tm_root);
 389		kfree(tm);
 390	}
 391	write_unlock(&fs_info->tree_mod_log_lock);
 392}
 393
 394/*
 395 * key order of the log:
 396 *       node/leaf start address -> sequence
 397 *
 398 * The 'start address' is the logical address of the *new* root node
 399 * for root replace operations, or the logical address of the affected
 400 * block for all other operations.
 401 */
 402static noinline int
 403__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 404{
 405	struct rb_root *tm_root;
 406	struct rb_node **new;
 407	struct rb_node *parent = NULL;
 408	struct tree_mod_elem *cur;
 409
 410	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
 411
 412	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
 413
 414	tm_root = &fs_info->tree_mod_log;
 415	new = &tm_root->rb_node;
 416	while (*new) {
 417		cur = rb_entry(*new, struct tree_mod_elem, node);
 418		parent = *new;
 419		if (cur->logical < tm->logical)
 420			new = &((*new)->rb_left);
 421		else if (cur->logical > tm->logical)
 422			new = &((*new)->rb_right);
 423		else if (cur->seq < tm->seq)
 424			new = &((*new)->rb_left);
 425		else if (cur->seq > tm->seq)
 426			new = &((*new)->rb_right);
 427		else
 428			return -EEXIST;
 429	}
 430
 431	rb_link_node(&tm->node, parent, new);
 432	rb_insert_color(&tm->node, tm_root);
 433	return 0;
 434}
 435
 436/*
 437 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 438 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 439 * the lock until all tree mod log insertions are recorded in the rb tree
 440 * and then release it by write unlocking fs_info::tree_mod_log_lock.
 441 */
 442static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
 443				    struct extent_buffer *eb) {
 444	smp_mb();
 445	if (list_empty(&(fs_info)->tree_mod_seq_list))
 446		return 1;
 447	if (eb && btrfs_header_level(eb) == 0)
 448		return 1;
 449
 450	write_lock(&fs_info->tree_mod_log_lock);
 451	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
 452		write_unlock(&fs_info->tree_mod_log_lock);
 453		return 1;
 454	}
 455
 456	return 0;
 457}
 458
 459/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
 460static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
 461				    struct extent_buffer *eb)
 462{
 463	smp_mb();
 464	if (list_empty(&(fs_info)->tree_mod_seq_list))
 465		return 0;
 466	if (eb && btrfs_header_level(eb) == 0)
 467		return 0;
 468
 469	return 1;
 470}
 471
 472static struct tree_mod_elem *
 473alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
 474		    enum mod_log_op op, gfp_t flags)
 475{
 476	struct tree_mod_elem *tm;
 477
 478	tm = kzalloc(sizeof(*tm), flags);
 479	if (!tm)
 480		return NULL;
 481
 482	tm->logical = eb->start;
 483	if (op != MOD_LOG_KEY_ADD) {
 484		btrfs_node_key(eb, &tm->key, slot);
 485		tm->blockptr = btrfs_node_blockptr(eb, slot);
 486	}
 487	tm->op = op;
 488	tm->slot = slot;
 489	tm->generation = btrfs_node_ptr_generation(eb, slot);
 490	RB_CLEAR_NODE(&tm->node);
 491
 492	return tm;
 493}
 494
 495static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
 496		enum mod_log_op op, gfp_t flags)
 497{
 498	struct tree_mod_elem *tm;
 499	int ret;
 500
 501	if (!tree_mod_need_log(eb->fs_info, eb))
 502		return 0;
 503
 504	tm = alloc_tree_mod_elem(eb, slot, op, flags);
 505	if (!tm)
 506		return -ENOMEM;
 507
 508	if (tree_mod_dont_log(eb->fs_info, eb)) {
 509		kfree(tm);
 510		return 0;
 511	}
 512
 513	ret = __tree_mod_log_insert(eb->fs_info, tm);
 514	write_unlock(&eb->fs_info->tree_mod_log_lock);
 515	if (ret)
 516		kfree(tm);
 517
 518	return ret;
 519}
 520
 521static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
 522		int dst_slot, int src_slot, int nr_items)
 523{
 524	struct tree_mod_elem *tm = NULL;
 525	struct tree_mod_elem **tm_list = NULL;
 526	int ret = 0;
 527	int i;
 528	int locked = 0;
 529
 530	if (!tree_mod_need_log(eb->fs_info, eb))
 531		return 0;
 532
 533	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
 534	if (!tm_list)
 535		return -ENOMEM;
 536
 537	tm = kzalloc(sizeof(*tm), GFP_NOFS);
 538	if (!tm) {
 539		ret = -ENOMEM;
 540		goto free_tms;
 541	}
 542
 543	tm->logical = eb->start;
 544	tm->slot = src_slot;
 545	tm->move.dst_slot = dst_slot;
 546	tm->move.nr_items = nr_items;
 547	tm->op = MOD_LOG_MOVE_KEYS;
 548
 549	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
 550		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
 551		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
 552		if (!tm_list[i]) {
 553			ret = -ENOMEM;
 554			goto free_tms;
 555		}
 556	}
 557
 558	if (tree_mod_dont_log(eb->fs_info, eb))
 559		goto free_tms;
 560	locked = 1;
 561
 562	/*
 563	 * When we overwrite something during the move, we log these removals.
 564	 * This can only happen when we move towards the beginning of the
 565	 * buffer, i.e. dst_slot < src_slot.
 566	 */
 567	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
 568		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
 569		if (ret)
 570			goto free_tms;
 571	}
 572
 573	ret = __tree_mod_log_insert(eb->fs_info, tm);
 574	if (ret)
 575		goto free_tms;
 576	write_unlock(&eb->fs_info->tree_mod_log_lock);
 577	kfree(tm_list);
 578
 579	return 0;
 580free_tms:
 581	for (i = 0; i < nr_items; i++) {
 582		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
 583			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
 584		kfree(tm_list[i]);
 585	}
 586	if (locked)
 587		write_unlock(&eb->fs_info->tree_mod_log_lock);
 588	kfree(tm_list);
 589	kfree(tm);
 590
 591	return ret;
 592}
 593
 594static inline int
 595__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
 596		       struct tree_mod_elem **tm_list,
 597		       int nritems)
 598{
 599	int i, j;
 600	int ret;
 601
 602	for (i = nritems - 1; i >= 0; i--) {
 603		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
 604		if (ret) {
 605			for (j = nritems - 1; j > i; j--)
 606				rb_erase(&tm_list[j]->node,
 607					 &fs_info->tree_mod_log);
 608			return ret;
 609		}
 610	}
 611
 612	return 0;
 613}
 614
 615static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
 616			 struct extent_buffer *new_root, int log_removal)
 617{
 618	struct btrfs_fs_info *fs_info = old_root->fs_info;
 619	struct tree_mod_elem *tm = NULL;
 620	struct tree_mod_elem **tm_list = NULL;
 621	int nritems = 0;
 622	int ret = 0;
 623	int i;
 624
 625	if (!tree_mod_need_log(fs_info, NULL))
 626		return 0;
 627
 628	if (log_removal && btrfs_header_level(old_root) > 0) {
 629		nritems = btrfs_header_nritems(old_root);
 630		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
 631				  GFP_NOFS);
 632		if (!tm_list) {
 633			ret = -ENOMEM;
 634			goto free_tms;
 635		}
 636		for (i = 0; i < nritems; i++) {
 637			tm_list[i] = alloc_tree_mod_elem(old_root, i,
 638			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
 639			if (!tm_list[i]) {
 640				ret = -ENOMEM;
 641				goto free_tms;
 642			}
 643		}
 644	}
 645
 646	tm = kzalloc(sizeof(*tm), GFP_NOFS);
 647	if (!tm) {
 648		ret = -ENOMEM;
 649		goto free_tms;
 650	}
 651
 652	tm->logical = new_root->start;
 653	tm->old_root.logical = old_root->start;
 654	tm->old_root.level = btrfs_header_level(old_root);
 655	tm->generation = btrfs_header_generation(old_root);
 656	tm->op = MOD_LOG_ROOT_REPLACE;
 657
 658	if (tree_mod_dont_log(fs_info, NULL))
 659		goto free_tms;
 660
 661	if (tm_list)
 662		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
 663	if (!ret)
 664		ret = __tree_mod_log_insert(fs_info, tm);
 665
 666	write_unlock(&fs_info->tree_mod_log_lock);
 667	if (ret)
 668		goto free_tms;
 669	kfree(tm_list);
 670
 671	return ret;
 672
 673free_tms:
 674	if (tm_list) {
 675		for (i = 0; i < nritems; i++)
 676			kfree(tm_list[i]);
 677		kfree(tm_list);
 678	}
 679	kfree(tm);
 680
 681	return ret;
 682}
 683
 684static struct tree_mod_elem *
 685__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 686		      int smallest)
 687{
 688	struct rb_root *tm_root;
 689	struct rb_node *node;
 690	struct tree_mod_elem *cur = NULL;
 691	struct tree_mod_elem *found = NULL;
 692
 693	read_lock(&fs_info->tree_mod_log_lock);
 694	tm_root = &fs_info->tree_mod_log;
 695	node = tm_root->rb_node;
 696	while (node) {
 697		cur = rb_entry(node, struct tree_mod_elem, node);
 698		if (cur->logical < start) {
 699			node = node->rb_left;
 700		} else if (cur->logical > start) {
 701			node = node->rb_right;
 702		} else if (cur->seq < min_seq) {
 703			node = node->rb_left;
 704		} else if (!smallest) {
 705			/* we want the node with the highest seq */
 706			if (found)
 707				BUG_ON(found->seq > cur->seq);
 708			found = cur;
 709			node = node->rb_left;
 710		} else if (cur->seq > min_seq) {
 711			/* we want the node with the smallest seq */
 712			if (found)
 713				BUG_ON(found->seq < cur->seq);
 714			found = cur;
 715			node = node->rb_right;
 716		} else {
 717			found = cur;
 718			break;
 719		}
 720	}
 721	read_unlock(&fs_info->tree_mod_log_lock);
 722
 723	return found;
 724}
 725
 726/*
 727 * this returns the element from the log with the smallest time sequence
 728 * value that's in the log (the oldest log item). any element with a time
 729 * sequence lower than min_seq will be ignored.
 730 */
 731static struct tree_mod_elem *
 732tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
 733			   u64 min_seq)
 734{
 735	return __tree_mod_log_search(fs_info, start, min_seq, 1);
 736}
 737
 738/*
 739 * this returns the element from the log with the largest time sequence
 740 * value that's in the log (the most recent log item). any element with
 741 * a time sequence lower than min_seq will be ignored.
 742 */
 743static struct tree_mod_elem *
 744tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 745{
 746	return __tree_mod_log_search(fs_info, start, min_seq, 0);
 747}
 748
 749static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
 750		     struct extent_buffer *src, unsigned long dst_offset,
 751		     unsigned long src_offset, int nr_items)
 752{
 753	struct btrfs_fs_info *fs_info = dst->fs_info;
 754	int ret = 0;
 755	struct tree_mod_elem **tm_list = NULL;
 756	struct tree_mod_elem **tm_list_add, **tm_list_rem;
 757	int i;
 758	int locked = 0;
 759
 760	if (!tree_mod_need_log(fs_info, NULL))
 761		return 0;
 762
 763	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
 764		return 0;
 765
 766	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
 767			  GFP_NOFS);
 768	if (!tm_list)
 769		return -ENOMEM;
 770
 771	tm_list_add = tm_list;
 772	tm_list_rem = tm_list + nr_items;
 773	for (i = 0; i < nr_items; i++) {
 774		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
 775		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
 776		if (!tm_list_rem[i]) {
 777			ret = -ENOMEM;
 778			goto free_tms;
 779		}
 780
 781		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
 782		    MOD_LOG_KEY_ADD, GFP_NOFS);
 783		if (!tm_list_add[i]) {
 784			ret = -ENOMEM;
 785			goto free_tms;
 786		}
 787	}
 788
 789	if (tree_mod_dont_log(fs_info, NULL))
 790		goto free_tms;
 791	locked = 1;
 792
 793	for (i = 0; i < nr_items; i++) {
 794		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
 795		if (ret)
 796			goto free_tms;
 797		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
 798		if (ret)
 799			goto free_tms;
 800	}
 801
 802	write_unlock(&fs_info->tree_mod_log_lock);
 803	kfree(tm_list);
 804
 805	return 0;
 806
 807free_tms:
 808	for (i = 0; i < nr_items * 2; i++) {
 809		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
 810			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
 811		kfree(tm_list[i]);
 812	}
 813	if (locked)
 814		write_unlock(&fs_info->tree_mod_log_lock);
 815	kfree(tm_list);
 816
 817	return ret;
 818}
 819
 820static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
 821{
 822	struct tree_mod_elem **tm_list = NULL;
 823	int nritems = 0;
 824	int i;
 825	int ret = 0;
 826
 827	if (btrfs_header_level(eb) == 0)
 828		return 0;
 829
 830	if (!tree_mod_need_log(eb->fs_info, NULL))
 831		return 0;
 832
 833	nritems = btrfs_header_nritems(eb);
 834	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
 835	if (!tm_list)
 836		return -ENOMEM;
 837
 838	for (i = 0; i < nritems; i++) {
 839		tm_list[i] = alloc_tree_mod_elem(eb, i,
 840		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
 841		if (!tm_list[i]) {
 842			ret = -ENOMEM;
 843			goto free_tms;
 844		}
 845	}
 846
 847	if (tree_mod_dont_log(eb->fs_info, eb))
 848		goto free_tms;
 849
 850	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
 851	write_unlock(&eb->fs_info->tree_mod_log_lock);
 852	if (ret)
 853		goto free_tms;
 854	kfree(tm_list);
 855
 856	return 0;
 857
 858free_tms:
 859	for (i = 0; i < nritems; i++)
 860		kfree(tm_list[i]);
 861	kfree(tm_list);
 862
 863	return ret;
 864}
 865
 866/*
 867 * check if the tree block can be shared by multiple trees
 868 */
 869int btrfs_block_can_be_shared(struct btrfs_root *root,
 870			      struct extent_buffer *buf)
 871{
 872	/*
 873	 * Tree blocks not in reference counted trees and tree roots
 874	 * are never shared. If a block was allocated after the last
 875	 * snapshot and the block was not allocated by tree relocation,
 876	 * we know the block is not shared.
 877	 */
 878	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 879	    buf != root->node && buf != root->commit_root &&
 880	    (btrfs_header_generation(buf) <=
 881	     btrfs_root_last_snapshot(&root->root_item) ||
 882	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
 883		return 1;
 884
 885	return 0;
 886}
 887
 888static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 889				       struct btrfs_root *root,
 890				       struct extent_buffer *buf,
 891				       struct extent_buffer *cow,
 892				       int *last_ref)
 893{
 894	struct btrfs_fs_info *fs_info = root->fs_info;
 895	u64 refs;
 896	u64 owner;
 897	u64 flags;
 898	u64 new_flags = 0;
 899	int ret;
 900
 901	/*
 902	 * Backrefs update rules:
 903	 *
 904	 * Always use full backrefs for extent pointers in tree block
 905	 * allocated by tree relocation.
 906	 *
 907	 * If a shared tree block is no longer referenced by its owner
 908	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
 909	 * use full backrefs for extent pointers in tree block.
 910	 *
 911	 * If a tree block is being relocated
 912	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
 913	 * use full backrefs for extent pointers in tree block.
 914	 * The reason for this is that some operations (such as dropping a tree)
 915	 * are only allowed on blocks that use full backrefs.
 916	 */
 917
 918	if (btrfs_block_can_be_shared(root, buf)) {
 919		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
 920					       btrfs_header_level(buf), 1,
 921					       &refs, &flags);
 922		if (ret)
 923			return ret;
 924		if (refs == 0) {
 925			ret = -EROFS;
 926			btrfs_handle_fs_error(fs_info, ret, NULL);
 927			return ret;
 928		}
 929	} else {
 930		refs = 1;
 931		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 932		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
 933			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 934		else
 935			flags = 0;
 936	}
 937
 938	owner = btrfs_header_owner(buf);
 939	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
 940	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
 941
 942	if (refs > 1) {
 943		if ((owner == root->root_key.objectid ||
 944		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
 945		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
 946			ret = btrfs_inc_ref(trans, root, buf, 1);
 947			if (ret)
 948				return ret;
 949
 950			if (root->root_key.objectid ==
 951			    BTRFS_TREE_RELOC_OBJECTID) {
 952				ret = btrfs_dec_ref(trans, root, buf, 0);
 953				if (ret)
 954					return ret;
 955				ret = btrfs_inc_ref(trans, root, cow, 1);
 956				if (ret)
 957					return ret;
 958			}
 959			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
 960		} else {
 961
 962			if (root->root_key.objectid ==
 963			    BTRFS_TREE_RELOC_OBJECTID)
 964				ret = btrfs_inc_ref(trans, root, cow, 1);
 965			else
 966				ret = btrfs_inc_ref(trans, root, cow, 0);
 967			if (ret)
 968				return ret;
 969		}
 970		if (new_flags != 0) {
 971			int level = btrfs_header_level(buf);
 972
 973			ret = btrfs_set_disk_extent_flags(trans,
 974							  buf->start,
 975							  buf->len,
 976							  new_flags, level, 0);
 977			if (ret)
 978				return ret;
 979		}
 980	} else {
 981		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
 982			if (root->root_key.objectid ==
 983			    BTRFS_TREE_RELOC_OBJECTID)
 984				ret = btrfs_inc_ref(trans, root, cow, 1);
 985			else
 986				ret = btrfs_inc_ref(trans, root, cow, 0);
 987			if (ret)
 988				return ret;
 989			ret = btrfs_dec_ref(trans, root, buf, 1);
 990			if (ret)
 991				return ret;
 992		}
 993		btrfs_clean_tree_block(buf);
 994		*last_ref = 1;
 995	}
 996	return 0;
 997}
 998
 999static struct extent_buffer *alloc_tree_block_no_bg_flush(
1000					  struct btrfs_trans_handle *trans,
1001					  struct btrfs_root *root,
1002					  u64 parent_start,
1003					  const struct btrfs_disk_key *disk_key,
1004					  int level,
1005					  u64 hint,
1006					  u64 empty_size)
1007{
1008	struct btrfs_fs_info *fs_info = root->fs_info;
1009	struct extent_buffer *ret;
1010
1011	/*
1012	 * If we are COWing a node/leaf from the extent, chunk, device or free
1013	 * space trees, make sure that we do not finish block group creation of
1014	 * pending block groups. We do this to avoid a deadlock.
1015	 * COWing can result in allocation of a new chunk, and flushing pending
1016	 * block groups (btrfs_create_pending_block_groups()) can be triggered
1017	 * when finishing allocation of a new chunk. Creation of a pending block
1018	 * group modifies the extent, chunk, device and free space trees,
1019	 * therefore we could deadlock with ourselves since we are holding a
1020	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
1021	 * try to COW later.
1022	 * For similar reasons, we also need to delay flushing pending block
1023	 * groups when splitting a leaf or node, from one of those trees, since
1024	 * we are holding a write lock on it and its parent or when inserting a
1025	 * new root node for one of those trees.
1026	 */
1027	if (root == fs_info->extent_root ||
1028	    root == fs_info->chunk_root ||
1029	    root == fs_info->dev_root ||
1030	    root == fs_info->free_space_root)
1031		trans->can_flush_pending_bgs = false;
1032
1033	ret = btrfs_alloc_tree_block(trans, root, parent_start,
1034				     root->root_key.objectid, disk_key, level,
1035				     hint, empty_size);
1036	trans->can_flush_pending_bgs = true;
1037
1038	return ret;
1039}
1040
1041/*
1042 * does the dirty work in cow of a single block.  The parent block (if
1043 * supplied) is updated to point to the new cow copy.  The new buffer is marked
1044 * dirty and returned locked.  If you modify the block it needs to be marked
1045 * dirty again.
1046 *
1047 * search_start -- an allocation hint for the new block
1048 *
1049 * empty_size -- a hint that you plan on doing more cow.  This is the size in
1050 * bytes the allocator should try to find free next to the block it returns.
1051 * This is just a hint and may be ignored by the allocator.
1052 */
1053static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1054			     struct btrfs_root *root,
1055			     struct extent_buffer *buf,
1056			     struct extent_buffer *parent, int parent_slot,
1057			     struct extent_buffer **cow_ret,
1058			     u64 search_start, u64 empty_size)
1059{
1060	struct btrfs_fs_info *fs_info = root->fs_info;
1061	struct btrfs_disk_key disk_key;
1062	struct extent_buffer *cow;
1063	int level, ret;
1064	int last_ref = 0;
1065	int unlock_orig = 0;
1066	u64 parent_start = 0;
1067
1068	if (*cow_ret == buf)
1069		unlock_orig = 1;
1070
1071	btrfs_assert_tree_locked(buf);
1072
1073	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1074		trans->transid != fs_info->running_transaction->transid);
1075	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1076		trans->transid != root->last_trans);
1077
1078	level = btrfs_header_level(buf);
1079
1080	if (level == 0)
1081		btrfs_item_key(buf, &disk_key, 0);
1082	else
1083		btrfs_node_key(buf, &disk_key, 0);
1084
1085	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1086		parent_start = parent->start;
1087
1088	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1089					   level, search_start, empty_size);
1090	if (IS_ERR(cow))
1091		return PTR_ERR(cow);
1092
1093	/* cow is set to blocking by btrfs_init_new_buffer */
1094
1095	copy_extent_buffer_full(cow, buf);
1096	btrfs_set_header_bytenr(cow, cow->start);
1097	btrfs_set_header_generation(cow, trans->transid);
1098	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1099	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1100				     BTRFS_HEADER_FLAG_RELOC);
1101	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1102		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1103	else
1104		btrfs_set_header_owner(cow, root->root_key.objectid);
1105
1106	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
1107
1108	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1109	if (ret) {
1110		btrfs_abort_transaction(trans, ret);
1111		return ret;
1112	}
1113
1114	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1115		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1116		if (ret) {
1117			btrfs_abort_transaction(trans, ret);
1118			return ret;
1119		}
1120	}
1121
1122	if (buf == root->node) {
1123		WARN_ON(parent && parent != buf);
1124		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1125		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1126			parent_start = buf->start;
1127
1128		extent_buffer_get(cow);
1129		ret = tree_mod_log_insert_root(root->node, cow, 1);
1130		BUG_ON(ret < 0);
1131		rcu_assign_pointer(root->node, cow);
1132
1133		btrfs_free_tree_block(trans, root, buf, parent_start,
1134				      last_ref);
1135		free_extent_buffer(buf);
1136		add_root_to_dirty_list(root);
1137	} else {
1138		WARN_ON(trans->transid != btrfs_header_generation(parent));
1139		tree_mod_log_insert_key(parent, parent_slot,
1140					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1141		btrfs_set_node_blockptr(parent, parent_slot,
1142					cow->start);
1143		btrfs_set_node_ptr_generation(parent, parent_slot,
1144					      trans->transid);
1145		btrfs_mark_buffer_dirty(parent);
1146		if (last_ref) {
1147			ret = tree_mod_log_free_eb(buf);
1148			if (ret) {
1149				btrfs_abort_transaction(trans, ret);
1150				return ret;
1151			}
1152		}
1153		btrfs_free_tree_block(trans, root, buf, parent_start,
1154				      last_ref);
1155	}
1156	if (unlock_orig)
1157		btrfs_tree_unlock(buf);
1158	free_extent_buffer_stale(buf);
1159	btrfs_mark_buffer_dirty(cow);
1160	*cow_ret = cow;
1161	return 0;
1162}
1163
1164/*
1165 * returns the logical address of the oldest predecessor of the given root.
1166 * entries older than time_seq are ignored.
1167 */
1168static struct tree_mod_elem *__tree_mod_log_oldest_root(
1169		struct extent_buffer *eb_root, u64 time_seq)
1170{
1171	struct tree_mod_elem *tm;
1172	struct tree_mod_elem *found = NULL;
1173	u64 root_logical = eb_root->start;
1174	int looped = 0;
1175
1176	if (!time_seq)
1177		return NULL;
1178
1179	/*
1180	 * the very last operation that's logged for a root is the
1181	 * replacement operation (if it is replaced at all). this has
1182	 * the logical address of the *new* root, making it the very
1183	 * first operation that's logged for this root.
1184	 */
1185	while (1) {
1186		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1187						time_seq);
1188		if (!looped && !tm)
1189			return NULL;
1190		/*
1191		 * if there are no tree operations for the oldest root, we simply
1192		 * return it. this should only happen if that (old) root is at
1193		 * level 0.
1194		 */
1195		if (!tm)
1196			break;
1197
1198		/*
1199		 * if there's an operation that's not a root replacement, we
1200		 * found the oldest version of our root. normally, we'll find a
1201		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1202		 */
1203		if (tm->op != MOD_LOG_ROOT_REPLACE)
1204			break;
1205
1206		found = tm;
1207		root_logical = tm->old_root.logical;
1208		looped = 1;
1209	}
1210
1211	/* if there's no old root to return, return what we found instead */
1212	if (!found)
1213		found = tm;
1214
1215	return found;
1216}
1217
1218/*
1219 * tm is a pointer to the first operation to rewind within eb. then, all
1220 * previous operations will be rewound (until we reach something older than
1221 * time_seq).
1222 */
1223static void
1224__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1225		      u64 time_seq, struct tree_mod_elem *first_tm)
1226{
1227	u32 n;
1228	struct rb_node *next;
1229	struct tree_mod_elem *tm = first_tm;
1230	unsigned long o_dst;
1231	unsigned long o_src;
1232	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1233
1234	n = btrfs_header_nritems(eb);
1235	read_lock(&fs_info->tree_mod_log_lock);
1236	while (tm && tm->seq >= time_seq) {
1237		/*
1238		 * all the operations are recorded with the operator used for
1239		 * the modification. as we're going backwards, we do the
1240		 * opposite of each operation here.
1241		 */
1242		switch (tm->op) {
1243		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1244			BUG_ON(tm->slot < n);
1245			/* Fallthrough */
1246		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1247		case MOD_LOG_KEY_REMOVE:
1248			btrfs_set_node_key(eb, &tm->key, tm->slot);
1249			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1250			btrfs_set_node_ptr_generation(eb, tm->slot,
1251						      tm->generation);
1252			n++;
1253			break;
1254		case MOD_LOG_KEY_REPLACE:
1255			BUG_ON(tm->slot >= n);
1256			btrfs_set_node_key(eb, &tm->key, tm->slot);
1257			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1258			btrfs_set_node_ptr_generation(eb, tm->slot,
1259						      tm->generation);
1260			break;
1261		case MOD_LOG_KEY_ADD:
1262			/* if a move operation is needed it's in the log */
1263			n--;
1264			break;
1265		case MOD_LOG_MOVE_KEYS:
1266			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1267			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1268			memmove_extent_buffer(eb, o_dst, o_src,
1269					      tm->move.nr_items * p_size);
1270			break;
1271		case MOD_LOG_ROOT_REPLACE:
1272			/*
1273			 * this operation is special. for roots, this must be
1274			 * handled explicitly before rewinding.
1275			 * for non-roots, this operation may exist if the node
1276			 * was a root: root A -> child B; then A gets empty and
1277			 * B is promoted to the new root. in the mod log, we'll
1278			 * have a root-replace operation for B, a tree block
1279			 * that is not a root. we simply ignore that operation.
1280			 */
1281			break;
1282		}
1283		next = rb_next(&tm->node);
1284		if (!next)
1285			break;
1286		tm = rb_entry(next, struct tree_mod_elem, node);
1287		if (tm->logical != first_tm->logical)
1288			break;
1289	}
1290	read_unlock(&fs_info->tree_mod_log_lock);
1291	btrfs_set_header_nritems(eb, n);
1292}
1293
1294/*
1295 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1296 * is returned. If rewind operations happen, a fresh buffer is returned. The
1297 * returned buffer is always read-locked. If the returned buffer is not the
1298 * input buffer, the lock on the input buffer is released and the input buffer
1299 * is freed (its refcount is decremented).
1300 */
1301static struct extent_buffer *
1302tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1303		    struct extent_buffer *eb, u64 time_seq)
1304{
1305	struct extent_buffer *eb_rewin;
1306	struct tree_mod_elem *tm;
1307
1308	if (!time_seq)
1309		return eb;
1310
1311	if (btrfs_header_level(eb) == 0)
1312		return eb;
1313
1314	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1315	if (!tm)
1316		return eb;
1317
1318	btrfs_set_path_blocking(path);
1319	btrfs_set_lock_blocking_read(eb);
1320
1321	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1322		BUG_ON(tm->slot != 0);
1323		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1324		if (!eb_rewin) {
1325			btrfs_tree_read_unlock_blocking(eb);
1326			free_extent_buffer(eb);
1327			return NULL;
1328		}
1329		btrfs_set_header_bytenr(eb_rewin, eb->start);
1330		btrfs_set_header_backref_rev(eb_rewin,
1331					     btrfs_header_backref_rev(eb));
1332		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1333		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1334	} else {
1335		eb_rewin = btrfs_clone_extent_buffer(eb);
1336		if (!eb_rewin) {
1337			btrfs_tree_read_unlock_blocking(eb);
1338			free_extent_buffer(eb);
1339			return NULL;
1340		}
1341	}
1342
1343	btrfs_tree_read_unlock_blocking(eb);
1344	free_extent_buffer(eb);
1345
1346	btrfs_tree_read_lock(eb_rewin);
1347	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1348	WARN_ON(btrfs_header_nritems(eb_rewin) >
1349		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1350
1351	return eb_rewin;
1352}
1353
1354/*
1355 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1356 * value. If there are no changes, the current root->root_node is returned. If
1357 * anything changed in between, there's a fresh buffer allocated on which the
1358 * rewind operations are done. In any case, the returned buffer is read locked.
1359 * Returns NULL on error (with no locks held).
1360 */
1361static inline struct extent_buffer *
1362get_old_root(struct btrfs_root *root, u64 time_seq)
1363{
1364	struct btrfs_fs_info *fs_info = root->fs_info;
1365	struct tree_mod_elem *tm;
1366	struct extent_buffer *eb = NULL;
1367	struct extent_buffer *eb_root;
1368	u64 eb_root_owner = 0;
1369	struct extent_buffer *old;
1370	struct tree_mod_root *old_root = NULL;
1371	u64 old_generation = 0;
1372	u64 logical;
1373	int level;
1374
1375	eb_root = btrfs_read_lock_root_node(root);
1376	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1377	if (!tm)
1378		return eb_root;
1379
1380	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1381		old_root = &tm->old_root;
1382		old_generation = tm->generation;
1383		logical = old_root->logical;
1384		level = old_root->level;
1385	} else {
1386		logical = eb_root->start;
1387		level = btrfs_header_level(eb_root);
1388	}
1389
1390	tm = tree_mod_log_search(fs_info, logical, time_seq);
1391	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1392		btrfs_tree_read_unlock(eb_root);
1393		free_extent_buffer(eb_root);
1394		old = read_tree_block(fs_info, logical, 0, level, NULL);
1395		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1396			if (!IS_ERR(old))
1397				free_extent_buffer(old);
1398			btrfs_warn(fs_info,
1399				   "failed to read tree block %llu from get_old_root",
1400				   logical);
1401		} else {
1402			eb = btrfs_clone_extent_buffer(old);
1403			free_extent_buffer(old);
1404		}
1405	} else if (old_root) {
1406		eb_root_owner = btrfs_header_owner(eb_root);
1407		btrfs_tree_read_unlock(eb_root);
1408		free_extent_buffer(eb_root);
1409		eb = alloc_dummy_extent_buffer(fs_info, logical);
1410	} else {
1411		btrfs_set_lock_blocking_read(eb_root);
1412		eb = btrfs_clone_extent_buffer(eb_root);
1413		btrfs_tree_read_unlock_blocking(eb_root);
1414		free_extent_buffer(eb_root);
1415	}
1416
1417	if (!eb)
1418		return NULL;
1419	btrfs_tree_read_lock(eb);
1420	if (old_root) {
1421		btrfs_set_header_bytenr(eb, eb->start);
1422		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1423		btrfs_set_header_owner(eb, eb_root_owner);
1424		btrfs_set_header_level(eb, old_root->level);
1425		btrfs_set_header_generation(eb, old_generation);
1426	}
1427	if (tm)
1428		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1429	else
1430		WARN_ON(btrfs_header_level(eb) != 0);
1431	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1432
1433	return eb;
1434}
1435
1436int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1437{
1438	struct tree_mod_elem *tm;
1439	int level;
1440	struct extent_buffer *eb_root = btrfs_root_node(root);
1441
1442	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1443	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1444		level = tm->old_root.level;
1445	} else {
1446		level = btrfs_header_level(eb_root);
1447	}
1448	free_extent_buffer(eb_root);
1449
1450	return level;
1451}
1452
1453static inline int should_cow_block(struct btrfs_trans_handle *trans,
1454				   struct btrfs_root *root,
1455				   struct extent_buffer *buf)
1456{
1457	if (btrfs_is_testing(root->fs_info))
1458		return 0;
1459
1460	/* Ensure we can see the FORCE_COW bit */
1461	smp_mb__before_atomic();
1462
1463	/*
1464	 * We do not need to cow a block if
1465	 * 1) this block is not created or changed in this transaction;
1466	 * 2) this block does not belong to TREE_RELOC tree;
1467	 * 3) the root is not forced COW.
1468	 *
1469	 * What is forced COW:
1470	 *    when we create snapshot during committing the transaction,
1471	 *    after we've finished copying src root, we must COW the shared
1472	 *    block to ensure the metadata consistency.
1473	 */
1474	if (btrfs_header_generation(buf) == trans->transid &&
1475	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1476	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1477	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1478	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1479		return 0;
1480	return 1;
1481}
1482
1483/*
1484 * cows a single block, see __btrfs_cow_block for the real work.
1485 * This version of it has extra checks so that a block isn't COWed more than
1486 * once per transaction, as long as it hasn't been written yet
1487 */
1488noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1489		    struct btrfs_root *root, struct extent_buffer *buf,
1490		    struct extent_buffer *parent, int parent_slot,
1491		    struct extent_buffer **cow_ret)
1492{
1493	struct btrfs_fs_info *fs_info = root->fs_info;
1494	u64 search_start;
1495	int ret;
1496
1497	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
1498		btrfs_err(fs_info,
1499			"COW'ing blocks on a fs root that's being dropped");
1500
1501	if (trans->transaction != fs_info->running_transaction)
1502		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1503		       trans->transid,
1504		       fs_info->running_transaction->transid);
1505
1506	if (trans->transid != fs_info->generation)
1507		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1508		       trans->transid, fs_info->generation);
1509
1510	if (!should_cow_block(trans, root, buf)) {
1511		trans->dirty = true;
1512		*cow_ret = buf;
1513		return 0;
1514	}
1515
1516	search_start = buf->start & ~((u64)SZ_1G - 1);
1517
1518	if (parent)
1519		btrfs_set_lock_blocking_write(parent);
1520	btrfs_set_lock_blocking_write(buf);
1521
1522	/*
1523	 * Before CoWing this block for later modification, check if it's
1524	 * the subtree root and do the delayed subtree trace if needed.
1525	 *
1526	 * Also, we don't care about the error, as it's handled internally.
1527	 */
1528	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
1529	ret = __btrfs_cow_block(trans, root, buf, parent,
1530				 parent_slot, cow_ret, search_start, 0);
1531
1532	trace_btrfs_cow_block(root, buf, *cow_ret);
1533
1534	return ret;
1535}
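
/*
 * Editor's sketch: the COW-before-modify rule in action.  A caller that
 * wants to change the root node locks it, COWs it in place (note that
 * &eb is passed as both source and destination, as real callers do),
 * and only then modifies the returned, locked copy.
 * example_cow_root() is a hypothetical name.
 */
static int example_cow_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_lock_root_node(root);
	int ret;

	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
	/* whether or not a copy was made, 'eb' is locked and referenced */
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
	return ret;
}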
1536
1537/*
1538 * helper function for defrag to decide if two blocks pointed to by a
1539 * node are actually close by
1540 */
1541static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1542{
1543	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1544		return 1;
1545	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1546		return 1;
1547	return 0;
1548}
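
/*
 * Editor's example: with a hypothetical 16KiB nodesize, a block at 0
 * and one at 40960 are "close" (24576 bytes from end to start, under
 * the 32768 cutoff), while 0 and 65536 are not.
 */
static inline void example_close_blocks(void)
{
	u32 blocksize = SZ_16K;

	WARN_ON(close_blocks(0, 40960, blocksize) != 1);
	WARN_ON(close_blocks(0, 65536, blocksize) != 0);
}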
1549
1550/*
1551 * compare two keys in a memcmp fashion
1552 */
1553static int comp_keys(const struct btrfs_disk_key *disk,
1554		     const struct btrfs_key *k2)
1555{
1556	struct btrfs_key k1;
1557
1558	btrfs_disk_key_to_cpu(&k1, disk);
1559
1560	return btrfs_comp_cpu_keys(&k1, k2);
1561}
1562
1563/*
1564 * same as comp_keys only with two btrfs_key's
1565 */
1566int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1567{
1568	if (k1->objectid > k2->objectid)
1569		return 1;
1570	if (k1->objectid < k2->objectid)
1571		return -1;
1572	if (k1->type > k2->type)
1573		return 1;
1574	if (k1->type < k2->type)
1575		return -1;
1576	if (k1->offset > k2->offset)
1577		return 1;
1578	if (k1->offset < k2->offset)
1579		return -1;
1580	return 0;
1581}
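
/*
 * Editor's example: keys order by objectid, then type, then offset.
 * The inode number 256 is arbitrary; the type constants are the real
 * BTRFS_INODE_ITEM_KEY (1) and BTRFS_INODE_REF_KEY (12) from the btrfs
 * headers.
 */
static inline void example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256,
			       .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
	struct btrfs_key b = { .objectid = 256,
			       .type = BTRFS_INODE_REF_KEY, .offset = 0 };

	/* same objectid, so the smaller type sorts first */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) != -1);
}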
1582
1583/*
1584 * this is used by the defrag code to go through all the
1585 * leaves pointed to by a node and reallocate them so that
1586 * disk order is close to key order
1587 */
1588int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1589		       struct btrfs_root *root, struct extent_buffer *parent,
1590		       int start_slot, u64 *last_ret,
1591		       struct btrfs_key *progress)
1592{
1593	struct btrfs_fs_info *fs_info = root->fs_info;
1594	struct extent_buffer *cur;
1595	u64 blocknr;
1596	u64 gen;
1597	u64 search_start = *last_ret;
1598	u64 last_block = 0;
1599	u64 other;
1600	u32 parent_nritems;
1601	int end_slot;
1602	int i;
1603	int err = 0;
1604	int parent_level;
1605	int uptodate;
1606	u32 blocksize;
1607	int progress_passed = 0;
1608	struct btrfs_disk_key disk_key;
1609
1610	parent_level = btrfs_header_level(parent);
1611
1612	WARN_ON(trans->transaction != fs_info->running_transaction);
1613	WARN_ON(trans->transid != fs_info->generation);
1614
1615	parent_nritems = btrfs_header_nritems(parent);
1616	blocksize = fs_info->nodesize;
1617	end_slot = parent_nritems - 1;
1618
1619	if (parent_nritems <= 1)
1620		return 0;
1621
1622	btrfs_set_lock_blocking_write(parent);
1623
1624	for (i = start_slot; i <= end_slot; i++) {
1625		struct btrfs_key first_key;
1626		int close = 1;
1627
1628		btrfs_node_key(parent, &disk_key, i);
1629		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1630			continue;
1631
1632		progress_passed = 1;
1633		blocknr = btrfs_node_blockptr(parent, i);
1634		gen = btrfs_node_ptr_generation(parent, i);
1635		btrfs_node_key_to_cpu(parent, &first_key, i);
1636		if (last_block == 0)
1637			last_block = blocknr;
1638
1639		if (i > 0) {
1640			other = btrfs_node_blockptr(parent, i - 1);
1641			close = close_blocks(blocknr, other, blocksize);
1642		}
1643		if (!close && i < end_slot) {
1644			other = btrfs_node_blockptr(parent, i + 1);
1645			close = close_blocks(blocknr, other, blocksize);
1646		}
1647		if (close) {
1648			last_block = blocknr;
1649			continue;
1650		}
1651
1652		cur = find_extent_buffer(fs_info, blocknr);
1653		if (cur)
1654			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1655		else
1656			uptodate = 0;
1657		if (!cur || !uptodate) {
1658			if (!cur) {
1659				cur = read_tree_block(fs_info, blocknr, gen,
1660						      parent_level - 1,
1661						      &first_key);
1662				if (IS_ERR(cur)) {
1663					return PTR_ERR(cur);
1664				} else if (!extent_buffer_uptodate(cur)) {
1665					free_extent_buffer(cur);
1666					return -EIO;
1667				}
1668			} else if (!uptodate) {
1669				err = btrfs_read_buffer(cur, gen,
1670						parent_level - 1,&first_key);
1671				if (err) {
1672					free_extent_buffer(cur);
1673					return err;
1674				}
1675			}
1676		}
1677		if (search_start == 0)
1678			search_start = last_block;
1679
1680		btrfs_tree_lock(cur);
1681		btrfs_set_lock_blocking_write(cur);
1682		err = __btrfs_cow_block(trans, root, cur, parent, i,
1683					&cur, search_start,
1684					min(16 * blocksize,
1685					    (end_slot - i) * blocksize));
1686		if (err) {
1687			btrfs_tree_unlock(cur);
1688			free_extent_buffer(cur);
1689			break;
1690		}
1691		search_start = cur->start;
1692		last_block = cur->start;
1693		*last_ret = search_start;
1694		btrfs_tree_unlock(cur);
1695		free_extent_buffer(cur);
1696	}
1697	return err;
1698}
1699
1700/*
1701 * search for key in the extent_buffer.  The items start at offset p,
1702 * and they are item_size apart.  There are 'max' items in p.
1703 *
1704 * the slot in the array is returned via slot, and it points to
1705 * the place where you would insert key if it is not found in
1706 * the array.
1707 *
1708 * slot may point to max if the key is bigger than all of the keys
1709 */
1710static noinline int generic_bin_search(struct extent_buffer *eb,
1711				       unsigned long p, int item_size,
1712				       const struct btrfs_key *key,
1713				       int max, int *slot)
1714{
1715	int low = 0;
1716	int high = max;
1717	int mid;
1718	int ret;
1719	struct btrfs_disk_key *tmp = NULL;
1720	struct btrfs_disk_key unaligned;
1721	unsigned long offset;
1722	char *kaddr = NULL;
1723	unsigned long map_start = 0;
1724	unsigned long map_len = 0;
1725	int err;
1726
1727	if (low > high) {
1728		btrfs_err(eb->fs_info,
1729		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1730			  __func__, low, high, eb->start,
1731			  btrfs_header_owner(eb), btrfs_header_level(eb));
1732		return -EINVAL;
1733	}
1734
1735	while (low < high) {
1736		mid = (low + high) / 2;
1737		offset = p + mid * item_size;
1738
1739		if (!kaddr || offset < map_start ||
1740		    (offset + sizeof(struct btrfs_disk_key)) >
1741		    map_start + map_len) {
1742
1743			err = map_private_extent_buffer(eb, offset,
1744						sizeof(struct btrfs_disk_key),
1745						&kaddr, &map_start, &map_len);
1746
1747			if (!err) {
1748				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1749							map_start);
1750			} else if (err == 1) {
1751				read_extent_buffer(eb, &unaligned,
1752						   offset, sizeof(unaligned));
1753				tmp = &unaligned;
1754			} else {
1755				return err;
1756			}
1757
1758		} else {
1759			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1760							map_start);
1761		}
1762		ret = comp_keys(tmp, key);
1763
1764		if (ret < 0)
1765			low = mid + 1;
1766		else if (ret > 0)
1767			high = mid;
1768		else {
1769			*slot = mid;
1770			return 0;
1771		}
1772	}
1773	*slot = low;
1774	return 1;
1775}
1776
1777/*
1778 * simple bin_search frontend that does the right thing for
1779 * leaves vs nodes
1780 */
1781int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1782		     int level, int *slot)
1783{
1784	if (level == 0)
1785		return generic_bin_search(eb,
1786					  offsetof(struct btrfs_leaf, items),
1787					  sizeof(struct btrfs_item),
1788					  key, btrfs_header_nritems(eb),
1789					  slot);
1790	else
1791		return generic_bin_search(eb,
1792					  offsetof(struct btrfs_node, ptrs),
1793					  sizeof(struct btrfs_key_ptr),
1794					  key, btrfs_header_nritems(eb),
1795					  slot);
1796}
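
/*
 * Editor's sketch: typical use of btrfs_bin_search() on a buffer the
 * caller already holds.  A return of 0 means an exact match at *slot;
 * 1 means *slot is the insertion point (possibly one past the last
 * item); negative values report a corrupted key order.
 */
static int example_find_slot(struct extent_buffer *eb,
			     const struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, key, btrfs_header_level(eb), &slot);
	if (ret < 0)
		return ret;
	return slot;
}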
1797
1798static void root_add_used(struct btrfs_root *root, u32 size)
1799{
1800	spin_lock(&root->accounting_lock);
1801	btrfs_set_root_used(&root->root_item,
1802			    btrfs_root_used(&root->root_item) + size);
1803	spin_unlock(&root->accounting_lock);
1804}
1805
1806static void root_sub_used(struct btrfs_root *root, u32 size)
1807{
1808	spin_lock(&root->accounting_lock);
1809	btrfs_set_root_used(&root->root_item,
1810			    btrfs_root_used(&root->root_item) - size);
1811	spin_unlock(&root->accounting_lock);
1812}
1813
1814/* given a node and slot number, this reads the block it points to.  The
1815 * extent buffer is returned with a reference taken (but unlocked).
1816 */
1817struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1818					   int slot)
1819{
1820	int level = btrfs_header_level(parent);
1821	struct extent_buffer *eb;
1822	struct btrfs_key first_key;
1823
1824	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1825		return ERR_PTR(-ENOENT);
1826
1827	BUG_ON(level == 0);
1828
1829	btrfs_node_key_to_cpu(parent, &first_key, slot);
1830	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1831			     btrfs_node_ptr_generation(parent, slot),
1832			     level - 1, &first_key);
1833	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1834		free_extent_buffer(eb);
1835		eb = ERR_PTR(-EIO);
1836	}
1837
1838	return eb;
1839}
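
/*
 * Editor's sketch: reading one child and releasing it.  The buffer
 * comes back referenced but unlocked, so callers that go on to modify
 * it must lock it first, as balance_level() below does.
 */
static int example_visit_child(struct extent_buffer *parent, int slot)
{
	struct extent_buffer *child = btrfs_read_node_slot(parent, slot);

	if (IS_ERR(child))
		return PTR_ERR(child);

	/* ... inspect the child here ... */

	free_extent_buffer(child);
	return 0;
}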
1840
1841/*
1842 * node level balancing, used to make sure nodes are in proper order for
1843 * item deletion.  We balance from the top down, so we have to make sure
1844 * that a deletion won't leave a node completely empty later on.
1845 */
1846static noinline int balance_level(struct btrfs_trans_handle *trans,
1847			 struct btrfs_root *root,
1848			 struct btrfs_path *path, int level)
1849{
1850	struct btrfs_fs_info *fs_info = root->fs_info;
1851	struct extent_buffer *right = NULL;
1852	struct extent_buffer *mid;
1853	struct extent_buffer *left = NULL;
1854	struct extent_buffer *parent = NULL;
1855	int ret = 0;
1856	int wret;
1857	int pslot;
1858	int orig_slot = path->slots[level];
1859	u64 orig_ptr;
1860
1861	ASSERT(level > 0);
1862
1863	mid = path->nodes[level];
1864
1865	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1866		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1867	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1868
1869	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1870
1871	if (level < BTRFS_MAX_LEVEL - 1) {
1872		parent = path->nodes[level + 1];
1873		pslot = path->slots[level + 1];
1874	}
1875
1876	/*
1877	 * deal with the case where there is only one pointer in the root
1878	 * by promoting the node below to a root
1879	 */
1880	if (!parent) {
1881		struct extent_buffer *child;
1882
1883		if (btrfs_header_nritems(mid) != 1)
1884			return 0;
1885
1886		/* promote the child to a root */
1887		child = btrfs_read_node_slot(mid, 0);
1888		if (IS_ERR(child)) {
1889			ret = PTR_ERR(child);
1890			btrfs_handle_fs_error(fs_info, ret, NULL);
1891			goto enospc;
1892		}
1893
1894		btrfs_tree_lock(child);
1895		btrfs_set_lock_blocking_write(child);
1896		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1897		if (ret) {
1898			btrfs_tree_unlock(child);
1899			free_extent_buffer(child);
1900			goto enospc;
1901		}
1902
1903		ret = tree_mod_log_insert_root(root->node, child, 1);
1904		BUG_ON(ret < 0);
1905		rcu_assign_pointer(root->node, child);
1906
1907		add_root_to_dirty_list(root);
1908		btrfs_tree_unlock(child);
1909
1910		path->locks[level] = 0;
1911		path->nodes[level] = NULL;
1912		btrfs_clean_tree_block(mid);
1913		btrfs_tree_unlock(mid);
1914		/* once for the path */
1915		free_extent_buffer(mid);
1916
1917		root_sub_used(root, mid->len);
1918		btrfs_free_tree_block(trans, root, mid, 0, 1);
1919		/* once for the root ptr */
1920		free_extent_buffer_stale(mid);
1921		return 0;
1922	}
1923	if (btrfs_header_nritems(mid) >
1924	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1925		return 0;
1926
1927	left = btrfs_read_node_slot(parent, pslot - 1);
1928	if (IS_ERR(left))
1929		left = NULL;
1930
1931	if (left) {
1932		btrfs_tree_lock(left);
1933		btrfs_set_lock_blocking_write(left);
1934		wret = btrfs_cow_block(trans, root, left,
1935				       parent, pslot - 1, &left);
1936		if (wret) {
1937			ret = wret;
1938			goto enospc;
1939		}
1940	}
1941
1942	right = btrfs_read_node_slot(parent, pslot + 1);
1943	if (IS_ERR(right))
1944		right = NULL;
1945
1946	if (right) {
1947		btrfs_tree_lock(right);
1948		btrfs_set_lock_blocking_write(right);
1949		wret = btrfs_cow_block(trans, root, right,
1950				       parent, pslot + 1, &right);
1951		if (wret) {
1952			ret = wret;
1953			goto enospc;
1954		}
1955	}
1956
1957	/* first, try to make some room in the middle buffer */
1958	if (left) {
1959		orig_slot += btrfs_header_nritems(left);
1960		wret = push_node_left(trans, left, mid, 1);
1961		if (wret < 0)
1962			ret = wret;
1963	}
1964
1965	/*
1966	 * then try to empty the right most buffer into the middle
1967	 */
1968	if (right) {
1969		wret = push_node_left(trans, mid, right, 1);
1970		if (wret < 0 && wret != -ENOSPC)
1971			ret = wret;
1972		if (btrfs_header_nritems(right) == 0) {
1973			btrfs_clean_tree_block(right);
1974			btrfs_tree_unlock(right);
1975			del_ptr(root, path, level + 1, pslot + 1);
1976			root_sub_used(root, right->len);
1977			btrfs_free_tree_block(trans, root, right, 0, 1);
1978			free_extent_buffer_stale(right);
1979			right = NULL;
1980		} else {
1981			struct btrfs_disk_key right_key;
1982			btrfs_node_key(right, &right_key, 0);
1983			ret = tree_mod_log_insert_key(parent, pslot + 1,
1984					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1985			BUG_ON(ret < 0);
1986			btrfs_set_node_key(parent, &right_key, pslot + 1);
1987			btrfs_mark_buffer_dirty(parent);
1988		}
1989	}
1990	if (btrfs_header_nritems(mid) == 1) {
1991		/*
1992		 * we're not allowed to leave a node with one item in the
1993		 * tree during a delete.  A deletion from lower in the tree
1994		 * could try to delete the only pointer in this node.
1995		 * So, pull some keys from the left.
1996		 * There has to be a left pointer at this point because
1997		 * otherwise we would have pulled some pointers from the
1998		 * right
1999		 */
2000		if (!left) {
2001			ret = -EROFS;
2002			btrfs_handle_fs_error(fs_info, ret, NULL);
2003			goto enospc;
2004		}
2005		wret = balance_node_right(trans, mid, left);
2006		if (wret < 0) {
2007			ret = wret;
2008			goto enospc;
2009		}
2010		if (wret == 1) {
2011			wret = push_node_left(trans, left, mid, 1);
2012			if (wret < 0)
2013				ret = wret;
2014		}
2015		BUG_ON(wret == 1);
2016	}
2017	if (btrfs_header_nritems(mid) == 0) {
2018		btrfs_clean_tree_block(mid);
2019		btrfs_tree_unlock(mid);
2020		del_ptr(root, path, level + 1, pslot);
2021		root_sub_used(root, mid->len);
2022		btrfs_free_tree_block(trans, root, mid, 0, 1);
2023		free_extent_buffer_stale(mid);
2024		mid = NULL;
2025	} else {
2026		/* update the parent key to reflect our changes */
2027		struct btrfs_disk_key mid_key;
2028		btrfs_node_key(mid, &mid_key, 0);
2029		ret = tree_mod_log_insert_key(parent, pslot,
2030				MOD_LOG_KEY_REPLACE, GFP_NOFS);
2031		BUG_ON(ret < 0);
2032		btrfs_set_node_key(parent, &mid_key, pslot);
2033		btrfs_mark_buffer_dirty(parent);
2034	}
2035
2036	/* update the path */
2037	if (left) {
2038		if (btrfs_header_nritems(left) > orig_slot) {
2039			extent_buffer_get(left);
2040			/* left was locked after cow */
2041			path->nodes[level] = left;
2042			path->slots[level + 1] -= 1;
2043			path->slots[level] = orig_slot;
2044			if (mid) {
2045				btrfs_tree_unlock(mid);
2046				free_extent_buffer(mid);
2047			}
2048		} else {
2049			orig_slot -= btrfs_header_nritems(left);
2050			path->slots[level] = orig_slot;
2051		}
2052	}
2053	/* double check we haven't messed things up */
2054	if (orig_ptr !=
2055	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2056		BUG();
2057enospc:
2058	if (right) {
2059		btrfs_tree_unlock(right);
2060		free_extent_buffer(right);
2061	}
2062	if (left) {
2063		if (path->nodes[level] != left)
2064			btrfs_tree_unlock(left);
2065		free_extent_buffer(left);
2066	}
2067	return ret;
2068}
2069
2070/* Node balancing for insertion.  Here we only split or push nodes around
2071 * when they are completely full.  This is also done top down, so we
2072 * have to be pessimistic.
2073 */
2074static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2075					  struct btrfs_root *root,
2076					  struct btrfs_path *path, int level)
2077{
2078	struct btrfs_fs_info *fs_info = root->fs_info;
2079	struct extent_buffer *right = NULL;
2080	struct extent_buffer *mid;
2081	struct extent_buffer *left = NULL;
2082	struct extent_buffer *parent = NULL;
2083	int ret = 0;
2084	int wret;
2085	int pslot;
2086	int orig_slot = path->slots[level];
2087
2088	if (level == 0)
2089		return 1;
2090
2091	mid = path->nodes[level];
2092	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2093
2094	if (level < BTRFS_MAX_LEVEL - 1) {
2095		parent = path->nodes[level + 1];
2096		pslot = path->slots[level + 1];
2097	}
2098
2099	if (!parent)
2100		return 1;
2101
2102	left = btrfs_read_node_slot(parent, pslot - 1);
2103	if (IS_ERR(left))
2104		left = NULL;
2105
2106	/* first, try to make some room in the middle buffer */
2107	if (left) {
2108		u32 left_nr;
2109
2110		btrfs_tree_lock(left);
2111		btrfs_set_lock_blocking_write(left);
2112
2113		left_nr = btrfs_header_nritems(left);
2114		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2115			wret = 1;
2116		} else {
2117			ret = btrfs_cow_block(trans, root, left, parent,
2118					      pslot - 1, &left);
2119			if (ret)
2120				wret = 1;
2121			else {
2122				wret = push_node_left(trans, left, mid, 0);
2123			}
2124		}
2125		if (wret < 0)
2126			ret = wret;
2127		if (wret == 0) {
2128			struct btrfs_disk_key disk_key;
2129			orig_slot += left_nr;
2130			btrfs_node_key(mid, &disk_key, 0);
2131			ret = tree_mod_log_insert_key(parent, pslot,
2132					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2133			BUG_ON(ret < 0);
2134			btrfs_set_node_key(parent, &disk_key, pslot);
2135			btrfs_mark_buffer_dirty(parent);
2136			if (btrfs_header_nritems(left) > orig_slot) {
2137				path->nodes[level] = left;
2138				path->slots[level + 1] -= 1;
2139				path->slots[level] = orig_slot;
2140				btrfs_tree_unlock(mid);
2141				free_extent_buffer(mid);
2142			} else {
2143				orig_slot -=
2144					btrfs_header_nritems(left);
2145				path->slots[level] = orig_slot;
2146				btrfs_tree_unlock(left);
2147				free_extent_buffer(left);
2148			}
2149			return 0;
2150		}
2151		btrfs_tree_unlock(left);
2152		free_extent_buffer(left);
2153	}
2154	right = btrfs_read_node_slot(parent, pslot + 1);
2155	if (IS_ERR(right))
2156		right = NULL;
2157
2158	/*
2159	 * then try to empty the right most buffer into the middle
2160	 */
2161	if (right) {
2162		u32 right_nr;
2163
2164		btrfs_tree_lock(right);
2165		btrfs_set_lock_blocking_write(right);
2166
2167		right_nr = btrfs_header_nritems(right);
2168		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2169			wret = 1;
2170		} else {
2171			ret = btrfs_cow_block(trans, root, right,
2172					      parent, pslot + 1,
2173					      &right);
2174			if (ret)
2175				wret = 1;
2176			else {
2177				wret = balance_node_right(trans, right, mid);
2178			}
2179		}
2180		if (wret < 0)
2181			ret = wret;
2182		if (wret == 0) {
2183			struct btrfs_disk_key disk_key;
2184
2185			btrfs_node_key(right, &disk_key, 0);
2186			ret = tree_mod_log_insert_key(parent, pslot + 1,
2187					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2188			BUG_ON(ret < 0);
2189			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2190			btrfs_mark_buffer_dirty(parent);
2191
2192			if (btrfs_header_nritems(mid) <= orig_slot) {
2193				path->nodes[level] = right;
2194				path->slots[level + 1] += 1;
2195				path->slots[level] = orig_slot -
2196					btrfs_header_nritems(mid);
2197				btrfs_tree_unlock(mid);
2198				free_extent_buffer(mid);
2199			} else {
2200				btrfs_tree_unlock(right);
2201				free_extent_buffer(right);
2202			}
2203			return 0;
2204		}
2205		btrfs_tree_unlock(right);
2206		free_extent_buffer(right);
2207	}
2208	return 1;
2209}
2210
2211/*
2212 * readahead one full node of leaves, finding things that are close
2213 * to the block in 'slot', and triggering readahead on them.
2214 */
2215static void reada_for_search(struct btrfs_fs_info *fs_info,
2216			     struct btrfs_path *path,
2217			     int level, int slot, u64 objectid)
2218{
2219	struct extent_buffer *node;
2220	struct btrfs_disk_key disk_key;
2221	u32 nritems;
2222	u64 search;
2223	u64 target;
2224	u64 nread = 0;
2225	struct extent_buffer *eb;
2226	u32 nr;
2227	u32 blocksize;
2228	u32 nscan = 0;
2229
2230	if (level != 1)
2231		return;
2232
2233	if (!path->nodes[level])
2234		return;
2235
2236	node = path->nodes[level];
2237
2238	search = btrfs_node_blockptr(node, slot);
2239	blocksize = fs_info->nodesize;
2240	eb = find_extent_buffer(fs_info, search);
2241	if (eb) {
2242		free_extent_buffer(eb);
2243		return;
2244	}
2245
2246	target = search;
2247
2248	nritems = btrfs_header_nritems(node);
2249	nr = slot;
2250
2251	while (1) {
2252		if (path->reada == READA_BACK) {
2253			if (nr == 0)
2254				break;
2255			nr--;
2256		} else if (path->reada == READA_FORWARD) {
2257			nr++;
2258			if (nr >= nritems)
2259				break;
2260		}
2261		if (path->reada == READA_BACK && objectid) {
2262			btrfs_node_key(node, &disk_key, nr);
2263			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2264				break;
2265		}
2266		search = btrfs_node_blockptr(node, nr);
2267		if ((search <= target && target - search <= 65536) ||
2268		    (search > target && search - target <= 65536)) {
2269			readahead_tree_block(fs_info, search);
2270			nread += blocksize;
2271		}
2272		nscan++;
2273		if ((nread > 65536 || nscan > 32))
2274			break;
2275	}
2276}
2277
2278static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2279				       struct btrfs_path *path, int level)
2280{
2281	int slot;
2282	int nritems;
2283	struct extent_buffer *parent;
2284	struct extent_buffer *eb;
2285	u64 gen;
2286	u64 block1 = 0;
2287	u64 block2 = 0;
2288
2289	parent = path->nodes[level + 1];
2290	if (!parent)
2291		return;
2292
2293	nritems = btrfs_header_nritems(parent);
2294	slot = path->slots[level + 1];
2295
2296	if (slot > 0) {
2297		block1 = btrfs_node_blockptr(parent, slot - 1);
2298		gen = btrfs_node_ptr_generation(parent, slot - 1);
2299		eb = find_extent_buffer(fs_info, block1);
2300		/*
2301		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2302		 * don't want to return EAGAIN here.  That will loop
2303		 * forever
2304		 */
2305		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2306			block1 = 0;
2307		free_extent_buffer(eb);
2308	}
2309	if (slot + 1 < nritems) {
2310		block2 = btrfs_node_blockptr(parent, slot + 1);
2311		gen = btrfs_node_ptr_generation(parent, slot + 1);
2312		eb = find_extent_buffer(fs_info, block2);
2313		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2314			block2 = 0;
2315		free_extent_buffer(eb);
2316	}
2317
2318	if (block1)
2319		readahead_tree_block(fs_info, block1);
2320	if (block2)
2321		readahead_tree_block(fs_info, block2);
2322}
2323
2324
2325/*
2326 * when we walk down the tree, it is usually safe to unlock the higher layers
2327 * in the tree.  The exceptions are when our path goes through slot 0, because
2328 * operations on the tree might require changing key pointers higher up in the
2329 * tree.
2330 *
2331 * callers might also have set path->keep_locks, which tells this code to keep
2332 * the lock if the path points to the last slot in the block.  This is part of
2333 * walking through the tree, and selecting the next slot in the higher block.
2334 *
2335 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2336 * if lowest_unlock is 1, level 0 won't be unlocked
2337 */
2338static noinline void unlock_up(struct btrfs_path *path, int level,
2339			       int lowest_unlock, int min_write_lock_level,
2340			       int *write_lock_level)
2341{
2342	int i;
2343	int skip_level = level;
2344	int no_skips = 0;
2345	struct extent_buffer *t;
2346
2347	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2348		if (!path->nodes[i])
2349			break;
2350		if (!path->locks[i])
2351			break;
2352		if (!no_skips && path->slots[i] == 0) {
2353			skip_level = i + 1;
2354			continue;
2355		}
2356		if (!no_skips && path->keep_locks) {
2357			u32 nritems;
2358			t = path->nodes[i];
2359			nritems = btrfs_header_nritems(t);
2360			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2361				skip_level = i + 1;
2362				continue;
2363			}
2364		}
2365		if (skip_level < i && i >= lowest_unlock)
2366			no_skips = 1;
2367
2368		t = path->nodes[i];
2369		if (i >= lowest_unlock && i > skip_level) {
2370			btrfs_tree_unlock_rw(t, path->locks[i]);
2371			path->locks[i] = 0;
2372			if (write_lock_level &&
2373			    i > min_write_lock_level &&
2374			    i <= *write_lock_level) {
2375				*write_lock_level = i - 1;
2376			}
2377		}
2378	}
2379}
2380
2381/*
2382 * This releases any locks held in the path starting at level and
2383 * going all the way up to the root.
2384 *
2385 * btrfs_search_slot will keep the lock held on higher nodes in a few
2386 * corner cases, such as COW of the block at slot zero in the node.  This
2387 * ignores those rules, and it should only be called when there are no
2388 * more updates to be done higher up in the tree.
2389 */
2390noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2391{
2392	int i;
2393
2394	if (path->keep_locks)
2395		return;
2396
2397	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2398		if (!path->nodes[i])
2399			continue;
2400		if (!path->locks[i])
2401			continue;
2402		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2403		path->locks[i] = 0;
2404	}
2405}
2406
2407/*
2408 * helper function for btrfs_search_slot.  The goal is to find a block
2409 * in cache without setting the path to blocking.  If we find the block
2410 * we return zero and the path is unchanged.
2411 *
2412 * If we can't find the block, we set the path blocking and do some
2413 * reada.  -EAGAIN is returned and the search must be repeated.
2414 */
2415static int
2416read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2417		      struct extent_buffer **eb_ret, int level, int slot,
2418		      const struct btrfs_key *key)
2419{
2420	struct btrfs_fs_info *fs_info = root->fs_info;
2421	u64 blocknr;
2422	u64 gen;
2423	struct extent_buffer *b = *eb_ret;
2424	struct extent_buffer *tmp;
2425	struct btrfs_key first_key;
2426	int ret;
2427	int parent_level;
2428
2429	blocknr = btrfs_node_blockptr(b, slot);
2430	gen = btrfs_node_ptr_generation(b, slot);
2431	parent_level = btrfs_header_level(b);
2432	btrfs_node_key_to_cpu(b, &first_key, slot);
2433
2434	tmp = find_extent_buffer(fs_info, blocknr);
2435	if (tmp) {
2436		/* first we do an atomic uptodate check */
2437		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2438			/*
2439			 * Do extra check for first_key, eb can be stale due to
2440			 * being cached, read from scrub, or have multiple
2441			 * parents (shared tree blocks).
2442			 */
2443			if (btrfs_verify_level_key(tmp,
2444					parent_level - 1, &first_key, gen)) {
2445				free_extent_buffer(tmp);
2446				return -EUCLEAN;
2447			}
2448			*eb_ret = tmp;
2449			return 0;
2450		}
2451
2452		/* the pages were up to date, but we failed
2453		 * the generation number check.  Do a full
2454		 * read for the generation number that is correct.
2455		 * We must do this without dropping locks so
2456		 * we can trust our generation number
2457		 */
2458		btrfs_set_path_blocking(p);
2459
2460		/* now we're allowed to do a blocking uptodate check */
2461		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2462		if (!ret) {
2463			*eb_ret = tmp;
2464			return 0;
2465		}
2466		free_extent_buffer(tmp);
2467		btrfs_release_path(p);
2468		return -EIO;
2469	}
2470
2471	/*
2472	 * reduce lock contention at high levels
2473	 * of the btree by dropping locks before
2474	 * we read.  Don't release the lock on the current
2475	 * level because we need to walk this node to figure
2476	 * out which blocks to read.
2477	 */
2478	btrfs_unlock_up_safe(p, level + 1);
2479	btrfs_set_path_blocking(p);
2480
2481	if (p->reada != READA_NONE)
2482		reada_for_search(fs_info, p, level, slot, key->objectid);
2483
2484	ret = -EAGAIN;
2485	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2486			      &first_key);
2487	if (!IS_ERR(tmp)) {
2488		/*
2489		 * If the read above didn't mark this buffer up to date,
2490		 * it will never end up being up to date.  Set ret to EIO now
2491		 * and give up so that our caller doesn't loop forever
2492		 * on our EAGAINs.
2493		 */
2494		if (!extent_buffer_uptodate(tmp))
2495			ret = -EIO;
2496		free_extent_buffer(tmp);
2497	} else {
2498		ret = PTR_ERR(tmp);
2499	}
2500
2501	btrfs_release_path(p);
2502	return ret;
2503}
2504
2505/*
2506 * helper function for btrfs_search_slot.  This does all of the checks
2507 * for node-level blocks and does any balancing required based on
2508 * the ins_len.
2509 *
2510 * If no extra work was required, zero is returned.  If we had to
2511 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2512 * start over
2513 */
2514static int
2515setup_nodes_for_search(struct btrfs_trans_handle *trans,
2516		       struct btrfs_root *root, struct btrfs_path *p,
2517		       struct extent_buffer *b, int level, int ins_len,
2518		       int *write_lock_level)
2519{
2520	struct btrfs_fs_info *fs_info = root->fs_info;
2521	int ret;
2522
2523	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2524	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2525		int sret;
2526
2527		if (*write_lock_level < level + 1) {
2528			*write_lock_level = level + 1;
2529			btrfs_release_path(p);
2530			goto again;
2531		}
2532
2533		btrfs_set_path_blocking(p);
2534		reada_for_balance(fs_info, p, level);
2535		sret = split_node(trans, root, p, level);
2536
2537		BUG_ON(sret > 0);
2538		if (sret) {
2539			ret = sret;
2540			goto done;
2541		}
2542		b = p->nodes[level];
2543	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2544		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2545		int sret;
2546
2547		if (*write_lock_level < level + 1) {
2548			*write_lock_level = level + 1;
2549			btrfs_release_path(p);
2550			goto again;
2551		}
2552
2553		btrfs_set_path_blocking(p);
2554		reada_for_balance(fs_info, p, level);
2555		sret = balance_level(trans, root, p, level);
2556
2557		if (sret) {
2558			ret = sret;
2559			goto done;
2560		}
2561		b = p->nodes[level];
2562		if (!b) {
2563			btrfs_release_path(p);
2564			goto again;
2565		}
2566		BUG_ON(btrfs_header_nritems(b) == 1);
2567	}
2568	return 0;
2569
2570again:
2571	ret = -EAGAIN;
2572done:
2573	return ret;
2574}
2575
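/*
 * Wrapper around btrfs_bin_search() that can skip the search entirely:
 * once a higher level matched the key exactly (*prev_cmp == 0), the key
 * can only sit at slot 0 of each block further down, so the slot is
 * known without comparing any keys.
 */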
2576static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2577		      int level, int *prev_cmp, int *slot)
2578{
2579	if (*prev_cmp != 0) {
2580		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2581		return *prev_cmp;
2582	}
2583
2584	*slot = 0;
2585
2586	return 0;
2587}
2588
2589int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2590		u64 iobjectid, u64 ioff, u8 key_type,
2591		struct btrfs_key *found_key)
2592{
2593	int ret;
2594	struct btrfs_key key;
2595	struct extent_buffer *eb;
2596
2597	ASSERT(path);
2598	ASSERT(found_key);
2599
2600	key.type = key_type;
2601	key.objectid = iobjectid;
2602	key.offset = ioff;
2603
2604	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2605	if (ret < 0)
2606		return ret;
2607
2608	eb = path->nodes[0];
2609	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2610		ret = btrfs_next_leaf(fs_root, path);
2611		if (ret)
2612			return ret;
2613		eb = path->nodes[0];
2614	}
2615
2616	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2617	if (found_key->type != key.type ||
2618			found_key->objectid != key.objectid)
2619		return 1;
2620
2621	return 0;
2622}
2623
2624static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2625							struct btrfs_path *p,
2626							int write_lock_level)
2627{
2628	struct btrfs_fs_info *fs_info = root->fs_info;
2629	struct extent_buffer *b;
2630	int root_lock;
2631	int level = 0;
2632
2633	/* We try very hard to do read locks on the root */
2634	root_lock = BTRFS_READ_LOCK;
2635
2636	if (p->search_commit_root) {
2637		/*
2638		 * The commit roots are read only so we always do read locks,
2639		 * and we always must hold the commit_root_sem when doing
2640		 * searches on them.  The only exception is send, where we
2641		 * don't want to block transaction commits for a long time, so
2642		 * we need to clone the commit root in order to avoid races
2643		 * with transaction commits that create a snapshot of one of
2644		 * the roots used by a send operation.
2645		 */
2646		if (p->need_commit_sem) {
2647			down_read(&fs_info->commit_root_sem);
2648			b = btrfs_clone_extent_buffer(root->commit_root);
2649			up_read(&fs_info->commit_root_sem);
2650			if (!b)
2651				return ERR_PTR(-ENOMEM);
2652
2653		} else {
2654			b = root->commit_root;
2655			extent_buffer_get(b);
2656		}
2657		level = btrfs_header_level(b);
2658		/*
2659		 * Ensure that all callers have set skip_locking when
2660		 * p->search_commit_root = 1.
2661		 */
2662		ASSERT(p->skip_locking == 1);
2663
2664		goto out;
2665	}
2666
2667	if (p->skip_locking) {
2668		b = btrfs_root_node(root);
2669		level = btrfs_header_level(b);
2670		goto out;
2671	}
2672
2673	/*
2674	 * If the level is set to maximum, we can skip trying to get the read
2675	 * lock.
2676	 */
2677	if (write_lock_level < BTRFS_MAX_LEVEL) {
2678		/*
2679		 * We don't know the level of the root node until we actually
2680		 * have it read locked
2681		 */
2682		b = btrfs_read_lock_root_node(root);
2683		level = btrfs_header_level(b);
2684		if (level > write_lock_level)
2685			goto out;
2686
2687		/* Whoops, must trade for write lock */
2688		btrfs_tree_read_unlock(b);
2689		free_extent_buffer(b);
2690	}
2691
2692	b = btrfs_lock_root_node(root);
2693	root_lock = BTRFS_WRITE_LOCK;
2694
2695	/* The level might have changed, check again */
2696	level = btrfs_header_level(b);
2697
2698out:
2699	p->nodes[level] = b;
2700	if (!p->skip_locking)
2701		p->locks[level] = root_lock;
2702	/*
2703	 * Callers are responsible for dropping b's references.
2704	 */
2705	return b;
2706}
2707
2708
2709/*
2710 * btrfs_search_slot - look for a key in a tree and perform necessary
2711 * modifications to preserve tree invariants.
2712 *
2713 * @trans:	Handle of transaction, used when modifying the tree
2714 * @p:		Holds all btree nodes along the search path
2715 * @root:	The root node of the tree
2716 * @key:	The key we are looking for
2717 * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2718 *		deletions it's -1. 0 for plain searches
2719 * @cow:	boolean indicating whether CoW operations should be
2720 *		performed. Must always be 1 when modifying the tree.
2721 *
2722 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2723 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2724 *
2725 * If @key is found, 0 is returned and you can find the item in the leaf level
2726 * of the path (level 0)
2727 *
2728 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2729 * points to the slot where it should be inserted
2730 *
2731 * If an error is encountered while searching the tree a negative error number
2732 * is returned
2733 */
2734int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2735		      const struct btrfs_key *key, struct btrfs_path *p,
2736		      int ins_len, int cow)
2737{
2738	struct extent_buffer *b;
2739	int slot;
2740	int ret;
2741	int err;
2742	int level;
2743	int lowest_unlock = 1;
2744	/* everything at write_lock_level or lower must be write locked */
2745	int write_lock_level = 0;
2746	u8 lowest_level = 0;
2747	int min_write_lock_level;
2748	int prev_cmp;
2749
2750	lowest_level = p->lowest_level;
2751	WARN_ON(lowest_level && ins_len > 0);
2752	WARN_ON(p->nodes[0] != NULL);
2753	BUG_ON(!cow && ins_len);
2754
2755	if (ins_len < 0) {
2756		lowest_unlock = 2;
2757
2758		/* when we are removing items, we might have to go up to level
2759		 * two as we update tree pointers.  Make sure we keep write
2760		 * locks for those levels as well
2761		 */
2762		write_lock_level = 2;
2763	} else if (ins_len > 0) {
2764		/*
2765		 * for inserting items, make sure we have a write lock on
2766		 * level 1 so we can update keys
2767		 */
2768		write_lock_level = 1;
2769	}
2770
2771	if (!cow)
2772		write_lock_level = -1;
2773
2774	if (cow && (p->keep_locks || p->lowest_level))
2775		write_lock_level = BTRFS_MAX_LEVEL;
2776
2777	min_write_lock_level = write_lock_level;
2778
2779again:
2780	prev_cmp = -1;
2781	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2782	if (IS_ERR(b)) {
2783		ret = PTR_ERR(b);
2784		goto done;
2785	}
2786
2787	while (b) {
2788		level = btrfs_header_level(b);
2789
2790		/*
2791		 * setup the path here so we can release it under lock
2792		 * contention with the cow code
2793		 */
2794		if (cow) {
2795			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2796
2797			/*
2798			 * if we don't really need to cow this block
2799			 * then we don't want to set the path blocking,
2800			 * so we test it here
2801			 */
2802			if (!should_cow_block(trans, root, b)) {
2803				trans->dirty = true;
2804				goto cow_done;
2805			}
2806
2807			/*
2808			 * must have write locks on this node and the
2809			 * parent
2810			 */
2811			if (level > write_lock_level ||
2812			    (level + 1 > write_lock_level &&
2813			    level + 1 < BTRFS_MAX_LEVEL &&
2814			    p->nodes[level + 1])) {
2815				write_lock_level = level + 1;
2816				btrfs_release_path(p);
2817				goto again;
2818			}
2819
2820			btrfs_set_path_blocking(p);
2821			if (last_level)
2822				err = btrfs_cow_block(trans, root, b, NULL, 0,
2823						      &b);
2824			else
2825				err = btrfs_cow_block(trans, root, b,
2826						      p->nodes[level + 1],
2827						      p->slots[level + 1], &b);
2828			if (err) {
2829				ret = err;
2830				goto done;
2831			}
2832		}
2833cow_done:
2834		p->nodes[level] = b;
2835		/*
2836		 * Leave path with blocking locks to avoid massive
2837		 * lock context switches; this is done on purpose.
2838		 */
2839
2840		/*
2841		 * we have a lock on b and as long as we aren't changing
2842		 * the tree, there is no way for the items in b to change.
2843		 * It is safe to drop the lock on our parent before we
2844		 * go through the expensive btree search on b.
2845		 *
2846		 * If we're inserting or deleting (ins_len != 0), then we might
2847		 * be changing slot zero, which may require changing the parent.
2848		 * So, we can't drop the lock until after we know which slot
2849		 * we're operating on.
2850		 */
2851		if (!ins_len && !p->keep_locks) {
2852			int u = level + 1;
2853
2854			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2855				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2856				p->locks[u] = 0;
2857			}
2858		}
2859
2860		ret = key_search(b, key, level, &prev_cmp, &slot);
2861		if (ret < 0)
2862			goto done;
2863
2864		if (level != 0) {
2865			int dec = 0;
2866			if (ret && slot > 0) {
2867				dec = 1;
2868				slot -= 1;
2869			}
2870			p->slots[level] = slot;
2871			err = setup_nodes_for_search(trans, root, p, b, level,
2872					     ins_len, &write_lock_level);
2873			if (err == -EAGAIN)
2874				goto again;
2875			if (err) {
2876				ret = err;
2877				goto done;
2878			}
2879			b = p->nodes[level];
2880			slot = p->slots[level];
2881
2882			/*
2883			 * slot 0 is special, if we change the key
2884			 * we have to update the parent pointer
2885			 * which means we must have a write lock
2886			 * on the parent
2887			 */
2888			if (slot == 0 && ins_len &&
2889			    write_lock_level < level + 1) {
2890				write_lock_level = level + 1;
2891				btrfs_release_path(p);
2892				goto again;
2893			}
2894
2895			unlock_up(p, level, lowest_unlock,
2896				  min_write_lock_level, &write_lock_level);
2897
2898			if (level == lowest_level) {
2899				if (dec)
2900					p->slots[level]++;
2901				goto done;
2902			}
2903
2904			err = read_block_for_search(root, p, &b, level,
2905						    slot, key);
2906			if (err == -EAGAIN)
2907				goto again;
2908			if (err) {
2909				ret = err;
2910				goto done;
2911			}
2912
2913			if (!p->skip_locking) {
2914				level = btrfs_header_level(b);
2915				if (level <= write_lock_level) {
2916					if (!btrfs_try_tree_write_lock(b)) {
2917						btrfs_set_path_blocking(p);
2918						btrfs_tree_lock(b);
2919					}
2920					p->locks[level] = BTRFS_WRITE_LOCK;
2921				} else {
2922					if (!btrfs_tree_read_lock_atomic(b)) {
2923						btrfs_set_path_blocking(p);
2924						btrfs_tree_read_lock(b);
2925					}
2926					p->locks[level] = BTRFS_READ_LOCK;
2927				}
2928				p->nodes[level] = b;
2929			}
2930		} else {
2931			p->slots[level] = slot;
2932			if (ins_len > 0 &&
2933			    btrfs_leaf_free_space(b) < ins_len) {
2934				if (write_lock_level < 1) {
2935					write_lock_level = 1;
2936					btrfs_release_path(p);
2937					goto again;
2938				}
2939
2940				btrfs_set_path_blocking(p);
2941				err = split_leaf(trans, root, key,
2942						 p, ins_len, ret == 0);
2943
2944				BUG_ON(err > 0);
2945				if (err) {
2946					ret = err;
2947					goto done;
2948				}
2949			}
2950			if (!p->search_for_split)
2951				unlock_up(p, level, lowest_unlock,
2952					  min_write_lock_level, NULL);
2953			goto done;
2954		}
2955	}
2956	ret = 1;
2957done:
2958	/*
2959	 * we don't really know what they plan on doing with the path
2960	 * from here on, so for now just mark it as blocking
2961	 */
2962	if (!p->leave_spinning)
2963		btrfs_set_path_blocking(p);
2964	if (ret < 0 && !p->skip_release_on_error)
2965		btrfs_release_path(p);
2966	return ret;
2967}
2968
2969/*
2970 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2971 * current state of the tree together with the operations recorded in the tree
2972 * modification log to search for the key in a previous version of this tree, as
2973 * denoted by the time_seq parameter.
2974 *
2975 * Naturally, there is no support for insert, delete or cow operations.
2976 *
2977 * The resulting path and return value will be set up as if we called
2978 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2979 */
2980int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2981			  struct btrfs_path *p, u64 time_seq)
2982{
2983	struct btrfs_fs_info *fs_info = root->fs_info;
2984	struct extent_buffer *b;
2985	int slot;
2986	int ret;
2987	int err;
2988	int level;
2989	int lowest_unlock = 1;
2990	u8 lowest_level = 0;
2991	int prev_cmp = -1;
2992
2993	lowest_level = p->lowest_level;
2994	WARN_ON(p->nodes[0] != NULL);
2995
2996	if (p->search_commit_root) {
2997		BUG_ON(time_seq);
2998		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2999	}
3000
3001again:
3002	b = get_old_root(root, time_seq);
3003	if (!b) {
3004		ret = -EIO;
3005		goto done;
3006	}
3007	level = btrfs_header_level(b);
3008	p->locks[level] = BTRFS_READ_LOCK;
3009
3010	while (b) {
3011		level = btrfs_header_level(b);
3012		p->nodes[level] = b;
3013
3014		/*
3015		 * we have a lock on b and as long as we aren't changing
3016		 * the tree, there is no way for the items in b to change.
3017		 * It is safe to drop the lock on our parent before we
3018		 * go through the expensive btree search on b.
3019		 */
3020		btrfs_unlock_up_safe(p, level + 1);
3021
3022		/*
3023		 * Since we can unwind extent buffers we want to do a real
3024		 * search every time.
3025		 */
3026		prev_cmp = -1;
3027		ret = key_search(b, key, level, &prev_cmp, &slot);
3028		if (ret < 0)
3029			goto done;
3030
3031		if (level != 0) {
3032			int dec = 0;
3033			if (ret && slot > 0) {
3034				dec = 1;
3035				slot -= 1;
3036			}
3037			p->slots[level] = slot;
3038			unlock_up(p, level, lowest_unlock, 0, NULL);
3039
3040			if (level == lowest_level) {
3041				if (dec)
3042					p->slots[level]++;
3043				goto done;
3044			}
3045
3046			err = read_block_for_search(root, p, &b, level,
3047						    slot, key);
3048			if (err == -EAGAIN)
3049				goto again;
3050			if (err) {
3051				ret = err;
3052				goto done;
3053			}
3054
3055			level = btrfs_header_level(b);
3056			if (!btrfs_tree_read_lock_atomic(b)) {
3057				btrfs_set_path_blocking(p);
3058				btrfs_tree_read_lock(b);
3059			}
3060			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3061			if (!b) {
3062				ret = -ENOMEM;
3063				goto done;
3064			}
3065			p->locks[level] = BTRFS_READ_LOCK;
3066			p->nodes[level] = b;
3067		} else {
3068			p->slots[level] = slot;
3069			unlock_up(p, level, lowest_unlock, 0, NULL);
3070			goto done;
3071		}
3072	}
3073	ret = 1;
3074done:
3075	if (!p->leave_spinning)
3076		btrfs_set_path_blocking(p);
3077	if (ret < 0)
3078		btrfs_release_path(p);
3079
3080	return ret;
3081}
3082
3083/*
3084 * helper to use instead of search slot if no exact match is needed but
3085 * instead the next or previous item should be returned.
3086 * When find_higher is true, the next higher item is returned, the next lower
3087 * otherwise.
3088 * When return_any and find_higher are both true, and no higher item is found,
3089 * return the next lower instead.
3090 * When return_any is true and find_higher is false, and no lower item is found,
3091 * return the next higher instead.
3092 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3093 * < 0 on error
3094 */
3095int btrfs_search_slot_for_read(struct btrfs_root *root,
3096			       const struct btrfs_key *key,
3097			       struct btrfs_path *p, int find_higher,
3098			       int return_any)
3099{
3100	int ret;
3101	struct extent_buffer *leaf;
3102
3103again:
3104	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3105	if (ret <= 0)
3106		return ret;
3107	/*
3108	 * a return value of 1 means the path is at the position where the
3109	 * item should be inserted. Normally this is the next bigger item,
3110	 * but in case the previous item is the last in a leaf, path points
3111	 * to the first free slot in the previous leaf, i.e. at an invalid
3112	 * item.
3113	 */
3114	leaf = p->nodes[0];
3115
3116	if (find_higher) {
3117		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3118			ret = btrfs_next_leaf(root, p);
3119			if (ret <= 0)
3120				return ret;
3121			if (!return_any)
3122				return 1;
3123			/*
3124			 * no higher item found, return the next
3125			 * lower instead
3126			 */
3127			return_any = 0;
3128			find_higher = 0;
3129			btrfs_release_path(p);
3130			goto again;
3131		}
3132	} else {
3133		if (p->slots[0] == 0) {
3134			ret = btrfs_prev_leaf(root, p);
3135			if (ret < 0)
3136				return ret;
3137			if (!ret) {
3138				leaf = p->nodes[0];
3139				if (p->slots[0] == btrfs_header_nritems(leaf))
3140					p->slots[0]--;
3141				return 0;
3142			}
3143			if (!return_any)
3144				return 1;
3145			/*
3146			 * no lower item found, return the next
3147			 * higher instead
3148			 */
3149			return_any = 0;
3150			find_higher = 1;
3151			btrfs_release_path(p);
3152			goto again;
3153		} else {
3154			--p->slots[0];
3155		}
3156	}
3157	return 0;
3158}
3159
3160/*
3161 * adjust the pointers going up the tree, starting at level
3162 * making sure the right key of each node points to 'key'.
3163 * This is used after shifting pointers to the left, so it stops
3164 * fixing up pointers when a given leaf/node is not in slot 0 of the
3165 * higher levels
3166 *
3167 */
3168static void fixup_low_keys(struct btrfs_path *path,
3169			   struct btrfs_disk_key *key, int level)
3170{
3171	int i;
3172	struct extent_buffer *t;
3173	int ret;
3174
3175	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3176		int tslot = path->slots[i];
3177
3178		if (!path->nodes[i])
3179			break;
3180		t = path->nodes[i];
3181		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3182				GFP_ATOMIC);
3183		BUG_ON(ret < 0);
3184		btrfs_set_node_key(t, key, tslot);
3185		btrfs_mark_buffer_dirty(path->nodes[i]);
3186		if (tslot != 0)
3187			break;
3188	}
3189}
3190
3191/*
3192 * update item key.
3193 *
3194 * This function isn't completely safe. It's the caller's responsibility
3195 * to ensure that the new key won't break the ordering
3196 */
3197void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3198			     struct btrfs_path *path,
3199			     const struct btrfs_key *new_key)
3200{
3201	struct btrfs_disk_key disk_key;
3202	struct extent_buffer *eb;
3203	int slot;
3204
3205	eb = path->nodes[0];
3206	slot = path->slots[0];
3207	if (slot > 0) {
3208		btrfs_item_key(eb, &disk_key, slot - 1);
3209		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3210			btrfs_crit(fs_info,
3211		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3212				   slot, btrfs_disk_key_objectid(&disk_key),
3213				   btrfs_disk_key_type(&disk_key),
3214				   btrfs_disk_key_offset(&disk_key),
3215				   new_key->objectid, new_key->type,
3216				   new_key->offset);
3217			btrfs_print_leaf(eb);
3218			BUG();
3219		}
3220	}
3221	if (slot < btrfs_header_nritems(eb) - 1) {
3222		btrfs_item_key(eb, &disk_key, slot + 1);
3223		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3224			btrfs_crit(fs_info,
3225		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3226				   slot, btrfs_disk_key_objectid(&disk_key),
3227				   btrfs_disk_key_type(&disk_key),
3228				   btrfs_disk_key_offset(&disk_key),
3229				   new_key->objectid, new_key->type,
3230				   new_key->offset);
3231			btrfs_print_leaf(eb);
3232			BUG();
3233		}
3234	}
3235
3236	btrfs_cpu_key_to_disk(&disk_key, new_key);
3237	btrfs_set_item_key(eb, &disk_key, slot);
3238	btrfs_mark_buffer_dirty(eb);
3239	if (slot == 0)
3240		fixup_low_keys(path, &disk_key, 1);
3241}
3242
3243/*
3244 * try to push data from one node into the next node left in the
3245 * tree.
3246 *
3247 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3248 * error, and > 0 if there was no room in the left hand block.
3249 */
3250static int push_node_left(struct btrfs_trans_handle *trans,
3251			  struct extent_buffer *dst,
3252			  struct extent_buffer *src, int empty)
3253{
3254	struct btrfs_fs_info *fs_info = trans->fs_info;
3255	int push_items = 0;
3256	int src_nritems;
3257	int dst_nritems;
3258	int ret = 0;
3259
3260	src_nritems = btrfs_header_nritems(src);
3261	dst_nritems = btrfs_header_nritems(dst);
3262	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3263	WARN_ON(btrfs_header_generation(src) != trans->transid);
3264	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3265
3266	if (!empty && src_nritems <= 8)
3267		return 1;
3268
3269	if (push_items <= 0)
3270		return 1;
3271
3272	if (empty) {
3273		push_items = min(src_nritems, push_items);
3274		if (push_items < src_nritems) {
3275			/* leave at least 8 pointers in the node if
3276			 * we aren't going to empty it
3277			 */
3278			if (src_nritems - push_items < 8) {
3279				if (push_items <= 8)
3280					return 1;
3281				push_items -= 8;
3282			}
3283		}
3284	} else
3285		push_items = min(src_nritems - 8, push_items);
3286
3287	ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
3288	if (ret) {
3289		btrfs_abort_transaction(trans, ret);
3290		return ret;
3291	}
3292	copy_extent_buffer(dst, src,
3293			   btrfs_node_key_ptr_offset(dst_nritems),
3294			   btrfs_node_key_ptr_offset(0),
3295			   push_items * sizeof(struct btrfs_key_ptr));
3296
3297	if (push_items < src_nritems) {
3298		/*
3299		 * Don't call tree_mod_log_insert_move here, key removal was
3300		 * already fully logged by tree_mod_log_eb_copy above.
3301		 */
3302		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3303				      btrfs_node_key_ptr_offset(push_items),
3304				      (src_nritems - push_items) *
3305				      sizeof(struct btrfs_key_ptr));
3306	}
3307	btrfs_set_header_nritems(src, src_nritems - push_items);
3308	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3309	btrfs_mark_buffer_dirty(src);
3310	btrfs_mark_buffer_dirty(dst);
3311
3312	return ret;
3313}
3314
3315/*
3316 * try to push data from one node into the next node right in the
3317 * tree.
3318 *
3319 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3320 * error, and > 0 if there was no room in the right hand block.
3321 *
3322 * this will only push up to 1/2 the contents of the left node over
3323 */
3324static int balance_node_right(struct btrfs_trans_handle *trans,
3325			      struct extent_buffer *dst,
3326			      struct extent_buffer *src)
3327{
3328	struct btrfs_fs_info *fs_info = trans->fs_info;
3329	int push_items = 0;
3330	int max_push;
3331	int src_nritems;
3332	int dst_nritems;
3333	int ret = 0;
3334
3335	WARN_ON(btrfs_header_generation(src) != trans->transid);
3336	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3337
3338	src_nritems = btrfs_header_nritems(src);
3339	dst_nritems = btrfs_header_nritems(dst);
3340	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3341	if (push_items <= 0)
3342		return 1;
3343
3344	if (src_nritems < 4)
3345		return 1;
3346
3347	max_push = src_nritems / 2 + 1;
3348	/* don't try to empty the node */
3349	if (max_push >= src_nritems)
3350		return 1;
3351
3352	if (max_push < push_items)
3353		push_items = max_push;
3354
3355	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3356	BUG_ON(ret < 0);
3357	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3358				      btrfs_node_key_ptr_offset(0),
3359				      (dst_nritems) *
3360				      sizeof(struct btrfs_key_ptr));
3361
3362	ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3363				   push_items);
3364	if (ret) {
3365		btrfs_abort_transaction(trans, ret);
3366		return ret;
3367	}
3368	copy_extent_buffer(dst, src,
3369			   btrfs_node_key_ptr_offset(0),
3370			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3371			   push_items * sizeof(struct btrfs_key_ptr));
3372
3373	btrfs_set_header_nritems(src, src_nritems - push_items);
3374	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3375
3376	btrfs_mark_buffer_dirty(src);
3377	btrfs_mark_buffer_dirty(dst);
3378
3379	return ret;
3380}
3381
3382/*
3383 * helper function to insert a new root level in the tree.
3384 * A new node is allocated, and a single item is inserted to
3385 * point to the existing root
3386 *
3387 * returns zero on success or < 0 on failure.
3388 */
3389static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3390			   struct btrfs_root *root,
3391			   struct btrfs_path *path, int level)
3392{
3393	struct btrfs_fs_info *fs_info = root->fs_info;
3394	u64 lower_gen;
3395	struct extent_buffer *lower;
3396	struct extent_buffer *c;
3397	struct extent_buffer *old;
3398	struct btrfs_disk_key lower_key;
3399	int ret;
3400
3401	BUG_ON(path->nodes[level]);
3402	BUG_ON(path->nodes[level-1] != root->node);
3403
3404	lower = path->nodes[level-1];
3405	if (level == 1)
3406		btrfs_item_key(lower, &lower_key, 0);
3407	else
3408		btrfs_node_key(lower, &lower_key, 0);
3409
3410	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3411					 root->node->start, 0);
3412	if (IS_ERR(c))
3413		return PTR_ERR(c);
3414
3415	root_add_used(root, fs_info->nodesize);
3416
3417	btrfs_set_header_nritems(c, 1);
3418	btrfs_set_node_key(c, &lower_key, 0);
3419	btrfs_set_node_blockptr(c, 0, lower->start);
3420	lower_gen = btrfs_header_generation(lower);
3421	WARN_ON(lower_gen != trans->transid);
3422
3423	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3424
3425	btrfs_mark_buffer_dirty(c);
3426
3427	old = root->node;
3428	ret = tree_mod_log_insert_root(root->node, c, 0);
3429	BUG_ON(ret < 0);
3430	rcu_assign_pointer(root->node, c);
3431
3432	/* the super has an extra ref to root->node */
3433	free_extent_buffer(old);
3434
3435	add_root_to_dirty_list(root);
3436	extent_buffer_get(c);
3437	path->nodes[level] = c;
3438	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3439	path->slots[level] = 0;
3440	return 0;
3441}
3442
3443/*
3444 * worker function to insert a single pointer in a node.
3445 * the node should have enough room for the pointer already
3446 *
3447 * slot and level indicate where you want the key to go, and
3448 * blocknr is the block the key points to.
3449 */
3450static void insert_ptr(struct btrfs_trans_handle *trans,
3451		       struct btrfs_path *path,
3452		       struct btrfs_disk_key *key, u64 bytenr,
3453		       int slot, int level)
3454{
3455	struct extent_buffer *lower;
3456	int nritems;
3457	int ret;
3458
3459	BUG_ON(!path->nodes[level]);
3460	btrfs_assert_tree_locked(path->nodes[level]);
3461	lower = path->nodes[level];
3462	nritems = btrfs_header_nritems(lower);
3463	BUG_ON(slot > nritems);
3464	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3465	if (slot != nritems) {
3466		if (level) {
3467			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3468					nritems - slot);
3469			BUG_ON(ret < 0);
3470		}
3471		memmove_extent_buffer(lower,
3472			      btrfs_node_key_ptr_offset(slot + 1),
3473			      btrfs_node_key_ptr_offset(slot),
3474			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3475	}
3476	if (level) {
3477		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3478				GFP_NOFS);
3479		BUG_ON(ret < 0);
3480	}
3481	btrfs_set_node_key(lower, key, slot);
3482	btrfs_set_node_blockptr(lower, slot, bytenr);
3483	WARN_ON(trans->transid == 0);
3484	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3485	btrfs_set_header_nritems(lower, nritems + 1);
3486	btrfs_mark_buffer_dirty(lower);
3487}
3488
3489/*
3490 * split the node at the specified level in path in two.
3491 * The path is corrected to point to the appropriate node after the split
3492 *
3493 * Before splitting this tries to make some room in the node by pushing
3494 * left and right, if either one works, it returns right away.
3495 *
3496 * returns 0 on success and < 0 on failure
3497 */
3498static noinline int split_node(struct btrfs_trans_handle *trans,
3499			       struct btrfs_root *root,
3500			       struct btrfs_path *path, int level)
3501{
3502	struct btrfs_fs_info *fs_info = root->fs_info;
3503	struct extent_buffer *c;
3504	struct extent_buffer *split;
3505	struct btrfs_disk_key disk_key;
3506	int mid;
3507	int ret;
3508	u32 c_nritems;
3509
3510	c = path->nodes[level];
3511	WARN_ON(btrfs_header_generation(c) != trans->transid);
3512	if (c == root->node) {
3513		/*
3514		 * trying to split the root, let's make a new one
3515		 *
3516		 * tree mod log: We don't log removal of the old root in
3517		 * insert_new_root, because that root buffer will be kept as a
3518		 * normal node. We are going to log removal of half of the
3519		 * elements below with tree_mod_log_eb_copy. We're holding a
3520		 * tree lock on the buffer, which is why we cannot race with
3521		 * other tree_mod_log users.
3522		 */
3523		ret = insert_new_root(trans, root, path, level + 1);
3524		if (ret)
3525			return ret;
3526	} else {
3527		ret = push_nodes_for_insert(trans, root, path, level);
3528		c = path->nodes[level];
3529		if (!ret && btrfs_header_nritems(c) <
3530		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3531			return 0;
3532		if (ret < 0)
3533			return ret;
3534	}
3535
3536	c_nritems = btrfs_header_nritems(c);
3537	mid = (c_nritems + 1) / 2;
3538	btrfs_node_key(c, &disk_key, mid);
3539
3540	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3541					     c->start, 0);
3542	if (IS_ERR(split))
3543		return PTR_ERR(split);
3544
3545	root_add_used(root, fs_info->nodesize);
3546	ASSERT(btrfs_header_level(c) == level);
3547
3548	ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3549	if (ret) {
3550		btrfs_abort_transaction(trans, ret);
3551		return ret;
3552	}
3553	copy_extent_buffer(split, c,
3554			   btrfs_node_key_ptr_offset(0),
3555			   btrfs_node_key_ptr_offset(mid),
3556			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3557	btrfs_set_header_nritems(split, c_nritems - mid);
3558	btrfs_set_header_nritems(c, mid);
3559	ret = 0;
3560
3561	btrfs_mark_buffer_dirty(c);
3562	btrfs_mark_buffer_dirty(split);
3563
3564	insert_ptr(trans, path, &disk_key, split->start,
3565		   path->slots[level + 1] + 1, level + 1);
3566
3567	if (path->slots[level] >= mid) {
3568		path->slots[level] -= mid;
3569		btrfs_tree_unlock(c);
3570		free_extent_buffer(c);
3571		path->nodes[level] = split;
3572		path->slots[level + 1] += 1;
3573	} else {
3574		btrfs_tree_unlock(split);
3575		free_extent_buffer(split);
3576	}
3577	return ret;
3578}
3579
3580/*
3581 * how many bytes are required to store the items in a leaf.  start
3582 * and nr indicate which items in the leaf to check.  This totals up the
3583 * space used both by the item structs and the item data
3584 */
3585static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3586{
3587	struct btrfs_item *start_item;
3588	struct btrfs_item *end_item;
3589	struct btrfs_map_token token;
3590	int data_len;
3591	int nritems = btrfs_header_nritems(l);
3592	int end = min(nritems, start + nr) - 1;
3593
3594	if (!nr)
3595		return 0;
3596	btrfs_init_map_token(&token, l);
3597	start_item = btrfs_item_nr(start);
3598	end_item = btrfs_item_nr(end);
3599	data_len = btrfs_token_item_offset(l, start_item, &token) +
3600		btrfs_token_item_size(l, start_item, &token);
3601	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3602	data_len += sizeof(struct btrfs_item) * nr;
3603	WARN_ON(data_len < 0);
3604	return data_len;
3605}
3606
3607/*
3608 * The space between the end of the leaf items and
3609 * the start of the leaf data.  IOW, how much room
3610 * the leaf has left for both items and data
3611 */
3612noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3613{
3614	struct btrfs_fs_info *fs_info = leaf->fs_info;
3615	int nritems = btrfs_header_nritems(leaf);
3616	int ret;
3617
3618	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3619	if (ret < 0) {
3620		btrfs_crit(fs_info,
3621			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3622			   ret,
3623			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3624			   leaf_space_used(leaf, 0, nritems), nritems);
3625	}
3626	return ret;
3627}
3628
3629/*
3630 * min slot controls the lowest index we're willing to push to the
3631 * right.  We'll push up to and including min_slot, but no lower
3632 */
3633static noinline int __push_leaf_right(struct btrfs_path *path,
3634				      int data_size, int empty,
3635				      struct extent_buffer *right,
3636				      int free_space, u32 left_nritems,
3637				      u32 min_slot)
3638{
3639	struct btrfs_fs_info *fs_info = right->fs_info;
3640	struct extent_buffer *left = path->nodes[0];
3641	struct extent_buffer *upper = path->nodes[1];
3642	struct btrfs_map_token token;
3643	struct btrfs_disk_key disk_key;
3644	int slot;
3645	u32 i;
3646	int push_space = 0;
3647	int push_items = 0;
3648	struct btrfs_item *item;
3649	u32 nr;
3650	u32 right_nritems;
3651	u32 data_end;
3652	u32 this_item_size;
3653
3654	if (empty)
3655		nr = 0;
3656	else
3657		nr = max_t(u32, 1, min_slot);
3658
3659	if (path->slots[0] >= left_nritems)
3660		push_space += data_size;
3661
3662	slot = path->slots[1];
3663	i = left_nritems - 1;
3664	while (i >= nr) {
3665		item = btrfs_item_nr(i);
3666
3667		if (!empty && push_items > 0) {
3668			if (path->slots[0] > i)
3669				break;
3670			if (path->slots[0] == i) {
3671				int space = btrfs_leaf_free_space(left);
3672
3673				if (space + push_space * 2 > free_space)
3674					break;
3675			}
3676		}
3677
3678		if (path->slots[0] == i)
3679			push_space += data_size;
3680
3681		this_item_size = btrfs_item_size(left, item);
3682		if (this_item_size + sizeof(*item) + push_space > free_space)
3683			break;
3684
3685		push_items++;
3686		push_space += this_item_size + sizeof(*item);
3687		if (i == 0)
3688			break;
3689		i--;
3690	}
3691
3692	if (push_items == 0)
3693		goto out_unlock;
3694
3695	WARN_ON(!empty && push_items == left_nritems);
3696
3697	/* push left to right */
3698	right_nritems = btrfs_header_nritems(right);
3699
3700	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3701	push_space -= leaf_data_end(left);
3702
3703	/* make room in the right data area */
3704	data_end = leaf_data_end(right);
3705	memmove_extent_buffer(right,
3706			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3707			      BTRFS_LEAF_DATA_OFFSET + data_end,
3708			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3709
3710	/* copy from the left data area */
3711	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3712		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3713		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
3714		     push_space);
3715
3716	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3717			      btrfs_item_nr_offset(0),
3718			      right_nritems * sizeof(struct btrfs_item));
3719
3720	/* copy the items from left to right */
3721	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3722		   btrfs_item_nr_offset(left_nritems - push_items),
3723		   push_items * sizeof(struct btrfs_item));
3724
3725	/* update the item pointers */
3726	btrfs_init_map_token(&token, right);
3727	right_nritems += push_items;
3728	btrfs_set_header_nritems(right, right_nritems);
3729	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3730	for (i = 0; i < right_nritems; i++) {
3731		item = btrfs_item_nr(i);
3732		push_space -= btrfs_token_item_size(right, item, &token);
3733		btrfs_set_token_item_offset(right, item, push_space, &token);
3734	}
3735
3736	left_nritems -= push_items;
3737	btrfs_set_header_nritems(left, left_nritems);
3738
3739	if (left_nritems)
3740		btrfs_mark_buffer_dirty(left);
3741	else
3742		btrfs_clean_tree_block(left);
3743
3744	btrfs_mark_buffer_dirty(right);
3745
3746	btrfs_item_key(right, &disk_key, 0);
3747	btrfs_set_node_key(upper, &disk_key, slot + 1);
3748	btrfs_mark_buffer_dirty(upper);
3749
3750	/* then fixup the leaf pointer in the path */
3751	if (path->slots[0] >= left_nritems) {
3752		path->slots[0] -= left_nritems;
3753		if (btrfs_header_nritems(path->nodes[0]) == 0)
3754			btrfs_clean_tree_block(path->nodes[0]);
3755		btrfs_tree_unlock(path->nodes[0]);
3756		free_extent_buffer(path->nodes[0]);
3757		path->nodes[0] = right;
3758		path->slots[1] += 1;
3759	} else {
3760		btrfs_tree_unlock(right);
3761		free_extent_buffer(right);
3762	}
3763	return 0;
3764
3765out_unlock:
3766	btrfs_tree_unlock(right);
3767	free_extent_buffer(right);
3768	return 1;
3769}
3770
3771/*
3772 * push some data in the path leaf to the right, trying to free up at
3773 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3774 *
3775 * returns 1 if the push failed because the other node didn't have enough
3776 * room, 0 if everything worked out and < 0 if there were major errors.
3777 *
3778 * this will push starting from min_slot to the end of the leaf.  It won't
3779 * push any slot lower than min_slot
3780 */
3781static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3782			   *root, struct btrfs_path *path,
3783			   int min_data_size, int data_size,
3784			   int empty, u32 min_slot)
3785{
3786	struct extent_buffer *left = path->nodes[0];
3787	struct extent_buffer *right;
3788	struct extent_buffer *upper;
3789	int slot;
3790	int free_space;
3791	u32 left_nritems;
3792	int ret;
3793
3794	if (!path->nodes[1])
3795		return 1;
3796
3797	slot = path->slots[1];
3798	upper = path->nodes[1];
3799	if (slot >= btrfs_header_nritems(upper) - 1)
3800		return 1;
3801
3802	btrfs_assert_tree_locked(path->nodes[1]);
3803
3804	right = btrfs_read_node_slot(upper, slot + 1);
3805	/*
3806	 * slot + 1 is not valid or we fail to read the right node,
3807	 * no big deal, just return.
3808	 */
3809	if (IS_ERR(right))
3810		return 1;
3811
3812	btrfs_tree_lock(right);
3813	btrfs_set_lock_blocking_write(right);
3814
3815	free_space = btrfs_leaf_free_space(right);
3816	if (free_space < data_size)
3817		goto out_unlock;
3818
3819	/* cow and double check */
3820	ret = btrfs_cow_block(trans, root, right, upper,
3821			      slot + 1, &right);
3822	if (ret)
3823		goto out_unlock;
3824
3825	free_space = btrfs_leaf_free_space(right);
3826	if (free_space < data_size)
3827		goto out_unlock;
3828
3829	left_nritems = btrfs_header_nritems(left);
3830	if (left_nritems == 0)
3831		goto out_unlock;
3832
3833	if (path->slots[0] == left_nritems && !empty) {
3834		/* Key greater than all keys in the leaf, right neighbor has
3835		 * enough room for it and we're not emptying our leaf to delete
3836		 * it, therefore use right neighbor to insert the new item and
3837		 * no need to touch/dirty our left leaf. */
3838		btrfs_tree_unlock(left);
3839		free_extent_buffer(left);
3840		path->nodes[0] = right;
3841		path->slots[0] = 0;
3842		path->slots[1]++;
3843		return 0;
3844	}
3845
3846	return __push_leaf_right(path, min_data_size, empty,
3847				right, free_space, left_nritems, min_slot);
3848out_unlock:
3849	btrfs_tree_unlock(right);
3850	free_extent_buffer(right);
3851	return 1;
3852}
3853
3854/*
3855 * push some data in the path leaf to the left, trying to free up at
3856 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3857 *
3858 * max_slot can put a limit on how far into the leaf we'll push items.  The
3859 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3860 * items
3861 */
3862static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3863				     int empty, struct extent_buffer *left,
3864				     int free_space, u32 right_nritems,
3865				     u32 max_slot)
3866{
3867	struct btrfs_fs_info *fs_info = left->fs_info;
3868	struct btrfs_disk_key disk_key;
3869	struct extent_buffer *right = path->nodes[0];
3870	int i;
3871	int push_space = 0;
3872	int push_items = 0;
3873	struct btrfs_item *item;
3874	u32 old_left_nritems;
3875	u32 nr;
3876	int ret = 0;
3877	u32 this_item_size;
3878	u32 old_left_item_size;
3879	struct btrfs_map_token token;
3880
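	/*
	 * nr caps how many items we may move: everything up to max_slot when
	 * emptying the leaf, otherwise at least one item must stay behind.
	 */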
3881	if (empty)
3882		nr = min(right_nritems, max_slot);
3883	else
3884		nr = min(right_nritems - 1, max_slot);
3885
3886	for (i = 0; i < nr; i++) {
3887		item = btrfs_item_nr(i);
3888
3889		if (!empty && push_items > 0) {
3890			if (path->slots[0] < i)
3891				break;
3892			if (path->slots[0] == i) {
3893				int space = btrfs_leaf_free_space(right);
3894
3895				if (space + push_space * 2 > free_space)
3896					break;
3897			}
3898		}
3899
3900		if (path->slots[0] == i)
3901			push_space += data_size;
3902
3903		this_item_size = btrfs_item_size(right, item);
3904		if (this_item_size + sizeof(*item) + push_space > free_space)
3905			break;
3906
3907		push_items++;
3908		push_space += this_item_size + sizeof(*item);
3909	}
3910
3911	if (push_items == 0) {
3912		ret = 1;
3913		goto out;
3914	}
3915	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3916
3917	/* push data from right to left */
3918	copy_extent_buffer(left, right,
3919			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3920			   btrfs_item_nr_offset(0),
3921			   push_items * sizeof(struct btrfs_item));
3922
3923	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3924		     btrfs_item_offset_nr(right, push_items - 1);
3925
3926	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3927		     leaf_data_end(left) - push_space,
3928		     BTRFS_LEAF_DATA_OFFSET +
3929		     btrfs_item_offset_nr(right, push_items - 1),
3930		     push_space);
3931	old_left_nritems = btrfs_header_nritems(left);
3932	BUG_ON(old_left_nritems <= 0);
3933
3934	btrfs_init_map_token(&token, left);
3935	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3936	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3937		u32 ioff;
3938
3939		item = btrfs_item_nr(i);
3940
3941		ioff = btrfs_token_item_offset(left, item, &token);
3942		btrfs_set_token_item_offset(left, item,
3943		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3944		      &token);
3945	}
3946	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3947
3948	/* fixup right node */
3949	if (push_items > right_nritems)
3950		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3951		       right_nritems);
3952
3953	if (push_items < right_nritems) {
3954		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3955						  leaf_data_end(right);
3956		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3957				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3958				      BTRFS_LEAF_DATA_OFFSET +
3959				      leaf_data_end(right), push_space);
3960
3961		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3962			      btrfs_item_nr_offset(push_items),
3963			     (btrfs_header_nritems(right) - push_items) *
3964			     sizeof(struct btrfs_item));
3965	}
3966
3967	btrfs_init_map_token(&token, right);
3968	right_nritems -= push_items;
3969	btrfs_set_header_nritems(right, right_nritems);
3970	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3971	for (i = 0; i < right_nritems; i++) {
3972		item = btrfs_item_nr(i);
3973
3974		push_space = push_space - btrfs_token_item_size(right,
3975								item, &token);
3976		btrfs_set_token_item_offset(right, item, push_space, &token);
3977	}
3978
3979	btrfs_mark_buffer_dirty(left);
3980	if (right_nritems)
3981		btrfs_mark_buffer_dirty(right);
3982	else
3983		btrfs_clean_tree_block(right);
3984
3985	btrfs_item_key(right, &disk_key, 0);
3986	fixup_low_keys(path, &disk_key, 1);
3987
3988	/* then fixup the leaf pointer in the path */
3989	if (path->slots[0] < push_items) {
3990		path->slots[0] += old_left_nritems;
3991		btrfs_tree_unlock(path->nodes[0]);
3992		free_extent_buffer(path->nodes[0]);
3993		path->nodes[0] = left;
3994		path->slots[1] -= 1;
3995	} else {
3996		btrfs_tree_unlock(left);
3997		free_extent_buffer(left);
3998		path->slots[0] -= push_items;
3999	}
4000	BUG_ON(path->slots[0] < 0);
4001	return ret;
4002out:
4003	btrfs_tree_unlock(left);
4004	free_extent_buffer(left);
4005	return ret;
4006}
4007
4008/*
4009 * push some data in the path leaf to the left, trying to free up at
4010 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
4011 *
4012 * max_slot can put a limit on how far into the leaf we'll push items.  The
4013 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
4014 * items
4015 */
4016static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
4017			  *root, struct btrfs_path *path, int min_data_size,
4018			  int data_size, int empty, u32 max_slot)
4019{
4020	struct extent_buffer *right = path->nodes[0];
4021	struct extent_buffer *left;
4022	int slot;
4023	int free_space;
4024	u32 right_nritems;
4025	int ret = 0;
4026
4027	slot = path->slots[1];
4028	if (slot == 0)
4029		return 1;
4030	if (!path->nodes[1])
4031		return 1;
4032
4033	right_nritems = btrfs_header_nritems(right);
4034	if (right_nritems == 0)
4035		return 1;
4036
4037	btrfs_assert_tree_locked(path->nodes[1]);
4038
4039	left = btrfs_read_node_slot(path->nodes[1], slot - 1);
4040	/*
4041	 * slot - 1 is not valid or we fail to read the left node,
4042	 * no big deal, just return.
4043	 */
4044	if (IS_ERR(left))
4045		return 1;
4046
4047	btrfs_tree_lock(left);
4048	btrfs_set_lock_blocking_write(left);
4049
4050	free_space = btrfs_leaf_free_space(left);
4051	if (free_space < data_size) {
4052		ret = 1;
4053		goto out;
4054	}
4055
4056	/* cow and double check */
4057	ret = btrfs_cow_block(trans, root, left,
4058			      path->nodes[1], slot - 1, &left);
4059	if (ret) {
4060		/* we hit -ENOSPC, but it isn't fatal here */
4061		if (ret == -ENOSPC)
4062			ret = 1;
4063		goto out;
4064	}
4065
4066	free_space = btrfs_leaf_free_space(left);
4067	if (free_space < data_size) {
4068		ret = 1;
4069		goto out;
4070	}
4071
4072	return __push_leaf_left(path, min_data_size,
4073			       empty, left, free_space, right_nritems,
4074			       max_slot);
4075out:
4076	btrfs_tree_unlock(left);
4077	free_extent_buffer(left);
4078	return ret;
4079}
4080
4081/*
4082 * split the path's leaf in two, making sure there is at least data_size
4083 * available for the resulting leaf level of the path.
4084 */
4085static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4086				    struct btrfs_path *path,
4087				    struct extent_buffer *l,
4088				    struct extent_buffer *right,
4089				    int slot, int mid, int nritems)
4090{
4091	struct btrfs_fs_info *fs_info = trans->fs_info;
4092	int data_copy_size;
4093	int rt_data_off;
4094	int i;
4095	struct btrfs_disk_key disk_key;
4096	struct btrfs_map_token token;
4097
4098	nritems = nritems - mid;
4099	btrfs_set_header_nritems(right, nritems);
4100	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
4101
4102	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4103			   btrfs_item_nr_offset(mid),
4104			   nritems * sizeof(struct btrfs_item));
4105
4106	copy_extent_buffer(right, l,
4107		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4108		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4109		     leaf_data_end(l), data_copy_size);
4110
4111	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4112
4113	btrfs_init_map_token(&token, right);
4114	for (i = 0; i < nritems; i++) {
4115		struct btrfs_item *item = btrfs_item_nr(i);
4116		u32 ioff;
4117
4118		ioff = btrfs_token_item_offset(right, item, &token);
4119		btrfs_set_token_item_offset(right, item,
4120					    ioff + rt_data_off, &token);
4121	}
4122
4123	btrfs_set_header_nritems(l, mid);
4124	btrfs_item_key(right, &disk_key, 0);
4125	insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
4126
4127	btrfs_mark_buffer_dirty(right);
4128	btrfs_mark_buffer_dirty(l);
4129	BUG_ON(path->slots[0] != slot);
4130
4131	if (mid <= slot) {
4132		btrfs_tree_unlock(path->nodes[0]);
4133		free_extent_buffer(path->nodes[0]);
4134		path->nodes[0] = right;
4135		path->slots[0] -= mid;
4136		path->slots[1] += 1;
4137	} else {
4138		btrfs_tree_unlock(right);
4139		free_extent_buffer(right);
4140	}
4141
4142	BUG_ON(path->slots[0] < 0);
4143}
4144
4145/*
4146 * double splits happen when we need to insert a big item in the middle
4147 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4148 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4149 *          A                 B                 C
4150 *
4151 * We avoid this by trying to push the items on either side of our target
4152 * into the adjacent leaves.  If all goes well we can avoid the double split
4153 * completely.
4154 */
4155static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4156					  struct btrfs_root *root,
4157					  struct btrfs_path *path,
4158					  int data_size)
4159{
4160	int ret;
4161	int progress = 0;
4162	int slot;
4163	u32 nritems;
4164	int space_needed = data_size;
4165
4166	slot = path->slots[0];
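	/*
	 * If the target slot is still inside this leaf, the new item stays
	 * here after the push, so only the shortfall between data_size and
	 * the currently free space has to be made up.
	 */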
4167	if (slot < btrfs_header_nritems(path->nodes[0]))
4168		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4169
4170	/*
4171	 * try to push all the items after our slot into the
4172	 * right leaf
4173	 */
4174	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4175	if (ret < 0)
4176		return ret;
4177
4178	if (ret == 0)
4179		progress++;
4180
4181	nritems = btrfs_header_nritems(path->nodes[0]);
4182	/*
4183	 * our goal is to get our slot at the start or end of a leaf.  If
4184	 * we've done so we're done
4185	 */
4186	if (path->slots[0] == 0 || path->slots[0] == nritems)
4187		return 0;
4188
4189	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4190		return 0;
4191
4192	/* try to push all the items before our slot into the next leaf */
4193	slot = path->slots[0];
4194	space_needed = data_size;
4195	if (slot > 0)
4196		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4197	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4198	if (ret < 0)
4199		return ret;
4200
4201	if (ret == 0)
4202		progress++;
4203
4204	if (progress)
4205		return 0;
4206	return 1;
4207}
4208
4209/*
4210 * split the path's leaf in two, making sure there is at least data_size
4211 * available for the resulting leaf level of the path.
4212 *
4213 * returns 0 if all went well and < 0 on failure.
4214 */
4215static noinline int split_leaf(struct btrfs_trans_handle *trans,
4216			       struct btrfs_root *root,
4217			       const struct btrfs_key *ins_key,
4218			       struct btrfs_path *path, int data_size,
4219			       int extend)
4220{
4221	struct btrfs_disk_key disk_key;
4222	struct extent_buffer *l;
4223	u32 nritems;
4224	int mid;
4225	int slot;
4226	struct extent_buffer *right;
4227	struct btrfs_fs_info *fs_info = root->fs_info;
4228	int ret = 0;
4229	int wret;
4230	int split;
4231	int num_doubles = 0;
4232	int tried_avoid_double = 0;
4233
4234	l = path->nodes[0];
4235	slot = path->slots[0];
4236	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4237	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4238		return -EOVERFLOW;
4239
4240	/* first try to make some room by pushing left and right */
4241	if (data_size && path->nodes[1]) {
4242		int space_needed = data_size;
4243
4244		if (slot < btrfs_header_nritems(l))
4245			space_needed -= btrfs_leaf_free_space(l);
4246
4247		wret = push_leaf_right(trans, root, path, space_needed,
4248				       space_needed, 0, 0);
4249		if (wret < 0)
4250			return wret;
4251		if (wret) {
4252			space_needed = data_size;
4253			if (slot > 0)
4254				space_needed -= btrfs_leaf_free_space(l);
4255			wret = push_leaf_left(trans, root, path, space_needed,
4256					      space_needed, 0, (u32)-1);
4257			if (wret < 0)
4258				return wret;
4259		}
4260		l = path->nodes[0];
4261
4262		/* did the pushes work? */
4263		if (btrfs_leaf_free_space(l) >= data_size)
4264			return 0;
4265	}
4266
4267	if (!path->nodes[1]) {
4268		ret = insert_new_root(trans, root, path, 1);
4269		if (ret)
4270			return ret;
4271	}
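	/*
	 * split selects the strategy: 1 is a plain split at 'mid', 0 means no
	 * existing item moves and the new item gets a fresh empty leaf, and 2
	 * means a second split is needed afterwards (the double split case).
	 */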
4272again:
4273	split = 1;
4274	l = path->nodes[0];
4275	slot = path->slots[0];
4276	nritems = btrfs_header_nritems(l);
4277	mid = (nritems + 1) / 2;
4278
4279	if (mid <= slot) {
4280		if (nritems == 1 ||
4281		    leaf_space_used(l, mid, nritems - mid) + data_size >
4282			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4283			if (slot >= nritems) {
4284				split = 0;
4285			} else {
4286				mid = slot;
4287				if (mid != nritems &&
4288				    leaf_space_used(l, mid, nritems - mid) +
4289				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4290					if (data_size && !tried_avoid_double)
4291						goto push_for_double;
4292					split = 2;
4293				}
4294			}
4295		}
4296	} else {
4297		if (leaf_space_used(l, 0, mid) + data_size >
4298			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4299			if (!extend && data_size && slot == 0) {
4300				split = 0;
4301			} else if ((extend || !data_size) && slot == 0) {
4302				mid = 1;
4303			} else {
4304				mid = slot;
4305				if (mid != nritems &&
4306				    leaf_space_used(l, mid, nritems - mid) +
4307				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4308					if (data_size && !tried_avoid_double)
4309						goto push_for_double;
4310					split = 2;
4311				}
4312			}
4313		}
4314	}
4315
4316	if (split == 0)
4317		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4318	else
4319		btrfs_item_key(l, &disk_key, mid);
4320
4321	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4322					     l->start, 0);
4323	if (IS_ERR(right))
4324		return PTR_ERR(right);
4325
4326	root_add_used(root, fs_info->nodesize);
4327
4328	if (split == 0) {
4329		if (mid <= slot) {
4330			btrfs_set_header_nritems(right, 0);
4331			insert_ptr(trans, path, &disk_key,
4332				   right->start, path->slots[1] + 1, 1);
4333			btrfs_tree_unlock(path->nodes[0]);
4334			free_extent_buffer(path->nodes[0]);
4335			path->nodes[0] = right;
4336			path->slots[0] = 0;
4337			path->slots[1] += 1;
4338		} else {
4339			btrfs_set_header_nritems(right, 0);
4340			insert_ptr(trans, path, &disk_key,
4341				   right->start, path->slots[1], 1);
4342			btrfs_tree_unlock(path->nodes[0]);
4343			free_extent_buffer(path->nodes[0]);
4344			path->nodes[0] = right;
4345			path->slots[0] = 0;
4346			if (path->slots[1] == 0)
4347				fixup_low_keys(path, &disk_key, 1);
4348		}
4349		/*
4350		 * We create a new leaf 'right' with room for the required
4351		 * ins_len and we'll do btrfs_mark_buffer_dirty() on it once
4352		 * the new item's content has been copied into it.
4353		 */
4354		return ret;
4355	}
4356
4357	copy_for_split(trans, path, l, right, slot, mid, nritems);
4358
4359	if (split == 2) {
4360		BUG_ON(num_doubles != 0);
4361		num_doubles++;
4362		goto again;
4363	}
4364
4365	return 0;
4366
4367push_for_double:
4368	push_for_double_split(trans, root, path, data_size);
4369	tried_avoid_double = 1;
4370	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4371		return 0;
4372	goto again;
4373}
4374
4375static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4376					 struct btrfs_root *root,
4377					 struct btrfs_path *path, int ins_len)
4378{
4379	struct btrfs_key key;
4380	struct extent_buffer *leaf;
4381	struct btrfs_file_extent_item *fi;
4382	u64 extent_len = 0;
4383	u32 item_size;
4384	int ret;
4385
4386	leaf = path->nodes[0];
4387	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4388
4389	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4390	       key.type != BTRFS_EXTENT_CSUM_KEY);
4391
4392	if (btrfs_leaf_free_space(leaf) >= ins_len)
4393		return 0;
4394
4395	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4396	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4397		fi = btrfs_item_ptr(leaf, path->slots[0],
4398				    struct btrfs_file_extent_item);
4399		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4400	}
4401	btrfs_release_path(path);
4402
4403	path->keep_locks = 1;
4404	path->search_for_split = 1;
4405	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4406	path->search_for_split = 0;
4407	if (ret > 0)
4408		ret = -EAGAIN;
4409	if (ret < 0)
4410		goto err;
4411
4412	ret = -EAGAIN;
4413	leaf = path->nodes[0];
4414	/* if our item isn't there, return now */
4415	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4416		goto err;
4417
4418	/* the leaf has changed, it now has room.  return now */
4419	if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4420		goto err;
4421
4422	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4423		fi = btrfs_item_ptr(leaf, path->slots[0],
4424				    struct btrfs_file_extent_item);
4425		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4426			goto err;
4427	}
4428
4429	btrfs_set_path_blocking(path);
4430	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4431	if (ret)
4432		goto err;
4433
4434	path->keep_locks = 0;
4435	btrfs_unlock_up_safe(path, 1);
4436	return 0;
4437err:
4438	path->keep_locks = 0;
4439	return ret;
4440}
4441
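/*
 * Worker for btrfs_split_item(): the caller has already guaranteed via
 * setup_leaf_for_split() that the leaf has room for one more
 * struct btrfs_item, so running out of leaf space is not a concern here.
 */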
4442static noinline int split_item(struct btrfs_path *path,
4443			       const struct btrfs_key *new_key,
4444			       unsigned long split_offset)
4445{
4446	struct extent_buffer *leaf;
4447	struct btrfs_item *item;
4448	struct btrfs_item *new_item;
4449	int slot;
4450	char *buf;
4451	u32 nritems;
4452	u32 item_size;
4453	u32 orig_offset;
4454	struct btrfs_disk_key disk_key;
4455
4456	leaf = path->nodes[0];
4457	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
4458
4459	btrfs_set_path_blocking(path);
4460
4461	item = btrfs_item_nr(path->slots[0]);
4462	orig_offset = btrfs_item_offset(leaf, item);
4463	item_size = btrfs_item_size(leaf, item);
4464
4465	buf = kmalloc(item_size, GFP_NOFS);
4466	if (!buf)
4467		return -ENOMEM;
4468
4469	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4470			    path->slots[0]), item_size);
4471
4472	slot = path->slots[0] + 1;
4473	nritems = btrfs_header_nritems(leaf);
4474	if (slot != nritems) {
4475		/* shift the items */
4476		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4477				btrfs_item_nr_offset(slot),
4478				(nritems - slot) * sizeof(struct btrfs_item));
4479	}
4480
4481	btrfs_cpu_key_to_disk(&disk_key, new_key);
4482	btrfs_set_item_key(leaf, &disk_key, slot);
4483
4484	new_item = btrfs_item_nr(slot);
4485
4486	btrfs_set_item_offset(leaf, new_item, orig_offset);
4487	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4488
4489	btrfs_set_item_offset(leaf, item,
4490			      orig_offset + item_size - split_offset);
4491	btrfs_set_item_size(leaf, item, split_offset);
4492
4493	btrfs_set_header_nritems(leaf, nritems + 1);
4494
4495	/* write the data for the start of the original item */
4496	write_extent_buffer(leaf, buf,
4497			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4498			    split_offset);
4499
4500	/* write the data for the new item */
4501	write_extent_buffer(leaf, buf + split_offset,
4502			    btrfs_item_ptr_offset(leaf, slot),
4503			    item_size - split_offset);
4504	btrfs_mark_buffer_dirty(leaf);
4505
4506	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4507	kfree(buf);
4508	return 0;
4509}
4510
4511/*
4512 * This function splits a single item into two items,
4513 * giving 'new_key' to the new item and splitting the
4514 * old one at split_offset (from the start of the item).
4515 *
4516 * The path may be released by this operation.  After
4517 * the split, the path is pointing to the old item.  The
4518 * new item is going to be in the same node as the old one.
4519 *
4520 * Note, the item being split must be small enough to live alone on
4521 * a tree block with room for one extra struct btrfs_item
4522 *
4523 * This allows us to split the item in place, keeping a lock on the
4524 * leaf the entire time.
4525 */
4526int btrfs_split_item(struct btrfs_trans_handle *trans,
4527		     struct btrfs_root *root,
4528		     struct btrfs_path *path,
4529		     const struct btrfs_key *new_key,
4530		     unsigned long split_offset)
4531{
4532	int ret;
4533	ret = setup_leaf_for_split(trans, root, path,
4534				   sizeof(struct btrfs_item));
4535	if (ret)
4536		return ret;
4537
4538	ret = split_item(path, new_key, split_offset);
4539	return ret;
4540}
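/*
 * Illustrative sketch (not part of this file): a caller splitting an item
 * at 'split_offset' bytes typically derives the new key from the old one,
 * along the lines of:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset += split_offset;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * How new_key must be computed depends on the item type; the offset
 * adjustment above is only a hypothetical example.
 */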
4541
4542/*
4543 * This function duplicates an item, giving 'new_key' to the new item.
4544 * It guarantees both items live in the same tree leaf and the new item
4545 * is contiguous with the original item.
4546 *
4547 * This allows us to split a file extent in place, keeping a lock on the
4548 * leaf the entire time.
4549 */
4550int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4551			 struct btrfs_root *root,
4552			 struct btrfs_path *path,
4553			 const struct btrfs_key *new_key)
4554{
4555	struct extent_buffer *leaf;
4556	int ret;
4557	u32 item_size;
4558
4559	leaf = path->nodes[0];
4560	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4561	ret = setup_leaf_for_split(trans, root, path,
4562				   item_size + sizeof(struct btrfs_item));
4563	if (ret)
4564		return ret;
4565
4566	path->slots[0]++;
4567	setup_items_for_insert(root, path, new_key, &item_size,
4568			       item_size, item_size +
4569			       sizeof(struct btrfs_item), 1);
4570	leaf = path->nodes[0];
4571	memcpy_extent_buffer(leaf,
4572			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4573			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4574			     item_size);
4575	return 0;
4576}
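/*
 * A typical use, as hinted above: to split a file extent into two ranges,
 * duplicate the extent item with btrfs_duplicate_item() and then trim each
 * copy down to its own half of the range.
 */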
4577
4578/*
4579 * make the item pointed to by the path smaller.  new_size indicates
4580 * how small to make it, and from_end tells us if we just chop bytes
4581 * off the end of the item or if we shift the item to chop bytes off
4582 * the front.
4583 */
4584void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
4585{
4586	int slot;
4587	struct extent_buffer *leaf;
4588	struct btrfs_item *item;
4589	u32 nritems;
4590	unsigned int data_end;
4591	unsigned int old_data_start;
4592	unsigned int old_size;
4593	unsigned int size_diff;
4594	int i;
4595	struct btrfs_map_token token;
4596
4597	leaf = path->nodes[0];
4598	slot = path->slots[0];
4599
4600	old_size = btrfs_item_size_nr(leaf, slot);
4601	if (old_size == new_size)
4602		return;
4603
4604	nritems = btrfs_header_nritems(leaf);
4605	data_end = leaf_data_end(leaf);
4606
4607	old_data_start = btrfs_item_offset_nr(leaf, slot);
4608
4609	size_diff = old_size - new_size;
4610
4611	BUG_ON(slot < 0);
4612	BUG_ON(slot >= nritems);
4613
4614	/*
4615	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4616	 */
4617	/* first correct the data pointers */
4618	btrfs_init_map_token(&token, leaf);
4619	for (i = slot; i < nritems; i++) {
4620		u32 ioff;
4621		item = btrfs_item_nr(i);
4622
4623		ioff = btrfs_token_item_offset(leaf, item, &token);
4624		btrfs_set_token_item_offset(leaf, item,
4625					    ioff + size_diff, &token);
4626	}
4627
4628	/* shift the data */
4629	if (from_end) {
4630		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4631			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4632			      data_end, old_data_start + new_size - data_end);
4633	} else {
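		/*
		 * Chopping bytes off the front: besides shifting the data,
		 * the key offset has to grow by size_diff so it still points
		 * at the first byte that is kept.
		 */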
4634		struct btrfs_disk_key disk_key;
4635		u64 offset;
4636
4637		btrfs_item_key(leaf, &disk_key, slot);
4638
4639		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4640			unsigned long ptr;
4641			struct btrfs_file_extent_item *fi;
4642
4643			fi = btrfs_item_ptr(leaf, slot,
4644					    struct btrfs_file_extent_item);
4645			fi = (struct btrfs_file_extent_item *)(
4646			     (unsigned long)fi - size_diff);
4647
4648			if (btrfs_file_extent_type(leaf, fi) ==
4649			    BTRFS_FILE_EXTENT_INLINE) {
4650				ptr = btrfs_item_ptr_offset(leaf, slot);
4651				memmove_extent_buffer(leaf, ptr,
4652				      (unsigned long)fi,
4653				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4654			}
4655		}
4656
4657		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4658			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4659			      data_end, old_data_start - data_end);
4660
4661		offset = btrfs_disk_key_offset(&disk_key);
4662		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4663		btrfs_set_item_key(leaf, &disk_key, slot);
4664		if (slot == 0)
4665			fixup_low_keys(path, &disk_key, 1);
4666	}
4667
4668	item = btrfs_item_nr(slot);
4669	btrfs_set_item_size(leaf, item, new_size);
4670	btrfs_mark_buffer_dirty(leaf);
4671
4672	if (btrfs_leaf_free_space(leaf) < 0) {
4673		btrfs_print_leaf(leaf);
4674		BUG();
4675	}
4676}
4677
4678/*
4679 * make the item pointed to by the path bigger, data_size is the added size.
4680 */
4681void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4682{
4683	int slot;
4684	struct extent_buffer *leaf;
4685	struct btrfs_item *item;
4686	u32 nritems;
4687	unsigned int data_end;
4688	unsigned int old_data;
4689	unsigned int old_size;
4690	int i;
4691	struct btrfs_map_token token;
4692
4693	leaf = path->nodes[0];
4694
4695	nritems = btrfs_header_nritems(leaf);
4696	data_end = leaf_data_end(leaf);
4697
4698	if (btrfs_leaf_free_space(leaf) < data_size) {
4699		btrfs_print_leaf(leaf);
4700		BUG();
4701	}
4702	slot = path->slots[0];
4703	old_data = btrfs_item_end_nr(leaf, slot);
4704
4705	BUG_ON(slot < 0);
4706	if (slot >= nritems) {
4707		btrfs_print_leaf(leaf);
4708		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4709			   slot, nritems);
4710		BUG();
4711	}
4712
4713	/*
4714	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4715	 */
4716	/* first correct the data pointers */
4717	btrfs_init_map_token(&token, leaf);
4718	for (i = slot; i < nritems; i++) {
4719		u32 ioff;
4720		item = btrfs_item_nr(i);
4721
4722		ioff = btrfs_token_item_offset(leaf, item, &token);
4723		btrfs_set_token_item_offset(leaf, item,
4724					    ioff - data_size, &token);
4725	}
4726
4727	/* shift the data */
4728	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4729		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4730		      data_end, old_data - data_end);
4731
4732	data_end = old_data;
4733	old_size = btrfs_item_size_nr(leaf, slot);
4734	item = btrfs_item_nr(slot);
4735	btrfs_set_item_size(leaf, item, old_size + data_size);
4736	btrfs_mark_buffer_dirty(leaf);
4737
4738	if (btrfs_leaf_free_space(leaf) < 0) {
4739		btrfs_print_leaf(leaf);
4740		BUG();
4741	}
4742}
4743
4744/*
4745 * this is a helper for btrfs_insert_empty_items, the main goal here is
4746 * to save stack depth by doing the bulk of the work in a function
4747 * that doesn't call btrfs_search_slot
4748 */
4749void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4750			    const struct btrfs_key *cpu_key, u32 *data_size,
4751			    u32 total_data, u32 total_size, int nr)
4752{
4753	struct btrfs_fs_info *fs_info = root->fs_info;
4754	struct btrfs_item *item;
4755	int i;
4756	u32 nritems;
4757	unsigned int data_end;
4758	struct btrfs_disk_key disk_key;
4759	struct extent_buffer *leaf;
4760	int slot;
4761	struct btrfs_map_token token;
4762
4763	if (path->slots[0] == 0) {
4764		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4765		fixup_low_keys(path, &disk_key, 1);
4766	}
4767	btrfs_unlock_up_safe(path, 1);
4768
4769	leaf = path->nodes[0];
4770	slot = path->slots[0];
4771
4772	nritems = btrfs_header_nritems(leaf);
4773	data_end = leaf_data_end(leaf);
4774
4775	if (btrfs_leaf_free_space(leaf) < total_size) {
4776		btrfs_print_leaf(leaf);
4777		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4778			   total_size, btrfs_leaf_free_space(leaf));
4779		BUG();
4780	}
4781
4782	btrfs_init_map_token(&token, leaf);
4783	if (slot != nritems) {
4784		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4785
4786		if (old_data < data_end) {
4787			btrfs_print_leaf(leaf);
4788			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4789				   slot, old_data, data_end);
4790			BUG();
4791		}
4792		/*
4793		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4794		 */
4795		/* first correct the data pointers */
4796		for (i = slot; i < nritems; i++) {
4797			u32 ioff;
4798
4799			item = btrfs_item_nr(i);
4800			ioff = btrfs_token_item_offset(leaf, item, &token);
4801			btrfs_set_token_item_offset(leaf, item,
4802						    ioff - total_data, &token);
4803		}
4804		/* shift the items */
4805		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4806			      btrfs_item_nr_offset(slot),
4807			      (nritems - slot) * sizeof(struct btrfs_item));
4808
4809		/* shift the data */
4810		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4811			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4812			      data_end, old_data - data_end);
4813		data_end = old_data;
4814	}
4815
4816	/* setup the item for the new data */
4817	for (i = 0; i < nr; i++) {
4818		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4819		btrfs_set_item_key(leaf, &disk_key, slot + i);
4820		item = btrfs_item_nr(slot + i);
4821		btrfs_set_token_item_offset(leaf, item,
4822					    data_end - data_size[i], &token);
4823		data_end -= data_size[i];
4824		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4825	}
4826
4827	btrfs_set_header_nritems(leaf, nritems + nr);
4828	btrfs_mark_buffer_dirty(leaf);
4829
4830	if (btrfs_leaf_free_space(leaf) < 0) {
4831		btrfs_print_leaf(leaf);
4832		BUG();
4833	}
4834}
4835
4836/*
4837 * Given a key and some data, insert items into the tree.
4838 * This does all the path init required, making room in the tree if needed.
4839 */
4840int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4841			    struct btrfs_root *root,
4842			    struct btrfs_path *path,
4843			    const struct btrfs_key *cpu_key, u32 *data_size,
4844			    int nr)
4845{
4846	int ret = 0;
4847	int slot;
4848	int i;
4849	u32 total_size = 0;
4850	u32 total_data = 0;
4851
4852	for (i = 0; i < nr; i++)
4853		total_data += data_size[i];
4854
4855	total_size = total_data + (nr * sizeof(struct btrfs_item));
4856	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4857	if (ret == 0)
4858		return -EEXIST;
4859	if (ret < 0)
4860		return ret;
4861
4862	slot = path->slots[0];
4863	BUG_ON(slot < 0);
4864
4865	setup_items_for_insert(root, path, cpu_key, data_size,
4866			       total_data, total_size, nr);
4867	return 0;
4868}
4869
4870/*
4871 * Given a key and some data, insert an item into the tree.
4872 * This does all the path init required, making room in the tree if needed.
4873 */
4874int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4875		      const struct btrfs_key *cpu_key, void *data,
4876		      u32 data_size)
4877{
4878	int ret = 0;
4879	struct btrfs_path *path;
4880	struct extent_buffer *leaf;
4881	unsigned long ptr;
4882
4883	path = btrfs_alloc_path();
4884	if (!path)
4885		return -ENOMEM;
4886	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4887	if (!ret) {
4888		leaf = path->nodes[0];
4889		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4890		write_extent_buffer(leaf, data, ptr, data_size);
4891		btrfs_mark_buffer_dirty(leaf);
4892	}
4893	btrfs_free_path(path);
4894	return ret;
4895}
4896
4897/*
4898 * delete the pointer from a given node.
4899 *
4900 * the tree should have been previously balanced so the deletion does not
4901 * empty a node.
4902 */
4903static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4904		    int level, int slot)
4905{
4906	struct extent_buffer *parent = path->nodes[level];
4907	u32 nritems;
4908	int ret;
4909
4910	nritems = btrfs_header_nritems(parent);
4911	if (slot != nritems - 1) {
4912		if (level) {
4913			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4914					nritems - slot - 1);
4915			BUG_ON(ret < 0);
4916		}
4917		memmove_extent_buffer(parent,
4918			      btrfs_node_key_ptr_offset(slot),
4919			      btrfs_node_key_ptr_offset(slot + 1),
4920			      sizeof(struct btrfs_key_ptr) *
4921			      (nritems - slot - 1));
4922	} else if (level) {
4923		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4924				GFP_NOFS);
4925		BUG_ON(ret < 0);
4926	}
4927
4928	nritems--;
4929	btrfs_set_header_nritems(parent, nritems);
4930	if (nritems == 0 && parent == root->node) {
4931		BUG_ON(btrfs_header_level(root->node) != 1);
4932		/* just turn the root into a leaf and break */
4933		btrfs_set_header_level(root->node, 0);
4934	} else if (slot == 0) {
4935		struct btrfs_disk_key disk_key;
4936
4937		btrfs_node_key(parent, &disk_key, 0);
4938		fixup_low_keys(path, &disk_key, level + 1);
4939	}
4940	btrfs_mark_buffer_dirty(parent);
4941}
4942
4943/*
4944 * a helper function to delete the leaf pointed to by path->slots[1] and
4945 * path->nodes[1].
4946 *
4947 * This deletes the pointer in path->nodes[1] and frees the leaf
4948 * block extent.
4949 *
4950 * The path must have already been setup for deleting the leaf, including
4951 * all the proper balancing.  path->nodes[1] must be locked.
4952 */
4953static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4954				    struct btrfs_root *root,
4955				    struct btrfs_path *path,
4956				    struct extent_buffer *leaf)
4957{
4958	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4959	del_ptr(root, path, 1, path->slots[1]);
4960
4961	/*
4962	 * btrfs_free_extent is expensive, we want to make sure we
4963	 * aren't holding any locks when we call it
4964	 */
4965	btrfs_unlock_up_safe(path, 0);
4966
4967	root_sub_used(root, leaf->len);
4968
4969	extent_buffer_get(leaf);
4970	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4971	free_extent_buffer_stale(leaf);
4972}
4973/*
4974 * delete the item at the leaf level in path.  If that empties
4975 * the leaf, remove it from the tree
4976 */
4977int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4978		    struct btrfs_path *path, int slot, int nr)
4979{
4980	struct btrfs_fs_info *fs_info = root->fs_info;
4981	struct extent_buffer *leaf;
4982	struct btrfs_item *item;
4983	u32 last_off;
4984	u32 dsize = 0;
4985	int ret = 0;
4986	int wret;
4987	int i;
4988	u32 nritems;
4989
4990	leaf = path->nodes[0];
4991	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4992
4993	for (i = 0; i < nr; i++)
4994		dsize += btrfs_item_size_nr(leaf, slot + i);
4995
4996	nritems = btrfs_header_nritems(leaf);
4997
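	/*
	 * Unless the deleted items were the tail of the leaf, close the gap:
	 * slide the surviving item data towards the end of the leaf and the
	 * surviving item headers over the deleted ones.
	 */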
4998	if (slot + nr != nritems) {
4999		int data_end = leaf_data_end(leaf);
5000		struct btrfs_map_token token;
5001
5002		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
5003			      data_end + dsize,
5004			      BTRFS_LEAF_DATA_OFFSET + data_end,
5005			      last_off - data_end);
5006
5007		btrfs_init_map_token(&token, leaf);
5008		for (i = slot + nr; i < nritems; i++) {
5009			u32 ioff;
5010
5011			item = btrfs_item_nr(i);
5012			ioff = btrfs_token_item_offset(leaf, item, &token);
5013			btrfs_set_token_item_offset(leaf, item,
5014						    ioff + dsize, &token);
5015		}
5016
5017		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5018			      btrfs_item_nr_offset(slot + nr),
5019			      sizeof(struct btrfs_item) *
5020			      (nritems - slot - nr));
5021	}
5022	btrfs_set_header_nritems(leaf, nritems - nr);
5023	nritems -= nr;
5024
5025	/* delete the leaf if we've emptied it */
5026	if (nritems == 0) {
5027		if (leaf == root->node) {
5028			btrfs_set_header_level(leaf, 0);
5029		} else {
5030			btrfs_set_path_blocking(path);
5031			btrfs_clean_tree_block(leaf);
5032			btrfs_del_leaf(trans, root, path, leaf);
5033		}
5034	} else {
5035		int used = leaf_space_used(leaf, 0, nritems);
5036		if (slot == 0) {
5037			struct btrfs_disk_key disk_key;
5038
5039			btrfs_item_key(leaf, &disk_key, 0);
5040			fixup_low_keys(path, &disk_key, 1);
5041		}
5042
5043		/* delete the leaf if it is mostly empty */
5044		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5045			/* push_leaf_left fixes the path.
5046			 * make sure the path still points to our leaf
5047			 * for possible call to del_ptr below
5048			 */
5049			slot = path->slots[1];
5050			extent_buffer_get(leaf);
5051
5052			btrfs_set_path_blocking(path);
5053			wret = push_leaf_left(trans, root, path, 1, 1,
5054					      1, (u32)-1);
5055			if (wret < 0 && wret != -ENOSPC)
5056				ret = wret;
5057
5058			if (path->nodes[0] == leaf &&
5059			    btrfs_header_nritems(leaf)) {
5060				wret = push_leaf_right(trans, root, path, 1,
5061						       1, 1, 0);
5062				if (wret < 0 && wret != -ENOSPC)
5063					ret = wret;
5064			}
5065
5066			if (btrfs_header_nritems(leaf) == 0) {
5067				path->slots[1] = slot;
5068				btrfs_del_leaf(trans, root, path, leaf);
5069				free_extent_buffer(leaf);
5070				ret = 0;
5071			} else {
5072				/* if we're still in the path, make sure
5073				 * we're dirty.  Otherwise, one of the
5074				 * push_leaf functions must have already
5075				 * dirtied this buffer
5076				 */
5077				if (path->nodes[0] == leaf)
5078					btrfs_mark_buffer_dirty(leaf);
5079				free_extent_buffer(leaf);
5080			}
5081		} else {
5082			btrfs_mark_buffer_dirty(leaf);
5083		}
5084	}
5085	return ret;
5086}
5087
5088/*
5089 * search the tree again to find a leaf with lesser keys
5090 * returns 0 if it found something or 1 if there are no lesser leaves.
5091 * returns < 0 on io errors.
5092 *
5093 * This may release the path, and so you may lose any locks held at the
5094 * time you call it.
5095 */
5096int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5097{
5098	struct btrfs_key key;
5099	struct btrfs_disk_key found_key;
5100	int ret;
5101
5102	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5103
5104	if (key.offset > 0) {
5105		key.offset--;
5106	} else if (key.type > 0) {
5107		key.type--;
5108		key.offset = (u64)-1;
5109	} else if (key.objectid > 0) {
5110		key.objectid--;
5111		key.type = (u8)-1;
5112		key.offset = (u64)-1;
5113	} else {
5114		return 1;
5115	}
5116
5117	btrfs_release_path(path);
5118	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5119	if (ret < 0)
5120		return ret;
5121	btrfs_item_key(path->nodes[0], &found_key, 0);
5122	ret = comp_keys(&found_key, &key);
5123	/*
5124	 * We might have had an item with the previous key in the tree right
5125	 * before we released our path. And after we released our path, that
5126	 * item might have been pushed to the first slot (0) of the leaf we
5127	 * were holding due to a tree balance. Alternatively, an item with the
5128	 * previous key can exist as the only element of a leaf (big fat item).
5129	 * Therefore account for these 2 cases, so that our callers (like
5130	 * btrfs_previous_item) don't miss an existing item with a key matching
5131	 * the previous key we computed above.
5132	 */
5133	if (ret <= 0)
5134		return 0;
5135	return 1;
5136}
5137
5138/*
5139 * A helper function to walk down the tree starting at min_key, and looking
5140 * for nodes or leaves that have a minimum transaction id.
5141 * This is used by the btree defrag code, and tree logging
5142 *
5143 * This does not cow, but it does stuff the starting key it finds back
5144 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5145 * key and get a writable path.
5146 *
5147 * This honors path->lowest_level to prevent descent past a given level
5148 * of the tree.
5149 *
5150 * min_trans indicates the oldest transaction that you are interested
5151 * in walking through.  Any nodes or leaves older than min_trans are
5152 * skipped over (without reading them).
5153 *
5154 * returns zero if something useful was found, < 0 on error and 1 if there
5155 * was nothing in the tree that matched the search criteria.
5156 */
5157int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5158			 struct btrfs_path *path,
5159			 u64 min_trans)
5160{
5161	struct extent_buffer *cur;
5162	struct btrfs_key found_key;
5163	int slot;
5164	int sret;
5165	u32 nritems;
5166	int level;
5167	int ret = 1;
5168	int keep_locks = path->keep_locks;
5169
5170	path->keep_locks = 1;
5171again:
5172	cur = btrfs_read_lock_root_node(root);
5173	level = btrfs_header_level(cur);
5174	WARN_ON(path->nodes[level]);
5175	path->nodes[level] = cur;
5176	path->locks[level] = BTRFS_READ_LOCK;
5177
5178	if (btrfs_header_generation(cur) < min_trans) {
5179		ret = 1;
5180		goto out;
5181	}
5182	while (1) {
5183		nritems = btrfs_header_nritems(cur);
5184		level = btrfs_header_level(cur);
5185		sret = btrfs_bin_search(cur, min_key, level, &slot);
5186		if (sret < 0) {
5187			ret = sret;
5188			goto out;
5189		}
5190
5191		/* at the lowest level, we're done, setup the path and exit */
5192		if (level == path->lowest_level) {
5193			if (slot >= nritems)
5194				goto find_next_key;
5195			ret = 0;
5196			path->slots[level] = slot;
5197			btrfs_item_key_to_cpu(cur, &found_key, slot);
5198			goto out;
5199		}
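		/*
		 * The key was not found: bin search left slot at the
		 * insertion point, and for an internal node the child that
		 * covers min_key is one slot back.
		 */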
5200		if (sret && slot > 0)
5201			slot--;
5202		/*
5203		 * check this node pointer against the min_trans parameters.
5204		 * If it is too old, skip to the next one.
5205		 */
5206		while (slot < nritems) {
5207			u64 gen;
5208
5209			gen = btrfs_node_ptr_generation(cur, slot);
5210			if (gen < min_trans) {
5211				slot++;
5212				continue;
5213			}
5214			break;
5215		}
5216find_next_key:
5217		/*
5218		 * we didn't find a candidate key in this node, walk forward
5219		 * and find another one
5220		 */
5221		if (slot >= nritems) {
5222			path->slots[level] = slot;
5223			btrfs_set_path_blocking(path);
5224			sret = btrfs_find_next_key(root, path, min_key, level,
5225						  min_trans);
5226			if (sret == 0) {
5227				btrfs_release_path(path);
5228				goto again;
5229			} else {
5230				goto out;
5231			}
5232		}
5233		/* save our key for returning back */
5234		btrfs_node_key_to_cpu(cur, &found_key, slot);
5235		path->slots[level] = slot;
5236		if (level == path->lowest_level) {
5237			ret = 0;
5238			goto out;
5239		}
5240		btrfs_set_path_blocking(path);
5241		cur = btrfs_read_node_slot(cur, slot);
5242		if (IS_ERR(cur)) {
5243			ret = PTR_ERR(cur);
5244			goto out;
5245		}
5246
5247		btrfs_tree_read_lock(cur);
5248
5249		path->locks[level - 1] = BTRFS_READ_LOCK;
5250		path->nodes[level - 1] = cur;
5251		unlock_up(path, level, 1, 0, NULL);
5252	}
5253out:
5254	path->keep_locks = keep_locks;
5255	if (ret == 0) {
5256		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5257		btrfs_set_path_blocking(path);
5258		memcpy(min_key, &found_key, sizeof(found_key));
5259	}
5260	return ret;
5261}
5262
5263/*
5264 * this is similar to btrfs_next_leaf, but does not try to preserve
5265 * and fixup the path.  It looks for and returns the next key in the
5266 * tree based on the current path and the min_trans parameters.
5267 *
5268 * 0 is returned if another key is found, < 0 if there are any errors
5269 * and 1 is returned if there are no higher keys in the tree
5270 *
5271 * path->keep_locks should be set to 1 on the search made before
5272 * calling this function.
5273 */
5274int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5275			struct btrfs_key *key, int level, u64 min_trans)
5276{
5277	int slot;
5278	struct extent_buffer *c;
5279
5280	WARN_ON(!path->keep_locks && !path->skip_locking);
5281	while (level < BTRFS_MAX_LEVEL) {
5282		if (!path->nodes[level])
5283			return 1;
5284
5285		slot = path->slots[level] + 1;
5286		c = path->nodes[level];
5287next:
5288		if (slot >= btrfs_header_nritems(c)) {
5289			int ret;
5290			int orig_lowest;
5291			struct btrfs_key cur_key;
5292			if (level + 1 >= BTRFS_MAX_LEVEL ||
5293			    !path->nodes[level + 1])
5294				return 1;
5295
5296			if (path->locks[level + 1] || path->skip_locking) {
5297				level++;
5298				continue;
5299			}
5300
5301			slot = btrfs_header_nritems(c) - 1;
5302			if (level == 0)
5303				btrfs_item_key_to_cpu(c, &cur_key, slot);
5304			else
5305				btrfs_node_key_to_cpu(c, &cur_key, slot);
5306
5307			orig_lowest = path->lowest_level;
5308			btrfs_release_path(path);
5309			path->lowest_level = level;
5310			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5311						0, 0);
5312			path->lowest_level = orig_lowest;
5313			if (ret < 0)
5314				return ret;
5315
5316			c = path->nodes[level];
5317			slot = path->slots[level];
5318			if (ret == 0)
5319				slot++;
5320			goto next;
5321		}
5322
5323		if (level == 0)
5324			btrfs_item_key_to_cpu(c, key, slot);
5325		else {
5326			u64 gen = btrfs_node_ptr_generation(c, slot);
5327
5328			if (gen < min_trans) {
5329				slot++;
5330				goto next;
5331			}
5332			btrfs_node_key_to_cpu(c, key, slot);
5333		}
5334		return 0;
5335	}
5336	return 1;
5337}
5338
5339/*
5340 * search the tree again to find a leaf with greater keys
5341 * returns 0 if it found something or 1 if there are no greater leaves.
5342 * returns < 0 on io errors.
5343 */
5344int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5345{
5346	return btrfs_next_old_leaf(root, path, 0);
5347}
5348
5349int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5350			u64 time_seq)
5351{
5352	int slot;
5353	int level;
5354	struct extent_buffer *c;
5355	struct extent_buffer *next;
5356	struct btrfs_key key;
5357	u32 nritems;
5358	int ret;
5359	int old_spinning = path->leave_spinning;
5360	int next_rw_lock = 0;
5361
5362	nritems = btrfs_header_nritems(path->nodes[0]);
5363	if (nritems == 0)
5364		return 1;
5365
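	/*
	 * Remember the last key of the current leaf: after the path is
	 * released below we search for that key again and continue from
	 * there.
	 */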
5366	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5367again:
5368	level = 1;
5369	next = NULL;
5370	next_rw_lock = 0;
5371	btrfs_release_path(path);
5372
5373	path->keep_locks = 1;
5374	path->leave_spinning = 1;
5375
5376	if (time_seq)
5377		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5378	else
5379		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5380	path->keep_locks = 0;
5381
5382	if (ret < 0)
5383		return ret;
5384
5385	nritems = btrfs_header_nritems(path->nodes[0]);
5386	/*
5387	 * by releasing the path above we dropped all our locks.  A balance
5388	 * could have added more items next to the key that used to be
5389	 * at the very end of the block.  So, check again here and
5390	 * advance the path if there are now more items available.
5391	 */
5392	if (nritems > 0 && path->slots[0] < nritems - 1) {
5393		if (ret == 0)
5394			path->slots[0]++;
5395		ret = 0;
5396		goto done;
5397	}
5398	/*
5399	 * So the above check misses one case:
5400	 * - after releasing the path above, someone has removed the item that
5401	 *   used to be at the very end of the block, and balance between leaves
5402	 *   gets another one with a bigger key.offset to replace it.
5403	 *
5404	 * This one should be returned as well, or we can get leaf corruption
5405	 * later (esp. in __btrfs_drop_extents()).
5406	 *
5407	 * And a bit more explanation about this check,
5408	 * with ret > 0, the key isn't found, the path points to the slot
5409	 * where it should be inserted, so the path->slots[0] item must be the
5410	 * bigger one.
5411	 */
5412	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5413		ret = 0;
5414		goto done;
5415	}
5416
5417	while (level < BTRFS_MAX_LEVEL) {
5418		if (!path->nodes[level]) {
5419			ret = 1;
5420			goto done;
5421		}
5422
5423		slot = path->slots[level] + 1;
5424		c = path->nodes[level];
5425		if (slot >= btrfs_header_nritems(c)) {
5426			level++;
5427			if (level == BTRFS_MAX_LEVEL) {
5428				ret = 1;
5429				goto done;
5430			}
5431			continue;
5432		}
5433
5434		if (next) {
5435			btrfs_tree_unlock_rw(next, next_rw_lock);
5436			free_extent_buffer(next);
5437		}
5438
5439		next = c;
5440		next_rw_lock = path->locks[level];
5441		ret = read_block_for_search(root, path, &next, level,
5442					    slot, &key);
5443		if (ret == -EAGAIN)
5444			goto again;
5445
5446		if (ret < 0) {
5447			btrfs_release_path(path);
5448			goto done;
5449		}
5450
5451		if (!path->skip_locking) {
5452			ret = btrfs_try_tree_read_lock(next);
5453			if (!ret && time_seq) {
5454				/*
5455				 * If we don't get the lock, we may be racing
5456				 * with push_leaf_left, holding that lock while
5457				 * itself waiting for the leaf we've currently
5458				 * locked. To solve this situation, we give up
5459				 * on our lock and cycle.
5460				 */
5461				free_extent_buffer(next);
5462				btrfs_release_path(path);
5463				cond_resched();
5464				goto again;
5465			}
5466			if (!ret) {
5467				btrfs_set_path_blocking(path);
5468				btrfs_tree_read_lock(next);
5469			}
5470			next_rw_lock = BTRFS_READ_LOCK;
5471		}
5472		break;
5473	}
5474	path->slots[level] = slot;
5475	while (1) {
5476		level--;
5477		c = path->nodes[level];
5478		if (path->locks[level])
5479			btrfs_tree_unlock_rw(c, path->locks[level]);
5480
5481		free_extent_buffer(c);
5482		path->nodes[level] = next;
5483		path->slots[level] = 0;
5484		if (!path->skip_locking)
5485			path->locks[level] = next_rw_lock;
5486		if (!level)
5487			break;
5488
5489		ret = read_block_for_search(root, path, &next, level,
5490					    0, &key);
5491		if (ret == -EAGAIN)
5492			goto again;
5493
5494		if (ret < 0) {
5495			btrfs_release_path(path);
5496			goto done;
5497		}
5498
5499		if (!path->skip_locking) {
5500			ret = btrfs_try_tree_read_lock(next);
5501			if (!ret) {
5502				btrfs_set_path_blocking(path);
5503				btrfs_tree_read_lock(next);
5504			}
5505			next_rw_lock = BTRFS_READ_LOCK;
5506		}
5507	}
5508	ret = 0;
5509done:
5510	unlock_up(path, 0, 1, 0, NULL);
5511	path->leave_spinning = old_spinning;
5512	if (!old_spinning)
5513		btrfs_set_path_blocking(path);
5514
5515	return ret;
5516}
5517
5518/*
5519 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5520 * searching until it gets past min_objectid or finds an item of 'type'
5521 *
5522 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5523 */
5524int btrfs_previous_item(struct btrfs_root *root,
5525			struct btrfs_path *path, u64 min_objectid,
5526			int type)
5527{
5528	struct btrfs_key found_key;
5529	struct extent_buffer *leaf;
5530	u32 nritems;
5531	int ret;
5532
5533	while (1) {
5534		if (path->slots[0] == 0) {
5535			btrfs_set_path_blocking(path);
5536			ret = btrfs_prev_leaf(root, path);
5537			if (ret != 0)
5538				return ret;
5539		} else {
5540			path->slots[0]--;
5541		}
5542		leaf = path->nodes[0];
5543		nritems = btrfs_header_nritems(leaf);
5544		if (nritems == 0)
5545			return 1;
5546		if (path->slots[0] == nritems)
5547			path->slots[0]--;
5548
5549		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5550		if (found_key.objectid < min_objectid)
5551			break;
5552		if (found_key.type == type)
5553			return 0;
5554		if (found_key.objectid == min_objectid &&
5555		    found_key.type < type)
5556			break;
5557	}
5558	return 1;
5559}
5560
5561/*
5562 * search in extent tree to find a previous Metadata/Data extent item with
5563 * min objectid.
5564 *
5565 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5566 */
5567int btrfs_previous_extent_item(struct btrfs_root *root,
5568			struct btrfs_path *path, u64 min_objectid)
5569{
5570	struct btrfs_key found_key;
5571	struct extent_buffer *leaf;
5572	u32 nritems;
5573	int ret;
5574
5575	while (1) {
5576		if (path->slots[0] == 0) {
5577			btrfs_set_path_blocking(path);
5578			ret = btrfs_prev_leaf(root, path);
5579			if (ret != 0)
5580				return ret;
5581		} else {
5582			path->slots[0]--;
5583		}
5584		leaf = path->nodes[0];
5585		nritems = btrfs_header_nritems(leaf);
5586		if (nritems == 0)
5587			return 1;
5588		if (path->slots[0] == nritems)
5589			path->slots[0]--;
5590
5591		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5592		if (found_key.objectid < min_objectid)
5593			break;
5594		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5595		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5596			return 0;
5597		if (found_key.objectid == min_objectid &&
5598		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5599			break;
5600	}
5601	return 1;
5602}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/rbtree.h>
   9#include <linux/mm.h>
  10#include "ctree.h"
  11#include "disk-io.h"
  12#include "transaction.h"
  13#include "print-tree.h"
  14#include "locking.h"
  15#include "volumes.h"
  16#include "qgroup.h"
  17#include "tree-mod-log.h"
  18
  19static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
  20		      *root, struct btrfs_path *path, int level);
  21static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  22		      const struct btrfs_key *ins_key, struct btrfs_path *path,
  23		      int data_size, int extend);
  24static int push_node_left(struct btrfs_trans_handle *trans,
  25			  struct extent_buffer *dst,
  26			  struct extent_buffer *src, int empty);
  27static int balance_node_right(struct btrfs_trans_handle *trans,
  28			      struct extent_buffer *dst_buf,
  29			      struct extent_buffer *src_buf);
  30static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
  31		    int level, int slot);
  32
  33static const struct btrfs_csums {
  34	u16		size;
  35	const char	name[10];
  36	const char	driver[12];
  37} btrfs_csums[] = {
  38	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
  39	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
  40	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
  41	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
  42				     .driver = "blake2b-256" },
  43};
  44
  45int btrfs_super_csum_size(const struct btrfs_super_block *s)
  46{
  47	u16 t = btrfs_super_csum_type(s);
  48	/*
  49	 * csum type is validated at mount time
  50	 */
  51	return btrfs_csums[t].size;
  52}
  53
  54const char *btrfs_super_csum_name(u16 csum_type)
  55{
  56	/* csum type is validated at mount time */
  57	return btrfs_csums[csum_type].name;
  58}
  59
  60/*
  61 * Return driver name if defined, otherwise the name that's also a valid driver
  62 * name
  63 */
  64const char *btrfs_super_csum_driver(u16 csum_type)
  65{
  66	/* csum type is validated at mount time */
  67	return btrfs_csums[csum_type].driver[0] ?
  68		btrfs_csums[csum_type].driver :
  69		btrfs_csums[csum_type].name;
  70}
  71
  72size_t __attribute_const__ btrfs_get_num_csums(void)
  73{
  74	return ARRAY_SIZE(btrfs_csums);
  75}
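/*
 * Illustrative sketch (hypothetical helper): the BTRFS_CSUM_TYPE_* values
 * index btrfs_csums[] contiguously, so btrfs_get_num_csums() bounds a simple
 * scan over every supported checksum algorithm and its digest size.
 */
static void print_supported_csums(void)
{
	u16 t;

	for (t = 0; t < btrfs_get_num_csums(); t++)
		pr_info("btrfs csum type %u: %s (%u bytes)\n",
			t, btrfs_super_csum_name(t), btrfs_csums[t].size);
}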
  76
  77struct btrfs_path *btrfs_alloc_path(void)
  78{
  79	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
  80}
  81
  82/* this also releases the path */
  83void btrfs_free_path(struct btrfs_path *p)
  84{
  85	if (!p)
  86		return;
  87	btrfs_release_path(p);
  88	kmem_cache_free(btrfs_path_cachep, p);
  89}
  90
  91/*
  92 * path release drops references on the extent buffers in the path
  93 * and it drops any locks held by this path
  94 *
  95 * It is safe to call this on paths that have no locks or extent buffers held.
  96 */
  97noinline void btrfs_release_path(struct btrfs_path *p)
  98{
  99	int i;
 100
 101	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 102		p->slots[i] = 0;
 103		if (!p->nodes[i])
 104			continue;
 105		if (p->locks[i]) {
 106			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
 107			p->locks[i] = 0;
 108		}
 109		free_extent_buffer(p->nodes[i]);
 110		p->nodes[i] = NULL;
 111	}
 112}
 113
 114/*
 115 * safely gets a reference on the root node of a tree.  A lock
 116 * is not taken, so a concurrent writer may put a different node
 117 * at the root of the tree.  See btrfs_lock_root_node for the
 118 * looping required.
 119 *
 120 * The extent buffer returned by this has a reference taken, so
 121 * it won't disappear.  It may stop being the root of the tree
 122 * at any time because there are no locks held.
 123 */
 124struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
 125{
 126	struct extent_buffer *eb;
 127
 128	while (1) {
 129		rcu_read_lock();
 130		eb = rcu_dereference(root->node);
 131
 132		/*
 133		 * RCU really hurts here: we could free up the root node because
 134		 * it was COWed but not yet see the new root node, so do the
 135		 * inc_not_zero dance; if it doesn't work, synchronize_rcu()
 136		 * and try again.
 137		 */
 138		if (atomic_inc_not_zero(&eb->refs)) {
 139			rcu_read_unlock();
 140			break;
 141		}
 142		rcu_read_unlock();
 143		synchronize_rcu();
 144	}
 145	return eb;
 146}
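/*
 * Usage sketch (hypothetical caller): the buffer returned by
 * btrfs_root_node() is pinned but not locked, so it may stop being the root
 * at any moment; drop the reference with free_extent_buffer() when done.
 */
static u64 peek_root_generation(struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_root_node(root);
	u64 gen = btrfs_header_generation(eb);

	free_extent_buffer(eb);
	return gen;	/* may already be stale, by design */
}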
 147
 148/*
 149 * Cow-only roots (not-shareable trees, everything that is not a subvolume or
 150 * reloc root) just get put onto a simple dirty list.  The transaction code
 151 * walks this list to make sure they get properly updated on disk.
 152 */
 153static void add_root_to_dirty_list(struct btrfs_root *root)
 154{
 155	struct btrfs_fs_info *fs_info = root->fs_info;
 156
 157	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
 158	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
 159		return;
 160
 161	spin_lock(&fs_info->trans_lock);
 162	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
 163		/* Want the extent tree to be the last on the list */
 164		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
 165			list_move_tail(&root->dirty_list,
 166				       &fs_info->dirty_cowonly_roots);
 167		else
 168			list_move(&root->dirty_list,
 169				  &fs_info->dirty_cowonly_roots);
 170	}
 171	spin_unlock(&fs_info->trans_lock);
 172}
 173
 174/*
 175 * used by snapshot creation to make a copy of a root for a tree with
 176 * a given objectid.  The buffer with the new root node is returned in
 177 * cow_ret, and this function returns zero on success or a negative error code.
 178 */
 179int btrfs_copy_root(struct btrfs_trans_handle *trans,
 180		      struct btrfs_root *root,
 181		      struct extent_buffer *buf,
 182		      struct extent_buffer **cow_ret, u64 new_root_objectid)
 183{
 184	struct btrfs_fs_info *fs_info = root->fs_info;
 185	struct extent_buffer *cow;
 186	int ret = 0;
 187	int level;
 188	struct btrfs_disk_key disk_key;
 189
 190	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 191		trans->transid != fs_info->running_transaction->transid);
 192	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 193		trans->transid != root->last_trans);
 194
 195	level = btrfs_header_level(buf);
 196	if (level == 0)
 197		btrfs_item_key(buf, &disk_key, 0);
 198	else
 199		btrfs_node_key(buf, &disk_key, 0);
 200
 201	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
 202				     &disk_key, level, buf->start, 0,
 203				     BTRFS_NESTING_NEW_ROOT);
 204	if (IS_ERR(cow))
 205		return PTR_ERR(cow);
 206
 207	copy_extent_buffer_full(cow, buf);
 208	btrfs_set_header_bytenr(cow, cow->start);
 209	btrfs_set_header_generation(cow, trans->transid);
 210	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
 211	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
 212				     BTRFS_HEADER_FLAG_RELOC);
 213	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
 214		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
 215	else
 216		btrfs_set_header_owner(cow, new_root_objectid);
 217
 218	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
 219
 220	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 221	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
 222		ret = btrfs_inc_ref(trans, root, cow, 1);
 223	else
 224		ret = btrfs_inc_ref(trans, root, cow, 0);
 225	if (ret) {
 226		btrfs_tree_unlock(cow);
 227		free_extent_buffer(cow);
 228		btrfs_abort_transaction(trans, ret);
 229		return ret;
 230	}
 231
 232	btrfs_mark_buffer_dirty(cow);
 233	*cow_ret = cow;
 234	return 0;
 235}
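/*
 * Simplified caller sketch (hypothetical, error handling trimmed): snapshot
 * creation locks the source root's node, copies it under the transaction and
 * receives the new root node back in *copy.
 */
static int snapshot_root_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *src, u64 new_objectid,
			      struct extent_buffer **copy)
{
	struct extent_buffer *node = btrfs_lock_root_node(src);
	int ret;

	ret = btrfs_copy_root(trans, src, node, copy, new_objectid);
	btrfs_tree_unlock(node);
	free_extent_buffer(node);
	return ret;
}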
 236
 237/*
 238 * check if the tree block can be shared by multiple trees
 239 */
 240int btrfs_block_can_be_shared(struct btrfs_root *root,
 241			      struct extent_buffer *buf)
 242{
 243	/*
 244	 * Tree blocks not in shareable trees and tree roots are never shared.
 245	 * If a block was allocated after the last snapshot and the block was
 246	 * not allocated by tree relocation, we know the block is not shared.
 247	 */
 248	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 249	    buf != root->node && buf != root->commit_root &&
 250	    (btrfs_header_generation(buf) <=
 251	     btrfs_root_last_snapshot(&root->root_item) ||
 252	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
 253		return 1;
 254
 255	return 0;
 256}
 257
 258static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 259				       struct btrfs_root *root,
 260				       struct extent_buffer *buf,
 261				       struct extent_buffer *cow,
 262				       int *last_ref)
 263{
 264	struct btrfs_fs_info *fs_info = root->fs_info;
 265	u64 refs;
 266	u64 owner;
 267	u64 flags;
 268	u64 new_flags = 0;
 269	int ret;
 270
 271	/*
 272	 * Backrefs update rules:
 273	 *
 274	 * Always use full backrefs for extent pointers in tree block
 275	 * allocated by tree relocation.
 276	 *
 277	 * If a shared tree block is no longer referenced by its owner
 278	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
 279	 * use full backrefs for extent pointers in tree block.
 280	 *
 281	 * If a tree block is being relocated
 282	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
 283	 * use full backrefs for extent pointers in tree block.
 284	 * The reason for this is that some operations (such as dropping a tree)
 285	 * are only allowed for blocks that use full backrefs.
 286	 */
 287
 288	if (btrfs_block_can_be_shared(root, buf)) {
 289		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
 290					       btrfs_header_level(buf), 1,
 291					       &refs, &flags);
 292		if (ret)
 293			return ret;
 294		if (refs == 0) {
 295			ret = -EROFS;
 296			btrfs_handle_fs_error(fs_info, ret, NULL);
 297			return ret;
 298		}
 299	} else {
 300		refs = 1;
 301		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 302		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
 303			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 304		else
 305			flags = 0;
 306	}
 307
 308	owner = btrfs_header_owner(buf);
 309	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
 310	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
 311
 312	if (refs > 1) {
 313		if ((owner == root->root_key.objectid ||
 314		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
 315		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
 316			ret = btrfs_inc_ref(trans, root, buf, 1);
 317			if (ret)
 318				return ret;
 319
 320			if (root->root_key.objectid ==
 321			    BTRFS_TREE_RELOC_OBJECTID) {
 322				ret = btrfs_dec_ref(trans, root, buf, 0);
 323				if (ret)
 324					return ret;
 325				ret = btrfs_inc_ref(trans, root, cow, 1);
 326				if (ret)
 327					return ret;
 328			}
 329			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
 330		} else {
 331
 332			if (root->root_key.objectid ==
 333			    BTRFS_TREE_RELOC_OBJECTID)
 334				ret = btrfs_inc_ref(trans, root, cow, 1);
 335			else
 336				ret = btrfs_inc_ref(trans, root, cow, 0);
 337			if (ret)
 338				return ret;
 339		}
 340		if (new_flags != 0) {
 341			int level = btrfs_header_level(buf);
 342
 343			ret = btrfs_set_disk_extent_flags(trans, buf,
 344							  new_flags, level, 0);
 345			if (ret)
 346				return ret;
 347		}
 348	} else {
 349		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
 350			if (root->root_key.objectid ==
 351			    BTRFS_TREE_RELOC_OBJECTID)
 352				ret = btrfs_inc_ref(trans, root, cow, 1);
 353			else
 354				ret = btrfs_inc_ref(trans, root, cow, 0);
 355			if (ret)
 356				return ret;
 357			ret = btrfs_dec_ref(trans, root, buf, 1);
 358			if (ret)
 359				return ret;
 360		}
 361		btrfs_clean_tree_block(buf);
 362		*last_ref = 1;
 363	}
 364	return 0;
 365}
 366
 367/*
 368 * does the dirty work in cow of a single block.  The parent block (if
 369 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 370 * dirty and returned locked.  If you modify the block it needs to be marked
 371 * dirty again.
 372 *
 373 * search_start -- an allocation hint for the new block
 374 *
 375 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 376 * bytes the allocator should try to find free next to the block it returns.
 377 * This is just a hint and may be ignored by the allocator.
 378 */
 379static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 380			     struct btrfs_root *root,
 381			     struct extent_buffer *buf,
 382			     struct extent_buffer *parent, int parent_slot,
 383			     struct extent_buffer **cow_ret,
 384			     u64 search_start, u64 empty_size,
 385			     enum btrfs_lock_nesting nest)
 386{
 387	struct btrfs_fs_info *fs_info = root->fs_info;
 388	struct btrfs_disk_key disk_key;
 389	struct extent_buffer *cow;
 390	int level, ret;
 391	int last_ref = 0;
 392	int unlock_orig = 0;
 393	u64 parent_start = 0;
 394
 395	if (*cow_ret == buf)
 396		unlock_orig = 1;
 397
 398	btrfs_assert_tree_locked(buf);
 399
 400	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 401		trans->transid != fs_info->running_transaction->transid);
 402	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 403		trans->transid != root->last_trans);
 404
 405	level = btrfs_header_level(buf);
 406
 407	if (level == 0)
 408		btrfs_item_key(buf, &disk_key, 0);
 409	else
 410		btrfs_node_key(buf, &disk_key, 0);
 411
 412	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
 413		parent_start = parent->start;
 414
 415	cow = btrfs_alloc_tree_block(trans, root, parent_start,
 416				     root->root_key.objectid, &disk_key, level,
 417				     search_start, empty_size, nest);
 418	if (IS_ERR(cow))
 419		return PTR_ERR(cow);
 420
 421	/* cow is set to blocking by btrfs_init_new_buffer */
 422
 423	copy_extent_buffer_full(cow, buf);
 424	btrfs_set_header_bytenr(cow, cow->start);
 425	btrfs_set_header_generation(cow, trans->transid);
 426	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
 427	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
 428				     BTRFS_HEADER_FLAG_RELOC);
 429	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
 430		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
 431	else
 432		btrfs_set_header_owner(cow, root->root_key.objectid);
 433
 434	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
 435
 436	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
 437	if (ret) {
 438		btrfs_tree_unlock(cow);
 439		free_extent_buffer(cow);
 440		btrfs_abort_transaction(trans, ret);
 441		return ret;
 442	}
 443
 444	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
 445		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
 446		if (ret) {
 447			btrfs_tree_unlock(cow);
 448			free_extent_buffer(cow);
 449			btrfs_abort_transaction(trans, ret);
 450			return ret;
 451		}
 452	}
 453
 454	if (buf == root->node) {
 455		WARN_ON(parent && parent != buf);
 456		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 457		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
 458			parent_start = buf->start;
 459
 460		atomic_inc(&cow->refs);
 461		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
 462		BUG_ON(ret < 0);
 463		rcu_assign_pointer(root->node, cow);
 464
 465		btrfs_free_tree_block(trans, root, buf, parent_start,
 466				      last_ref);
 467		free_extent_buffer(buf);
 468		add_root_to_dirty_list(root);
 469	} else {
 470		WARN_ON(trans->transid != btrfs_header_generation(parent));
 471		btrfs_tree_mod_log_insert_key(parent, parent_slot,
 472					      BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
 473		btrfs_set_node_blockptr(parent, parent_slot,
 474					cow->start);
 475		btrfs_set_node_ptr_generation(parent, parent_slot,
 476					      trans->transid);
 477		btrfs_mark_buffer_dirty(parent);
 478		if (last_ref) {
 479			ret = btrfs_tree_mod_log_free_eb(buf);
 480			if (ret) {
 481				btrfs_tree_unlock(cow);
 482				free_extent_buffer(cow);
 483				btrfs_abort_transaction(trans, ret);
 484				return ret;
 485			}
 486		}
 487		btrfs_free_tree_block(trans, root, buf, parent_start,
 488				      last_ref);
 489	}
 490	if (unlock_orig)
 491		btrfs_tree_unlock(buf);
 492	free_extent_buffer_stale(buf);
 493	btrfs_mark_buffer_dirty(cow);
 494	*cow_ret = cow;
 495	return 0;
 496}
 497
 498static inline int should_cow_block(struct btrfs_trans_handle *trans,
 499				   struct btrfs_root *root,
 500				   struct extent_buffer *buf)
 501{
 502	if (btrfs_is_testing(root->fs_info))
 503		return 0;
 504
 505	/* Ensure we can see the FORCE_COW bit */
 506	smp_mb__before_atomic();
 507
 508	/*
 509	 * We do not need to cow a block if
 510	 * 1) this block is not created or changed in this transaction;
 511	 * 2) this block does not belong to TREE_RELOC tree;
 512	 * 3) the root is not forced COW.
 513	 *
 514	 * What is forced COW:
 515	 *    when we create snapshot during committing the transaction,
 516	 *    after we've finished copying src root, we must COW the shared
 517	 *    block to ensure the metadata consistency.
 518	 */
 519	if (btrfs_header_generation(buf) == trans->transid &&
 520	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
 521	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
 522	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
 523	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
 524		return 0;
 525	return 1;
 526}
 527
 528/*
 529 * cows a single block, see __btrfs_cow_block for the real work.
 530 * This version of it has extra checks so that a block isn't COWed more than
 531 * once per transaction, as long as it hasn't been written yet
 532 */
 533noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 534		    struct btrfs_root *root, struct extent_buffer *buf,
 535		    struct extent_buffer *parent, int parent_slot,
 536		    struct extent_buffer **cow_ret,
 537		    enum btrfs_lock_nesting nest)
 538{
 539	struct btrfs_fs_info *fs_info = root->fs_info;
 540	u64 search_start;
 541	int ret;
 542
 543	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
 544		btrfs_err(fs_info,
 545			"COW'ing blocks on a fs root that's being dropped");
 546
 547	if (trans->transaction != fs_info->running_transaction)
 548		WARN(1, KERN_CRIT "trans %llu running %llu\n",
 549		       trans->transid,
 550		       fs_info->running_transaction->transid);
 551
 552	if (trans->transid != fs_info->generation)
 553		WARN(1, KERN_CRIT "trans %llu running %llu\n",
 554		       trans->transid, fs_info->generation);
 555
 556	if (!should_cow_block(trans, root, buf)) {
 557		*cow_ret = buf;
 558		return 0;
 559	}
 560
 561	search_start = buf->start & ~((u64)SZ_1G - 1);
 562
 563	/*
 564	 * Before CoWing this block for later modification, check if it's
 565	 * the subtree root and do the delayed subtree trace if needed.
 566	 *
 567	 * Also, we don't care about the error, as it's handled internally.
 568	 */
 569	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
 570	ret = __btrfs_cow_block(trans, root, buf, parent,
 571				 parent_slot, cow_ret, search_start, 0, nest);
 572
 573	trace_btrfs_cow_block(root, buf, *cow_ret);
 574
 575	return ret;
 576}
 577ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
 578
 579/*
 580 * helper function for defrag to decide if two blocks pointed to by a
 581 * node are actually close by
 582 */
 583static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
 584{
 585	if (blocknr < other && other - (blocknr + blocksize) < 32768)
 586		return 1;
 587	if (blocknr > other && blocknr - (other + blocksize) < 32768)
 588		return 1;
 589	return 0;
 590}
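/*
 * Worked example (hypothetical numbers): with a 16KiB blocksize, blocks at
 * 1MiB and 1MiB + 20KiB are close (gap of 4KiB < 32KiB), while blocks at
 * 1MiB and 1MiB + 64KiB are not (gap of 48KiB).
 */
static void close_blocks_example(void)
{
	WARN_ON(close_blocks(SZ_1M, SZ_1M + 20 * SZ_1K, SZ_16K) != 1);
	WARN_ON(close_blocks(SZ_1M, SZ_1M + 64 * SZ_1K, SZ_16K) != 0);
}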
 591
 592#ifdef __LITTLE_ENDIAN
 593
 594/*
 595 * Compare two keys, on little-endian the disk order is same as CPU order and
 596 * we can avoid the conversion.
 597 */
 598static int comp_keys(const struct btrfs_disk_key *disk_key,
 599		     const struct btrfs_key *k2)
 600{
 601	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
 602
 603	return btrfs_comp_cpu_keys(k1, k2);
 604}
 605
 606#else
 607
 608/*
 609 * compare two keys in a memcmp fashion
 610 */
 611static int comp_keys(const struct btrfs_disk_key *disk,
 612		     const struct btrfs_key *k2)
 613{
 614	struct btrfs_key k1;
 615
 616	btrfs_disk_key_to_cpu(&k1, disk);
 617
 618	return btrfs_comp_cpu_keys(&k1, k2);
 619}
 620#endif
 621
 622/*
 623 * same as comp_keys only with two btrfs_key's
 624 */
 625int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
 626{
 627	if (k1->objectid > k2->objectid)
 628		return 1;
 629	if (k1->objectid < k2->objectid)
 630		return -1;
 631	if (k1->type > k2->type)
 632		return 1;
 633	if (k1->type < k2->type)
 634		return -1;
 635	if (k1->offset > k2->offset)
 636		return 1;
 637	if (k1->offset < k2->offset)
 638		return -1;
 639	return 0;
 640}
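/*
 * Example (hypothetical helper): keys order by objectid, then type, then
 * offset, so inode 256's INODE_ITEM sorts before its INODE_REF.
 */
static void comp_cpu_keys_example(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_INODE_REF_KEY,
			       .offset = 0 };

	WARN_ON(btrfs_comp_cpu_keys(&a, &b) != -1);	/* a sorts first */
}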
 641
 642/*
 643 * this is used by the defrag code to go through all the
 644 * leaves pointed to by a node and reallocate them so that
 645 * disk order is close to key order
 646 */
 647int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 648		       struct btrfs_root *root, struct extent_buffer *parent,
 649		       int start_slot, u64 *last_ret,
 650		       struct btrfs_key *progress)
 651{
 652	struct btrfs_fs_info *fs_info = root->fs_info;
 653	struct extent_buffer *cur;
 654	u64 blocknr;
 655	u64 search_start = *last_ret;
 656	u64 last_block = 0;
 657	u64 other;
 658	u32 parent_nritems;
 659	int end_slot;
 660	int i;
 661	int err = 0;
 662	u32 blocksize;
 663	int progress_passed = 0;
 664	struct btrfs_disk_key disk_key;
 665
 666	WARN_ON(trans->transaction != fs_info->running_transaction);
 667	WARN_ON(trans->transid != fs_info->generation);
 668
 669	parent_nritems = btrfs_header_nritems(parent);
 670	blocksize = fs_info->nodesize;
 671	end_slot = parent_nritems - 1;
 672
 673	if (parent_nritems <= 1)
 674		return 0;
 675
 676	for (i = start_slot; i <= end_slot; i++) {
 677		int close = 1;
 678
 679		btrfs_node_key(parent, &disk_key, i);
 680		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
 681			continue;
 682
 683		progress_passed = 1;
 684		blocknr = btrfs_node_blockptr(parent, i);
 685		if (last_block == 0)
 686			last_block = blocknr;
 687
 688		if (i > 0) {
 689			other = btrfs_node_blockptr(parent, i - 1);
 690			close = close_blocks(blocknr, other, blocksize);
 691		}
 692		if (!close && i < end_slot) {
 693			other = btrfs_node_blockptr(parent, i + 1);
 694			close = close_blocks(blocknr, other, blocksize);
 695		}
 696		if (close) {
 697			last_block = blocknr;
 698			continue;
 699		}
 700
 701		cur = btrfs_read_node_slot(parent, i);
 702		if (IS_ERR(cur))
 703			return PTR_ERR(cur);
 704		if (search_start == 0)
 705			search_start = last_block;
 706
 707		btrfs_tree_lock(cur);
 708		err = __btrfs_cow_block(trans, root, cur, parent, i,
 709					&cur, search_start,
 710					min(16 * blocksize,
 711					    (end_slot - i) * blocksize),
 712					BTRFS_NESTING_COW);
 713		if (err) {
 714			btrfs_tree_unlock(cur);
 715			free_extent_buffer(cur);
 716			break;
 717		}
 718		search_start = cur->start;
 719		last_block = cur->start;
 720		*last_ret = search_start;
 721		btrfs_tree_unlock(cur);
 722		free_extent_buffer(cur);
 723	}
 724	return err;
 725}
 726
 727/*
 728 * search for key in the extent_buffer.  The items start at offset p,
 729 * and they are item_size apart.  There are 'max' items in p.
 730 *
 731 * the slot in the array is returned via slot, and it points to
 732 * the place where you would insert key if it is not found in
 733 * the array.
 734 *
 735 * slot may point to max if the key is bigger than all of the keys
 736 */
 737static noinline int generic_bin_search(struct extent_buffer *eb,
 738				       unsigned long p, int item_size,
 739				       const struct btrfs_key *key,
 740				       int max, int *slot)
 741{
 742	int low = 0;
 743	int high = max;
 744	int ret;
 745	const int key_size = sizeof(struct btrfs_disk_key);
 746
 747	if (low > high) {
 748		btrfs_err(eb->fs_info,
 749		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
 750			  __func__, low, high, eb->start,
 751			  btrfs_header_owner(eb), btrfs_header_level(eb));
 752		return -EINVAL;
 753	}
 754
 755	while (low < high) {
 756		unsigned long oip;
 757		unsigned long offset;
 758		struct btrfs_disk_key *tmp;
 759		struct btrfs_disk_key unaligned;
 760		int mid;
 761
 762		mid = (low + high) / 2;
 763		offset = p + mid * item_size;
 764		oip = offset_in_page(offset);
 765
 766		if (oip + key_size <= PAGE_SIZE) {
 767			const unsigned long idx = get_eb_page_index(offset);
 768			char *kaddr = page_address(eb->pages[idx]);
 769
 770			oip = get_eb_offset_in_page(eb, offset);
 771			tmp = (struct btrfs_disk_key *)(kaddr + oip);
 772		} else {
 773			read_extent_buffer(eb, &unaligned, offset, key_size);
 774			tmp = &unaligned;
 775		}
 776
 777		ret = comp_keys(tmp, key);
 778
 779		if (ret < 0)
 780			low = mid + 1;
 781		else if (ret > 0)
 782			high = mid;
 783		else {
 784			*slot = mid;
 785			return 0;
 786		}
 787	}
 788	*slot = low;
 789	return 1;
 790}
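/*
 * The same lower-bound loop, distilled over a plain u64 array for
 * illustration (hypothetical helper): returns 0 with the slot on an exact
 * match, or 1 with the slot where the value would be inserted.
 */
static int lower_bound_u64(const u64 *items, int max, u64 key, int *slot)
{
	int low = 0;
	int high = max;

	while (low < high) {
		int mid = (low + high) / 2;

		if (items[mid] < key)
			low = mid + 1;
		else if (items[mid] > key)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;	/* may equal max if key is past the end */
	return 1;
}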
 791
 792/*
 793 * simple bin_search frontend that does the right thing for
 794 * leaves vs nodes
 795 */
 796int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
 797		     int *slot)
 798{
 799	if (btrfs_header_level(eb) == 0)
 800		return generic_bin_search(eb,
 801					  offsetof(struct btrfs_leaf, items),
 802					  sizeof(struct btrfs_item),
 803					  key, btrfs_header_nritems(eb),
 804					  slot);
 805	else
 806		return generic_bin_search(eb,
 807					  offsetof(struct btrfs_node, ptrs),
 808					  sizeof(struct btrfs_key_ptr),
 809					  key, btrfs_header_nritems(eb),
 810					  slot);
 811}
 812
 813static void root_add_used(struct btrfs_root *root, u32 size)
 814{
 815	spin_lock(&root->accounting_lock);
 816	btrfs_set_root_used(&root->root_item,
 817			    btrfs_root_used(&root->root_item) + size);
 818	spin_unlock(&root->accounting_lock);
 819}
 820
 821static void root_sub_used(struct btrfs_root *root, u32 size)
 822{
 823	spin_lock(&root->accounting_lock);
 824	btrfs_set_root_used(&root->root_item,
 825			    btrfs_root_used(&root->root_item) - size);
 826	spin_unlock(&root->accounting_lock);
 827}
 828
 829/* given a node and slot number, this reads the block it points to.  The
 830 * extent buffer is returned with a reference taken (but unlocked).
 831 */
 832struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
 833					   int slot)
 834{
 835	int level = btrfs_header_level(parent);
 836	struct extent_buffer *eb;
 837	struct btrfs_key first_key;
 838
 839	if (slot < 0 || slot >= btrfs_header_nritems(parent))
 840		return ERR_PTR(-ENOENT);
 841
 842	BUG_ON(level == 0);
 843
 844	btrfs_node_key_to_cpu(parent, &first_key, slot);
 845	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
 846			     btrfs_header_owner(parent),
 847			     btrfs_node_ptr_generation(parent, slot),
 848			     level - 1, &first_key);
 849	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
 850		free_extent_buffer(eb);
 851		eb = ERR_PTR(-EIO);
 852	}
 853
 854	return eb;
 855}
 856
 857/*
 858 * node level balancing, used to make sure nodes are in proper order for
 859 * item deletion.  We balance from the top down, so we have to make sure
 860 * that a deletion won't leave a node completely empty later on.
 861 */
 862static noinline int balance_level(struct btrfs_trans_handle *trans,
 863			 struct btrfs_root *root,
 864			 struct btrfs_path *path, int level)
 865{
 866	struct btrfs_fs_info *fs_info = root->fs_info;
 867	struct extent_buffer *right = NULL;
 868	struct extent_buffer *mid;
 869	struct extent_buffer *left = NULL;
 870	struct extent_buffer *parent = NULL;
 871	int ret = 0;
 872	int wret;
 873	int pslot;
 874	int orig_slot = path->slots[level];
 875	u64 orig_ptr;
 876
 877	ASSERT(level > 0);
 878
 879	mid = path->nodes[level];
 880
 881	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
 882	WARN_ON(btrfs_header_generation(mid) != trans->transid);
 883
 884	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
 885
 886	if (level < BTRFS_MAX_LEVEL - 1) {
 887		parent = path->nodes[level + 1];
 888		pslot = path->slots[level + 1];
 889	}
 890
 891	/*
 892	 * deal with the case where there is only one pointer in the root
 893	 * by promoting the node below to a root
 894	 */
 895	if (!parent) {
 896		struct extent_buffer *child;
 897
 898		if (btrfs_header_nritems(mid) != 1)
 899			return 0;
 900
 901		/* promote the child to a root */
 902		child = btrfs_read_node_slot(mid, 0);
 903		if (IS_ERR(child)) {
 904			ret = PTR_ERR(child);
 905			btrfs_handle_fs_error(fs_info, ret, NULL);
 906			goto enospc;
 907		}
 908
 909		btrfs_tree_lock(child);
 910		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
 911				      BTRFS_NESTING_COW);
 912		if (ret) {
 913			btrfs_tree_unlock(child);
 914			free_extent_buffer(child);
 915			goto enospc;
 916		}
 917
 918		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
 919		BUG_ON(ret < 0);
 920		rcu_assign_pointer(root->node, child);
 921
 922		add_root_to_dirty_list(root);
 923		btrfs_tree_unlock(child);
 924
 925		path->locks[level] = 0;
 926		path->nodes[level] = NULL;
 927		btrfs_clean_tree_block(mid);
 928		btrfs_tree_unlock(mid);
 929		/* once for the path */
 930		free_extent_buffer(mid);
 931
 932		root_sub_used(root, mid->len);
 933		btrfs_free_tree_block(trans, root, mid, 0, 1);
 934		/* once for the root ptr */
 935		free_extent_buffer_stale(mid);
 936		return 0;
 937	}
 938	if (btrfs_header_nritems(mid) >
 939	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
 940		return 0;
 941
 942	left = btrfs_read_node_slot(parent, pslot - 1);
 943	if (IS_ERR(left))
 944		left = NULL;
 945
 946	if (left) {
 947		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
 948		wret = btrfs_cow_block(trans, root, left,
 949				       parent, pslot - 1, &left,
 950				       BTRFS_NESTING_LEFT_COW);
 951		if (wret) {
 952			ret = wret;
 953			goto enospc;
 954		}
 955	}
 956
 957	right = btrfs_read_node_slot(parent, pslot + 1);
 958	if (IS_ERR(right))
 959		right = NULL;
 960
 961	if (right) {
 962		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
 963		wret = btrfs_cow_block(trans, root, right,
 964				       parent, pslot + 1, &right,
 965				       BTRFS_NESTING_RIGHT_COW);
 966		if (wret) {
 967			ret = wret;
 968			goto enospc;
 969		}
 970	}
 971
 972	/* first, try to make some room in the middle buffer */
 973	if (left) {
 974		orig_slot += btrfs_header_nritems(left);
 975		wret = push_node_left(trans, left, mid, 1);
 976		if (wret < 0)
 977			ret = wret;
 978	}
 979
 980	/*
 981	 * then try to empty the right most buffer into the middle
 982	 */
 983	if (right) {
 984		wret = push_node_left(trans, mid, right, 1);
 985		if (wret < 0 && wret != -ENOSPC)
 986			ret = wret;
 987		if (btrfs_header_nritems(right) == 0) {
 988			btrfs_clean_tree_block(right);
 989			btrfs_tree_unlock(right);
 990			del_ptr(root, path, level + 1, pslot + 1);
 991			root_sub_used(root, right->len);
 992			btrfs_free_tree_block(trans, root, right, 0, 1);
 993			free_extent_buffer_stale(right);
 994			right = NULL;
 995		} else {
 996			struct btrfs_disk_key right_key;
 997			btrfs_node_key(right, &right_key, 0);
 998			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
 999					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1000			BUG_ON(ret < 0);
1001			btrfs_set_node_key(parent, &right_key, pslot + 1);
1002			btrfs_mark_buffer_dirty(parent);
1003		}
1004	}
1005	if (btrfs_header_nritems(mid) == 1) {
1006		/*
1007		 * we're not allowed to leave a node with one item in the
1008		 * tree during a delete.  A deletion from lower in the tree
1009		 * could try to delete the only pointer in this node.
1010		 * So, pull some keys from the left.
1011		 * There has to be a left pointer at this point because
1012		 * otherwise we would have pulled some pointers from the
1013		 * right
1014		 */
1015		if (!left) {
1016			ret = -EROFS;
1017			btrfs_handle_fs_error(fs_info, ret, NULL);
1018			goto enospc;
1019		}
1020		wret = balance_node_right(trans, mid, left);
1021		if (wret < 0) {
1022			ret = wret;
1023			goto enospc;
1024		}
1025		if (wret == 1) {
1026			wret = push_node_left(trans, left, mid, 1);
1027			if (wret < 0)
1028				ret = wret;
1029		}
1030		BUG_ON(wret == 1);
1031	}
1032	if (btrfs_header_nritems(mid) == 0) {
1033		btrfs_clean_tree_block(mid);
1034		btrfs_tree_unlock(mid);
1035		del_ptr(root, path, level + 1, pslot);
1036		root_sub_used(root, mid->len);
1037		btrfs_free_tree_block(trans, root, mid, 0, 1);
1038		free_extent_buffer_stale(mid);
1039		mid = NULL;
1040	} else {
1041		/* update the parent key to reflect our changes */
1042		struct btrfs_disk_key mid_key;
1043		btrfs_node_key(mid, &mid_key, 0);
1044		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1045				BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1046		BUG_ON(ret < 0);
1047		btrfs_set_node_key(parent, &mid_key, pslot);
1048		btrfs_mark_buffer_dirty(parent);
1049	}
1050
1051	/* update the path */
1052	if (left) {
1053		if (btrfs_header_nritems(left) > orig_slot) {
1054			atomic_inc(&left->refs);
1055			/* left was locked after cow */
1056			path->nodes[level] = left;
1057			path->slots[level + 1] -= 1;
1058			path->slots[level] = orig_slot;
1059			if (mid) {
1060				btrfs_tree_unlock(mid);
1061				free_extent_buffer(mid);
1062			}
1063		} else {
1064			orig_slot -= btrfs_header_nritems(left);
1065			path->slots[level] = orig_slot;
1066		}
1067	}
1068	/* double check we haven't messed things up */
1069	if (orig_ptr !=
1070	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1071		BUG();
1072enospc:
1073	if (right) {
1074		btrfs_tree_unlock(right);
1075		free_extent_buffer(right);
1076	}
1077	if (left) {
1078		if (path->nodes[level] != left)
1079			btrfs_tree_unlock(left);
1080		free_extent_buffer(left);
1081	}
1082	return ret;
1083}
1084
1085/* Node balancing for insertion.  Here we only split or push nodes around
1086 * when they are completely full.  This is also done top down, so we
1087 * have to be pessimistic.
1088 */
1089static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1090					  struct btrfs_root *root,
1091					  struct btrfs_path *path, int level)
1092{
1093	struct btrfs_fs_info *fs_info = root->fs_info;
1094	struct extent_buffer *right = NULL;
1095	struct extent_buffer *mid;
1096	struct extent_buffer *left = NULL;
1097	struct extent_buffer *parent = NULL;
1098	int ret = 0;
1099	int wret;
1100	int pslot;
1101	int orig_slot = path->slots[level];
1102
1103	if (level == 0)
1104		return 1;
1105
1106	mid = path->nodes[level];
1107	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1108
1109	if (level < BTRFS_MAX_LEVEL - 1) {
1110		parent = path->nodes[level + 1];
1111		pslot = path->slots[level + 1];
1112	}
1113
1114	if (!parent)
1115		return 1;
1116
1117	left = btrfs_read_node_slot(parent, pslot - 1);
1118	if (IS_ERR(left))
1119		left = NULL;
1120
1121	/* first, try to make some room in the middle buffer */
1122	if (left) {
1123		u32 left_nr;
1124
1125		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1126
1127		left_nr = btrfs_header_nritems(left);
1128		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1129			wret = 1;
1130		} else {
1131			ret = btrfs_cow_block(trans, root, left, parent,
1132					      pslot - 1, &left,
1133					      BTRFS_NESTING_LEFT_COW);
1134			if (ret)
1135				wret = 1;
1136			else {
1137				wret = push_node_left(trans, left, mid, 0);
1138			}
1139		}
1140		if (wret < 0)
1141			ret = wret;
1142		if (wret == 0) {
1143			struct btrfs_disk_key disk_key;
1144			orig_slot += left_nr;
1145			btrfs_node_key(mid, &disk_key, 0);
1146			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1147					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1148			BUG_ON(ret < 0);
1149			btrfs_set_node_key(parent, &disk_key, pslot);
1150			btrfs_mark_buffer_dirty(parent);
1151			if (btrfs_header_nritems(left) > orig_slot) {
1152				path->nodes[level] = left;
1153				path->slots[level + 1] -= 1;
1154				path->slots[level] = orig_slot;
1155				btrfs_tree_unlock(mid);
1156				free_extent_buffer(mid);
1157			} else {
1158				orig_slot -=
1159					btrfs_header_nritems(left);
1160				path->slots[level] = orig_slot;
1161				btrfs_tree_unlock(left);
1162				free_extent_buffer(left);
1163			}
1164			return 0;
1165		}
1166		btrfs_tree_unlock(left);
1167		free_extent_buffer(left);
1168	}
1169	right = btrfs_read_node_slot(parent, pslot + 1);
1170	if (IS_ERR(right))
1171		right = NULL;
1172
1173	/*
1174	 * then try to empty the right most buffer into the middle
1175	 */
1176	if (right) {
1177		u32 right_nr;
1178
1179		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1180
1181		right_nr = btrfs_header_nritems(right);
1182		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1183			wret = 1;
1184		} else {
1185			ret = btrfs_cow_block(trans, root, right,
1186					      parent, pslot + 1,
1187					      &right, BTRFS_NESTING_RIGHT_COW);
1188			if (ret)
1189				wret = 1;
1190			else {
1191				wret = balance_node_right(trans, right, mid);
1192			}
1193		}
1194		if (wret < 0)
1195			ret = wret;
1196		if (wret == 0) {
1197			struct btrfs_disk_key disk_key;
1198
1199			btrfs_node_key(right, &disk_key, 0);
1200			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1201					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1202			BUG_ON(ret < 0);
1203			btrfs_set_node_key(parent, &disk_key, pslot + 1);
1204			btrfs_mark_buffer_dirty(parent);
1205
1206			if (btrfs_header_nritems(mid) <= orig_slot) {
1207				path->nodes[level] = right;
1208				path->slots[level + 1] += 1;
1209				path->slots[level] = orig_slot -
1210					btrfs_header_nritems(mid);
1211				btrfs_tree_unlock(mid);
1212				free_extent_buffer(mid);
1213			} else {
1214				btrfs_tree_unlock(right);
1215				free_extent_buffer(right);
1216			}
1217			return 0;
1218		}
1219		btrfs_tree_unlock(right);
1220		free_extent_buffer(right);
1221	}
1222	return 1;
1223}
1224
1225/*
1226 * readahead one full node of leaves, finding things that are close
 1227 * to the block in 'slot', and triggering readahead on them.
1228 */
1229static void reada_for_search(struct btrfs_fs_info *fs_info,
1230			     struct btrfs_path *path,
1231			     int level, int slot, u64 objectid)
1232{
1233	struct extent_buffer *node;
1234	struct btrfs_disk_key disk_key;
1235	u32 nritems;
1236	u64 search;
1237	u64 target;
1238	u64 nread = 0;
1239	u64 nread_max;
1240	struct extent_buffer *eb;
1241	u32 nr;
1242	u32 blocksize;
1243	u32 nscan = 0;
1244
1245	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1246		return;
1247
1248	if (!path->nodes[level])
1249		return;
1250
1251	node = path->nodes[level];
1252
1253	/*
1254	 * Since the time between visiting leaves is much shorter than the time
1255	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1256	 * much IO at once (possibly random).
1257	 */
1258	if (path->reada == READA_FORWARD_ALWAYS) {
1259		if (level > 1)
1260			nread_max = node->fs_info->nodesize;
1261		else
1262			nread_max = SZ_128K;
1263	} else {
1264		nread_max = SZ_64K;
1265	}
1266
1267	search = btrfs_node_blockptr(node, slot);
1268	blocksize = fs_info->nodesize;
1269	eb = find_extent_buffer(fs_info, search);
1270	if (eb) {
1271		free_extent_buffer(eb);
1272		return;
1273	}
1274
1275	target = search;
1276
1277	nritems = btrfs_header_nritems(node);
1278	nr = slot;
1279
1280	while (1) {
1281		if (path->reada == READA_BACK) {
1282			if (nr == 0)
1283				break;
1284			nr--;
1285		} else if (path->reada == READA_FORWARD ||
1286			   path->reada == READA_FORWARD_ALWAYS) {
1287			nr++;
1288			if (nr >= nritems)
1289				break;
1290		}
1291		if (path->reada == READA_BACK && objectid) {
1292			btrfs_node_key(node, &disk_key, nr);
1293			if (btrfs_disk_key_objectid(&disk_key) != objectid)
1294				break;
1295		}
1296		search = btrfs_node_blockptr(node, nr);
1297		if (path->reada == READA_FORWARD_ALWAYS ||
1298		    (search <= target && target - search <= 65536) ||
1299		    (search > target && search - target <= 65536)) {
1300			btrfs_readahead_node_child(node, nr);
1301			nread += blocksize;
1302		}
1303		nscan++;
1304		if (nread > nread_max || nscan > 32)
1305			break;
1306	}
1307}
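/*
 * Usage sketch (hypothetical caller): readahead is opt-in per path.  A
 * sequential scan sets path->reada before searching so that each descent
 * pulls in neighbouring blocks via reada_for_search().
 */
static int start_forward_scan(struct btrfs_root *root,
			      const struct btrfs_key *first,
			      struct btrfs_path *path)
{
	path->reada = READA_FORWARD;
	return btrfs_search_slot(NULL, root, first, path, 0, 0);
}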
1308
1309static noinline void reada_for_balance(struct btrfs_path *path, int level)
1310{
1311	struct extent_buffer *parent;
1312	int slot;
1313	int nritems;
1314
1315	parent = path->nodes[level + 1];
1316	if (!parent)
1317		return;
1318
1319	nritems = btrfs_header_nritems(parent);
1320	slot = path->slots[level + 1];
1321
1322	if (slot > 0)
1323		btrfs_readahead_node_child(parent, slot - 1);
1324	if (slot + 1 < nritems)
1325		btrfs_readahead_node_child(parent, slot + 1);
1326}
1327
1328
1329/*
1330 * when we walk down the tree, it is usually safe to unlock the higher layers
1331 * in the tree.  The exceptions are when our path goes through slot 0, because
1332 * operations on the tree might require changing key pointers higher up in the
1333 * tree.
1334 *
1335 * callers might also have set path->keep_locks, which tells this code to keep
1336 * the lock if the path points to the last slot in the block.  This is part of
1337 * walking through the tree, and selecting the next slot in the higher block.
1338 *
 1339 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
1340 * if lowest_unlock is 1, level 0 won't be unlocked
1341 */
1342static noinline void unlock_up(struct btrfs_path *path, int level,
1343			       int lowest_unlock, int min_write_lock_level,
1344			       int *write_lock_level)
1345{
1346	int i;
1347	int skip_level = level;
1348	int no_skips = 0;
1349	struct extent_buffer *t;
1350
1351	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1352		if (!path->nodes[i])
1353			break;
1354		if (!path->locks[i])
1355			break;
1356		if (!no_skips && path->slots[i] == 0) {
1357			skip_level = i + 1;
1358			continue;
1359		}
1360		if (!no_skips && path->keep_locks) {
1361			u32 nritems;
1362			t = path->nodes[i];
1363			nritems = btrfs_header_nritems(t);
1364			if (nritems < 1 || path->slots[i] >= nritems - 1) {
1365				skip_level = i + 1;
1366				continue;
1367			}
1368		}
1369		if (skip_level < i && i >= lowest_unlock)
1370			no_skips = 1;
1371
1372		t = path->nodes[i];
1373		if (i >= lowest_unlock && i > skip_level) {
1374			btrfs_tree_unlock_rw(t, path->locks[i]);
1375			path->locks[i] = 0;
1376			if (write_lock_level &&
1377			    i > min_write_lock_level &&
1378			    i <= *write_lock_level) {
1379				*write_lock_level = i - 1;
1380			}
1381		}
1382	}
1383}
1384
1385/*
1386 * helper function for btrfs_search_slot.  The goal is to find a block
1387 * in cache without setting the path to blocking.  If we find the block
1388 * we return zero and the path is unchanged.
1389 *
1390 * If we can't find the block, we set the path blocking and do some
1391 * reada.  -EAGAIN is returned and the search must be repeated.
1392 */
1393static int
1394read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1395		      struct extent_buffer **eb_ret, int level, int slot,
1396		      const struct btrfs_key *key)
1397{
1398	struct btrfs_fs_info *fs_info = root->fs_info;
1399	u64 blocknr;
1400	u64 gen;
1401	struct extent_buffer *tmp;
1402	struct btrfs_key first_key;
1403	int ret;
1404	int parent_level;
1405
1406	blocknr = btrfs_node_blockptr(*eb_ret, slot);
1407	gen = btrfs_node_ptr_generation(*eb_ret, slot);
1408	parent_level = btrfs_header_level(*eb_ret);
1409	btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
1410
1411	tmp = find_extent_buffer(fs_info, blocknr);
1412	if (tmp) {
1413		if (p->reada == READA_FORWARD_ALWAYS)
1414			reada_for_search(fs_info, p, level, slot, key->objectid);
1415
1416		/* first we do an atomic uptodate check */
1417		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1418			/*
1419			 * Do extra check for first_key, eb can be stale due to
1420			 * being cached, read from scrub, or have multiple
1421			 * parents (shared tree blocks).
1422			 */
1423			if (btrfs_verify_level_key(tmp,
1424					parent_level - 1, &first_key, gen)) {
1425				free_extent_buffer(tmp);
1426				return -EUCLEAN;
1427			}
1428			*eb_ret = tmp;
1429			return 0;
1430		}
1431
1432		/* now we're allowed to do a blocking uptodate check */
1433		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
1434		if (!ret) {
1435			*eb_ret = tmp;
1436			return 0;
1437		}
1438		free_extent_buffer(tmp);
1439		btrfs_release_path(p);
1440		return -EIO;
1441	}
1442
1443	/*
1444	 * reduce lock contention at high levels
1445	 * of the btree by dropping locks before
1446	 * we read.  Don't release the lock on the current
1447	 * level because we need to walk this node to figure
1448	 * out which blocks to read.
1449	 */
1450	btrfs_unlock_up_safe(p, level + 1);
1451
1452	if (p->reada != READA_NONE)
1453		reada_for_search(fs_info, p, level, slot, key->objectid);
1454
1455	ret = -EAGAIN;
1456	tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
1457			      gen, parent_level - 1, &first_key);
1458	if (!IS_ERR(tmp)) {
1459		/*
1460		 * If the read above didn't mark this buffer up to date,
1461		 * it will never end up being up to date.  Set ret to EIO now
1462		 * and give up so that our caller doesn't loop forever
1463		 * on our EAGAINs.
1464		 */
1465		if (!extent_buffer_uptodate(tmp))
1466			ret = -EIO;
1467		free_extent_buffer(tmp);
1468	} else {
1469		ret = PTR_ERR(tmp);
1470	}
1471
1472	btrfs_release_path(p);
1473	return ret;
1474}
1475
1476/*
1477 * helper function for btrfs_search_slot.  This does all of the checks
1478 * for node-level blocks and does any balancing required based on
1479 * the ins_len.
1480 *
1481 * If no extra work was required, zero is returned.  If we had to
1482 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1483 * start over
1484 */
1485static int
1486setup_nodes_for_search(struct btrfs_trans_handle *trans,
1487		       struct btrfs_root *root, struct btrfs_path *p,
1488		       struct extent_buffer *b, int level, int ins_len,
1489		       int *write_lock_level)
1490{
1491	struct btrfs_fs_info *fs_info = root->fs_info;
1492	int ret = 0;
1493
1494	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1495	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1496
1497		if (*write_lock_level < level + 1) {
1498			*write_lock_level = level + 1;
1499			btrfs_release_path(p);
1500			return -EAGAIN;
1501		}
1502
1503		reada_for_balance(p, level);
1504		ret = split_node(trans, root, p, level);
1505
1506		b = p->nodes[level];
1507	} else if (ins_len < 0 && btrfs_header_nritems(b) <
1508		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1509
1510		if (*write_lock_level < level + 1) {
1511			*write_lock_level = level + 1;
1512			btrfs_release_path(p);
1513			return -EAGAIN;
1514		}
1515
1516		reada_for_balance(p, level);
1517		ret = balance_level(trans, root, p, level);
1518		if (ret)
1519			return ret;
1520
1521		b = p->nodes[level];
1522		if (!b) {
1523			btrfs_release_path(p);
1524			return -EAGAIN;
1525		}
1526		BUG_ON(btrfs_header_nritems(b) == 1);
1527	}
1528	return ret;
1529}
1530
1531int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1532		u64 iobjectid, u64 ioff, u8 key_type,
1533		struct btrfs_key *found_key)
1534{
1535	int ret;
1536	struct btrfs_key key;
1537	struct extent_buffer *eb;
1538
1539	ASSERT(path);
1540	ASSERT(found_key);
1541
1542	key.type = key_type;
1543	key.objectid = iobjectid;
1544	key.offset = ioff;
1545
1546	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1547	if (ret < 0)
1548		return ret;
1549
1550	eb = path->nodes[0];
1551	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1552		ret = btrfs_next_leaf(fs_root, path);
1553		if (ret)
1554			return ret;
1555		eb = path->nodes[0];
1556	}
1557
1558	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1559	if (found_key->type != key.type ||
1560			found_key->objectid != key.objectid)
1561		return 1;
1562
1563	return 0;
1564}
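/*
 * Caller sketch (hypothetical): look up a subvolume's ROOT_ITEM by objectid;
 * on success, found_key reports the exact key including its offset.
 */
static int lookup_root_item(struct btrfs_root *tree_root, u64 subvol_id,
			    struct btrfs_path *path,
			    struct btrfs_key *found_key)
{
	/* 0: found at path->slots[0], 1: not found, < 0: error */
	return btrfs_find_item(tree_root, path, subvol_id, 0,
			       BTRFS_ROOT_ITEM_KEY, found_key);
}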
1565
1566static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1567							struct btrfs_path *p,
1568							int write_lock_level)
1569{
1570	struct btrfs_fs_info *fs_info = root->fs_info;
1571	struct extent_buffer *b;
1572	int root_lock;
1573	int level = 0;
1574
1575	/* We try very hard to do read locks on the root */
1576	root_lock = BTRFS_READ_LOCK;
1577
1578	if (p->search_commit_root) {
1579		/*
1580		 * The commit roots are read only so we always do read locks,
1581		 * and we always must hold the commit_root_sem when doing
1582		 * searches on them, the only exception is send where we don't
1583		 * want to block transaction commits for a long time, so
1584		 * we need to clone the commit root in order to avoid races
1585		 * with transaction commits that create a snapshot of one of
1586		 * the roots used by a send operation.
1587		 */
1588		if (p->need_commit_sem) {
1589			down_read(&fs_info->commit_root_sem);
1590			b = btrfs_clone_extent_buffer(root->commit_root);
1591			up_read(&fs_info->commit_root_sem);
1592			if (!b)
1593				return ERR_PTR(-ENOMEM);
1594
1595		} else {
1596			b = root->commit_root;
1597			atomic_inc(&b->refs);
1598		}
1599		level = btrfs_header_level(b);
1600		/*
1601		 * Ensure that all callers have set skip_locking when
1602		 * p->search_commit_root = 1.
1603		 */
1604		ASSERT(p->skip_locking == 1);
1605
1606		goto out;
1607	}
1608
1609	if (p->skip_locking) {
1610		b = btrfs_root_node(root);
1611		level = btrfs_header_level(b);
1612		goto out;
1613	}
1614
1615	/*
1616	 * If the level is set to maximum, we can skip trying to get the read
1617	 * lock.
1618	 */
1619	if (write_lock_level < BTRFS_MAX_LEVEL) {
1620		/*
1621		 * We don't know the level of the root node until we actually
1622		 * have it read locked
1623		 */
1624		b = btrfs_read_lock_root_node(root);
1625		level = btrfs_header_level(b);
1626		if (level > write_lock_level)
1627			goto out;
1628
1629		/* Whoops, must trade for write lock */
1630		btrfs_tree_read_unlock(b);
1631		free_extent_buffer(b);
1632	}
1633
1634	b = btrfs_lock_root_node(root);
1635	root_lock = BTRFS_WRITE_LOCK;
1636
1637	/* The level might have changed, check again */
1638	level = btrfs_header_level(b);
1639
1640out:
1641	p->nodes[level] = b;
1642	if (!p->skip_locking)
1643		p->locks[level] = root_lock;
1644	/*
1645	 * Callers are responsible for dropping b's references.
1646	 */
1647	return b;
1648}
1649
1650
1651/*
1652 * btrfs_search_slot - look for a key in a tree and perform necessary
1653 * modifications to preserve tree invariants.
1654 *
1655 * @trans:	Handle of transaction, used when modifying the tree
1656 * @p:		Holds all btree nodes along the search path
1657 * @root:	The root node of the tree
1658 * @key:	The key we are looking for
1659 * @ins_len:	Indicates purpose of search:
1660 *              >0  for inserts it's size of item inserted (*)
1661 *              <0  for deletions
1662 *               0  for plain searches, not modifying the tree
1663 *
1664 *              (*) If size of item inserted doesn't include
1665 *              sizeof(struct btrfs_item), then p->search_for_extension must
1666 *              be set.
1667 * @cow:	boolean should CoW operations be performed. Must always be 1
1668 *		when modifying the tree.
1669 *
1670 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
1671 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
1672 *
1673 * If @key is found, 0 is returned and you can find the item in the leaf level
1674 * of the path (level 0)
1675 *
1676 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
1677 * points to the slot where it should be inserted
1678 *
1679 * If an error is encountered while searching the tree a negative error number
1680 * is returned
1681 */
1682int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1683		      const struct btrfs_key *key, struct btrfs_path *p,
1684		      int ins_len, int cow)
1685{
1686	struct extent_buffer *b;
1687	int slot;
1688	int ret;
1689	int err;
1690	int level;
1691	int lowest_unlock = 1;
1692	/* everything at write_lock_level or lower must be write locked */
1693	int write_lock_level = 0;
1694	u8 lowest_level = 0;
1695	int min_write_lock_level;
1696	int prev_cmp;
1697
1698	lowest_level = p->lowest_level;
1699	WARN_ON(lowest_level && ins_len > 0);
1700	WARN_ON(p->nodes[0] != NULL);
1701	BUG_ON(!cow && ins_len);
1702
1703	if (ins_len < 0) {
1704		lowest_unlock = 2;
1705
 1706		/* when we are removing items, we might have to go up to level
 1707		 * two as we update tree pointers.  Make sure we keep write
 1708		 * locks for those levels as well
1709		 */
1710		write_lock_level = 2;
1711	} else if (ins_len > 0) {
1712		/*
1713		 * for inserting items, make sure we have a write lock on
1714		 * level 1 so we can update keys
1715		 */
1716		write_lock_level = 1;
1717	}
1718
1719	if (!cow)
1720		write_lock_level = -1;
1721
1722	if (cow && (p->keep_locks || p->lowest_level))
1723		write_lock_level = BTRFS_MAX_LEVEL;
1724
1725	min_write_lock_level = write_lock_level;
1726
1727again:
1728	prev_cmp = -1;
1729	b = btrfs_search_slot_get_root(root, p, write_lock_level);
1730	if (IS_ERR(b)) {
1731		ret = PTR_ERR(b);
1732		goto done;
1733	}
1734
1735	while (b) {
1736		int dec = 0;
1737
1738		level = btrfs_header_level(b);
1739
1740		if (cow) {
1741			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
1742
1743			/*
1744			 * if we don't really need to cow this block
1745			 * then we don't want to set the path blocking,
1746			 * so we test it here
1747			 */
1748			if (!should_cow_block(trans, root, b))
1749				goto cow_done;
1750
1751			/*
1752			 * must have write locks on this node and the
1753			 * parent
1754			 */
1755			if (level > write_lock_level ||
1756			    (level + 1 > write_lock_level &&
1757			    level + 1 < BTRFS_MAX_LEVEL &&
1758			    p->nodes[level + 1])) {
1759				write_lock_level = level + 1;
1760				btrfs_release_path(p);
1761				goto again;
1762			}
1763
1764			if (last_level)
1765				err = btrfs_cow_block(trans, root, b, NULL, 0,
1766						      &b,
1767						      BTRFS_NESTING_COW);
1768			else
1769				err = btrfs_cow_block(trans, root, b,
1770						      p->nodes[level + 1],
1771						      p->slots[level + 1], &b,
1772						      BTRFS_NESTING_COW);
1773			if (err) {
1774				ret = err;
1775				goto done;
1776			}
1777		}
1778cow_done:
1779		p->nodes[level] = b;
1780		/*
1781		 * Leave path with blocking locks to avoid massive
 1782		 * lock context switches; this is done on purpose.
1783		 */
1784
1785		/*
1786		 * we have a lock on b and as long as we aren't changing
 1787		 * the tree, there is no way for the items in b to change.
1788		 * It is safe to drop the lock on our parent before we
1789		 * go through the expensive btree search on b.
1790		 *
1791		 * If we're inserting or deleting (ins_len != 0), then we might
1792		 * be changing slot zero, which may require changing the parent.
1793		 * So, we can't drop the lock until after we know which slot
1794		 * we're operating on.
1795		 */
1796		if (!ins_len && !p->keep_locks) {
1797			int u = level + 1;
1798
1799			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
1800				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
1801				p->locks[u] = 0;
1802			}
1803		}
1804
1805		/*
1806		 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
1807		 * we can safely assume the target key will always be in slot 0
1808		 * on lower levels due to the invariants BTRFS' btree provides,
1809		 * namely that a btrfs_key_ptr entry always points to the
1810		 * lowest key in the child node, thus we can skip searching
1811		 * lower levels
1812		 */
1813		if (prev_cmp == 0) {
1814			slot = 0;
1815			ret = 0;
1816		} else {
1817			ret = btrfs_bin_search(b, key, &slot);
1818			prev_cmp = ret;
1819			if (ret < 0)
1820				goto done;
1821		}
1822
1823		if (level == 0) {
1824			p->slots[level] = slot;
1825			/*
1826			 * Item key already exists. In this case, if we are
1827			 * allowed to insert the item (for example, in dir_item
1828			 * case, item key collision is allowed), it will be
1829			 * merged with the original item. Only the item size
1830			 * grows, no new btrfs item will be added. If
1831			 * search_for_extension is not set, ins_len already
1832			 * accounts for the size of a struct btrfs_item, so deduct
1833			 * it here to keep the leaf space check correct.
1834			 */
1835			if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
1836				ASSERT(ins_len >= sizeof(struct btrfs_item));
1837				ins_len -= sizeof(struct btrfs_item);
1838			}
1839			if (ins_len > 0 &&
1840			    btrfs_leaf_free_space(b) < ins_len) {
1841				if (write_lock_level < 1) {
1842					write_lock_level = 1;
1843					btrfs_release_path(p);
1844					goto again;
1845				}
1846
1847				err = split_leaf(trans, root, key,
1848						 p, ins_len, ret == 0);
1849
1850				BUG_ON(err > 0);
1851				if (err) {
1852					ret = err;
1853					goto done;
1854				}
1855			}
1856			if (!p->search_for_split)
1857				unlock_up(p, level, lowest_unlock,
1858					  min_write_lock_level, NULL);
1859			goto done;
1860		}
1861		if (ret && slot > 0) {
1862			dec = 1;
1863			slot--;
1864		}
1865		p->slots[level] = slot;
1866		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
1867					     &write_lock_level);
1868		if (err == -EAGAIN)
1869			goto again;
1870		if (err) {
1871			ret = err;
1872			goto done;
1873		}
1874		b = p->nodes[level];
1875		slot = p->slots[level];
1876
1877		/*
1878		 * Slot 0 is special, if we change the key we have to update
1879		 * the parent pointer which means we must have a write lock on
1880		 * the parent
1881		 */
1882		if (slot == 0 && ins_len && write_lock_level < level + 1) {
1883			write_lock_level = level + 1;
1884			btrfs_release_path(p);
1885			goto again;
1886		}
1887
1888		unlock_up(p, level, lowest_unlock, min_write_lock_level,
1889			  &write_lock_level);
1890
1891		if (level == lowest_level) {
1892			if (dec)
1893				p->slots[level]++;
1894			goto done;
1895		}
1896
1897		err = read_block_for_search(root, p, &b, level, slot, key);
1898		if (err == -EAGAIN)
1899			goto again;
1900		if (err) {
1901			ret = err;
1902			goto done;
1903		}
1904
1905		if (!p->skip_locking) {
1906			level = btrfs_header_level(b);
1907			if (level <= write_lock_level) {
1908				btrfs_tree_lock(b);
1909				p->locks[level] = BTRFS_WRITE_LOCK;
1910			} else {
1911				btrfs_tree_read_lock(b);
1912				p->locks[level] = BTRFS_READ_LOCK;
1913			}
1914			p->nodes[level] = b;
1915		}
1916	}
1917	ret = 1;
1918done:
1919	if (ret < 0 && !p->skip_release_on_error)
1920		btrfs_release_path(p);
1921	return ret;
1922}
1923ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
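
/*
 * Illustrative sketch, not part of the original source: a minimal
 * read-only lookup with btrfs_search_slot().  @root and @objectid are
 * hypothetical; a read-only search passes a NULL transaction handle
 * with ins_len == 0 and cow == 0:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	btrfs_free_path(path);
 *
 * A return of 0 means an exact match at path->nodes[0]/path->slots[0],
 * 1 means the key was not found and the path points at the slot where
 * it would be inserted, and < 0 is an error.
 */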
1924
1925/*
1926 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
1927 * current state of the tree together with the operations recorded in the tree
1928 * modification log to search for the key in a previous version of this tree, as
1929 * denoted by the time_seq parameter.
1930 *
1931 * Naturally, there is no support for insert, delete or cow operations.
1932 *
1933 * The resulting path and return value will be set up as if we called
1934 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
1935 */
1936int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
1937			  struct btrfs_path *p, u64 time_seq)
1938{
1939	struct btrfs_fs_info *fs_info = root->fs_info;
1940	struct extent_buffer *b;
1941	int slot;
1942	int ret;
1943	int err;
1944	int level;
1945	int lowest_unlock = 1;
1946	u8 lowest_level = 0;
1947
1948	lowest_level = p->lowest_level;
1949	WARN_ON(p->nodes[0] != NULL);
1950
1951	if (p->search_commit_root) {
1952		BUG_ON(time_seq);
1953		return btrfs_search_slot(NULL, root, key, p, 0, 0);
1954	}
1955
1956again:
1957	b = btrfs_get_old_root(root, time_seq);
1958	if (!b) {
1959		ret = -EIO;
1960		goto done;
1961	}
1962	level = btrfs_header_level(b);
1963	p->locks[level] = BTRFS_READ_LOCK;
1964
1965	while (b) {
1966		int dec = 0;
1967
1968		level = btrfs_header_level(b);
1969		p->nodes[level] = b;
1970
1971		/*
1972		 * we have a lock on b and as long as we aren't changing
1973		 * the tree, there is no way for the items in b to change.
1974		 * It is safe to drop the lock on our parent before we
1975		 * go through the expensive btree search on b.
1976		 */
1977		btrfs_unlock_up_safe(p, level + 1);
1978
1979		ret = btrfs_bin_search(b, key, &slot);
1980		if (ret < 0)
1981			goto done;
1982
1983		if (level == 0) {
1984			p->slots[level] = slot;
1985			unlock_up(p, level, lowest_unlock, 0, NULL);
1986			goto done;
1987		}
1988
1989		if (ret && slot > 0) {
1990			dec = 1;
1991			slot--;
1992		}
1993		p->slots[level] = slot;
1994		unlock_up(p, level, lowest_unlock, 0, NULL);
1995
1996		if (level == lowest_level) {
1997			if (dec)
1998				p->slots[level]++;
1999			goto done;
2000		}
2001
2002		err = read_block_for_search(root, p, &b, level, slot, key);
2003		if (err == -EAGAIN)
2004			goto again;
2005		if (err) {
2006			ret = err;
2007			goto done;
2008		}
2009
2010		level = btrfs_header_level(b);
2011		btrfs_tree_read_lock(b);
2012		b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2013		if (!b) {
2014			ret = -ENOMEM;
2015			goto done;
2016		}
2017		p->locks[level] = BTRFS_READ_LOCK;
2018		p->nodes[level] = b;
2019	}
2020	ret = 1;
2021done:
2022	if (ret < 0)
2023		btrfs_release_path(p);
2024
2025	return ret;
2026}
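
/*
 * Illustrative sketch, not part of the original source: searching a past
 * version of the tree.  This assumes @time_seq was obtained earlier,
 * e.g. from btrfs_get_tree_mod_seq(), while the tree mod log was still
 * recording the modifications we want to rewind:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	btrfs_free_path(path);
 */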
2027
2028/*
2029 * helper to use instead of btrfs_search_slot() if no exact match is needed but
2030 * instead the next or previous item should be returned.
2031 * When find_higher is true, the next higher item is returned, the next lower
2032 * otherwise.
2033 * When return_any and find_higher are both true, and no higher item is found,
2034 * return the next lower instead.
2035 * When return_any is true and find_higher is false, and no lower item is found,
2036 * return the next higher instead.
2037 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2038 * < 0 on error
2039 */
2040int btrfs_search_slot_for_read(struct btrfs_root *root,
2041			       const struct btrfs_key *key,
2042			       struct btrfs_path *p, int find_higher,
2043			       int return_any)
2044{
2045	int ret;
2046	struct extent_buffer *leaf;
2047
2048again:
2049	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2050	if (ret <= 0)
2051		return ret;
2052	/*
2053	 * a return value of 1 means the path is at the position where the
2054	 * item should be inserted. Normally this is the next bigger item,
2055	 * but in case the previous item is the last in a leaf, path points
2056	 * to the first free slot in the previous leaf, i.e. at an invalid
2057	 * item.
2058	 */
2059	leaf = p->nodes[0];
2060
2061	if (find_higher) {
2062		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2063			ret = btrfs_next_leaf(root, p);
2064			if (ret <= 0)
2065				return ret;
2066			if (!return_any)
2067				return 1;
2068			/*
2069			 * no higher item found, return the next
2070			 * lower instead
2071			 */
2072			return_any = 0;
2073			find_higher = 0;
2074			btrfs_release_path(p);
2075			goto again;
2076		}
2077	} else {
2078		if (p->slots[0] == 0) {
2079			ret = btrfs_prev_leaf(root, p);
2080			if (ret < 0)
2081				return ret;
2082			if (!ret) {
2083				leaf = p->nodes[0];
2084				if (p->slots[0] == btrfs_header_nritems(leaf))
2085					p->slots[0]--;
2086				return 0;
2087			}
2088			if (!return_any)
2089				return 1;
2090			/*
2091			 * no lower item found, return the next
2092			 * higher instead
2093			 */
2094			return_any = 0;
2095			find_higher = 1;
2096			btrfs_release_path(p);
2097			goto again;
2098		} else {
2099			--p->slots[0];
2100		}
2101	}
2102	return 0;
2103}
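
/*
 * Illustrative sketch, not part of the original source: find the first
 * item at or after @key (find_higher == 1, return_any == 0):
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *
 * With return_any == 1 a return of 0 may instead point at the next lower
 * item, so callers must compare the returned key against @key.
 */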
2104
2105/*
2106 * adjust the pointers going up the tree, starting at level
2107 * making sure the right key of each node points to 'key'.
2108 * This is used after shifting pointers to the left, so it stops
2109 * fixing up pointers when a given leaf/node is not in slot 0 of the
2110 * higher levels
2111 *
2112 */
2113static void fixup_low_keys(struct btrfs_path *path,
2114			   struct btrfs_disk_key *key, int level)
2115{
2116	int i;
2117	struct extent_buffer *t;
2118	int ret;
2119
2120	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2121		int tslot = path->slots[i];
2122
2123		if (!path->nodes[i])
2124			break;
2125		t = path->nodes[i];
2126		ret = btrfs_tree_mod_log_insert_key(t, tslot,
2127				BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
2128		BUG_ON(ret < 0);
2129		btrfs_set_node_key(t, key, tslot);
2130		btrfs_mark_buffer_dirty(path->nodes[i]);
2131		if (tslot != 0)
2132			break;
2133	}
2134}
2135
2136/*
2137 * update item key.
2138 *
2139 * This function isn't completely safe. It's the caller's responsibility
2140 * that the new key won't break the order
2141 */
2142void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2143			     struct btrfs_path *path,
2144			     const struct btrfs_key *new_key)
2145{
2146	struct btrfs_disk_key disk_key;
2147	struct extent_buffer *eb;
2148	int slot;
2149
2150	eb = path->nodes[0];
2151	slot = path->slots[0];
2152	if (slot > 0) {
2153		btrfs_item_key(eb, &disk_key, slot - 1);
2154		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2155			btrfs_crit(fs_info,
2156		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2157				   slot, btrfs_disk_key_objectid(&disk_key),
2158				   btrfs_disk_key_type(&disk_key),
2159				   btrfs_disk_key_offset(&disk_key),
2160				   new_key->objectid, new_key->type,
2161				   new_key->offset);
2162			btrfs_print_leaf(eb);
2163			BUG();
2164		}
2165	}
2166	if (slot < btrfs_header_nritems(eb) - 1) {
2167		btrfs_item_key(eb, &disk_key, slot + 1);
2168		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2169			btrfs_crit(fs_info,
2170		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2171				   slot, btrfs_disk_key_objectid(&disk_key),
2172				   btrfs_disk_key_type(&disk_key),
2173				   btrfs_disk_key_offset(&disk_key),
2174				   new_key->objectid, new_key->type,
2175				   new_key->offset);
2176			btrfs_print_leaf(eb);
2177			BUG();
2178		}
2179	}
2180
2181	btrfs_cpu_key_to_disk(&disk_key, new_key);
2182	btrfs_set_item_key(eb, &disk_key, slot);
2183	btrfs_mark_buffer_dirty(eb);
2184	if (slot == 0)
2185		fixup_low_keys(path, &disk_key, 1);
2186}
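
/*
 * Illustrative sketch, not part of the original source: a caller that
 * already knows the new key keeps the leaf ordered can advance an item's
 * key offset in place (@bytes_trimmed is hypothetical):
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset += bytes_trimmed;
 *	btrfs_set_item_key_safe(fs_info, path, &new_key);
 *
 * The checks above only compare against the two neighbouring items and
 * BUG() on a violation, hence "isn't completely safe".
 */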
2187
2188/*
2189 * Check key order of two sibling extent buffers.
2190 *
2191 * Return true if something is wrong.
2192 * Return false if everything is fine.
2193 *
2194 * Tree-checker only works inside one tree block, thus the following
2195 * corruption can not be detected by tree-checker:
2196 *
2197 * Leaf @left			| Leaf @right
2198 * --------------------------------------------------------------
2199 * | 1 | 2 | 3 | 4 | 5 | f6 |   | 7 | 8 |
2200 *
2201 * Key f6 in leaf @left itself is valid, but not valid when the next
2202 * key in leaf @right is 7.
2203 * This can only be checked at tree block merge time.
2204 * And since the tree checker has ensured the key order in each tree block
2205 * is correct, we only need to check the last key of @left and the first
2206 * key of @right.
2207 */
2208static bool check_sibling_keys(struct extent_buffer *left,
2209			       struct extent_buffer *right)
2210{
2211	struct btrfs_key left_last;
2212	struct btrfs_key right_first;
2213	int level = btrfs_header_level(left);
2214	int nr_left = btrfs_header_nritems(left);
2215	int nr_right = btrfs_header_nritems(right);
2216
2217	/* No key to check in one of the tree blocks */
2218	if (!nr_left || !nr_right)
2219		return false;
2220
2221	if (level) {
2222		btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2223		btrfs_node_key_to_cpu(right, &right_first, 0);
2224	} else {
2225		btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2226		btrfs_item_key_to_cpu(right, &right_first, 0);
2227	}
2228
2229	if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2230		btrfs_crit(left->fs_info,
2231"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2232			   left_last.objectid, left_last.type,
2233			   left_last.offset, right_first.objectid,
2234			   right_first.type, right_first.offset);
2235		return true;
2236	}
2237	return false;
2238}
2239
2240/*
2241 * try to push data from one node into the next node left in the
2242 * tree.
2243 *
2244 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2245 * error, and > 0 if there was no room in the left hand block.
2246 */
2247static int push_node_left(struct btrfs_trans_handle *trans,
2248			  struct extent_buffer *dst,
2249			  struct extent_buffer *src, int empty)
2250{
2251	struct btrfs_fs_info *fs_info = trans->fs_info;
2252	int push_items = 0;
2253	int src_nritems;
2254	int dst_nritems;
2255	int ret = 0;
2256
2257	src_nritems = btrfs_header_nritems(src);
2258	dst_nritems = btrfs_header_nritems(dst);
2259	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2260	WARN_ON(btrfs_header_generation(src) != trans->transid);
2261	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2262
2263	if (!empty && src_nritems <= 8)
2264		return 1;
2265
2266	if (push_items <= 0)
2267		return 1;
2268
2269	if (empty) {
2270		push_items = min(src_nritems, push_items);
2271		if (push_items < src_nritems) {
2272			/* leave at least 8 pointers in the node if
2273			 * we aren't going to empty it
2274			 */
2275			if (src_nritems - push_items < 8) {
2276				if (push_items <= 8)
2277					return 1;
2278				push_items -= 8;
2279			}
2280		}
2281	} else
2282		push_items = min(src_nritems - 8, push_items);
2283
2284	/* dst is the left eb, src is the middle eb */
2285	if (check_sibling_keys(dst, src)) {
2286		ret = -EUCLEAN;
2287		btrfs_abort_transaction(trans, ret);
2288		return ret;
2289	}
2290	ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2291	if (ret) {
2292		btrfs_abort_transaction(trans, ret);
2293		return ret;
2294	}
2295	copy_extent_buffer(dst, src,
2296			   btrfs_node_key_ptr_offset(dst_nritems),
2297			   btrfs_node_key_ptr_offset(0),
2298			   push_items * sizeof(struct btrfs_key_ptr));
2299
2300	if (push_items < src_nritems) {
2301		/*
2302		 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2303		 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2304		 */
2305		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2306				      btrfs_node_key_ptr_offset(push_items),
2307				      (src_nritems - push_items) *
2308				      sizeof(struct btrfs_key_ptr));
2309	}
2310	btrfs_set_header_nritems(src, src_nritems - push_items);
2311	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2312	btrfs_mark_buffer_dirty(src);
2313	btrfs_mark_buffer_dirty(dst);
2314
2315	return ret;
2316}
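
/*
 * Worked example for the push_items math above, assuming the default 16K
 * nodesize where a node holds (16384 - 101 byte header) / 33 bytes per
 * key_ptr == 493 pointers: if dst holds 400 pointers, push_items starts
 * at 493 - 400 = 93.  With a src of 100 pointers and empty == 0, the
 * min(src_nritems - 8, push_items) clamp moves 92 pointers left and
 * leaves 8 behind in src.
 */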
2317
2318/*
2319 * try to push data from one node into the next node right in the
2320 * tree.
2321 *
2322 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2323 * error, and > 0 if there was no room in the right hand block.
2324 *
2325 * this will only push up to 1/2 the contents of the left node over
2326 */
2327static int balance_node_right(struct btrfs_trans_handle *trans,
2328			      struct extent_buffer *dst,
2329			      struct extent_buffer *src)
2330{
2331	struct btrfs_fs_info *fs_info = trans->fs_info;
2332	int push_items = 0;
2333	int max_push;
2334	int src_nritems;
2335	int dst_nritems;
2336	int ret = 0;
2337
2338	WARN_ON(btrfs_header_generation(src) != trans->transid);
2339	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2340
2341	src_nritems = btrfs_header_nritems(src);
2342	dst_nritems = btrfs_header_nritems(dst);
2343	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2344	if (push_items <= 0)
2345		return 1;
2346
2347	if (src_nritems < 4)
2348		return 1;
2349
2350	max_push = src_nritems / 2 + 1;
2351	/* don't try to empty the node */
2352	if (max_push >= src_nritems)
2353		return 1;
2354
2355	if (max_push < push_items)
2356		push_items = max_push;
2357
2358	/* dst is the right eb, src is the middle eb */
2359	if (check_sibling_keys(src, dst)) {
2360		ret = -EUCLEAN;
2361		btrfs_abort_transaction(trans, ret);
2362		return ret;
2363	}
2364	ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2365	BUG_ON(ret < 0);
2366	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2367				      btrfs_node_key_ptr_offset(0),
2368				      (dst_nritems) *
2369				      sizeof(struct btrfs_key_ptr));
2370
2371	ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2372					 push_items);
2373	if (ret) {
2374		btrfs_abort_transaction(trans, ret);
2375		return ret;
2376	}
2377	copy_extent_buffer(dst, src,
2378			   btrfs_node_key_ptr_offset(0),
2379			   btrfs_node_key_ptr_offset(src_nritems - push_items),
2380			   push_items * sizeof(struct btrfs_key_ptr));
2381
2382	btrfs_set_header_nritems(src, src_nritems - push_items);
2383	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2384
2385	btrfs_mark_buffer_dirty(src);
2386	btrfs_mark_buffer_dirty(dst);
2387
2388	return ret;
2389}
2390
2391/*
2392 * helper function to insert a new root level in the tree.
2393 * A new node is allocated, and a single item is inserted to
2394 * point to the existing root
2395 *
2396 * returns zero on success or < 0 on failure.
2397 */
2398static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2399			   struct btrfs_root *root,
2400			   struct btrfs_path *path, int level)
2401{
2402	struct btrfs_fs_info *fs_info = root->fs_info;
2403	u64 lower_gen;
2404	struct extent_buffer *lower;
2405	struct extent_buffer *c;
2406	struct extent_buffer *old;
2407	struct btrfs_disk_key lower_key;
2408	int ret;
2409
2410	BUG_ON(path->nodes[level]);
2411	BUG_ON(path->nodes[level-1] != root->node);
2412
2413	lower = path->nodes[level-1];
2414	if (level == 1)
2415		btrfs_item_key(lower, &lower_key, 0);
2416	else
2417		btrfs_node_key(lower, &lower_key, 0);
2418
2419	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2420				   &lower_key, level, root->node->start, 0,
2421				   BTRFS_NESTING_NEW_ROOT);
2422	if (IS_ERR(c))
2423		return PTR_ERR(c);
2424
2425	root_add_used(root, fs_info->nodesize);
2426
2427	btrfs_set_header_nritems(c, 1);
2428	btrfs_set_node_key(c, &lower_key, 0);
2429	btrfs_set_node_blockptr(c, 0, lower->start);
2430	lower_gen = btrfs_header_generation(lower);
2431	WARN_ON(lower_gen != trans->transid);
2432
2433	btrfs_set_node_ptr_generation(c, 0, lower_gen);
2434
2435	btrfs_mark_buffer_dirty(c);
2436
2437	old = root->node;
2438	ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2439	BUG_ON(ret < 0);
2440	rcu_assign_pointer(root->node, c);
2441
2442	/* the super has an extra ref to root->node */
2443	free_extent_buffer(old);
2444
2445	add_root_to_dirty_list(root);
2446	atomic_inc(&c->refs);
2447	path->nodes[level] = c;
2448	path->locks[level] = BTRFS_WRITE_LOCK;
2449	path->slots[level] = 0;
2450	return 0;
2451}
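
/*
 * Illustrative sketch of the transformation above, not part of the
 * original source.  Growing a one-level tree, where L is the old root
 * and C is the newly allocated node:
 *
 *	before:  root->node = L		after:  root->node = C
 *						             |
 *						      key_ptr[0] -> L
 *
 * C starts with a single pointer whose key is the lowest key of L, so
 * lookups proceed unchanged through the extra level.
 */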
2452
2453/*
2454 * worker function to insert a single pointer in a node.
2455 * the node should have enough room for the pointer already
2456 *
2457 * slot and level indicate where you want the key to go, and
2458 * blocknr is the block the key points to.
2459 */
2460static void insert_ptr(struct btrfs_trans_handle *trans,
2461		       struct btrfs_path *path,
2462		       struct btrfs_disk_key *key, u64 bytenr,
2463		       int slot, int level)
2464{
2465	struct extent_buffer *lower;
2466	int nritems;
2467	int ret;
2468
2469	BUG_ON(!path->nodes[level]);
2470	btrfs_assert_tree_locked(path->nodes[level]);
2471	lower = path->nodes[level];
2472	nritems = btrfs_header_nritems(lower);
2473	BUG_ON(slot > nritems);
2474	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2475	if (slot != nritems) {
2476		if (level) {
2477			ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2478					slot, nritems - slot);
2479			BUG_ON(ret < 0);
2480		}
2481		memmove_extent_buffer(lower,
2482			      btrfs_node_key_ptr_offset(slot + 1),
2483			      btrfs_node_key_ptr_offset(slot),
2484			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
2485	}
2486	if (level) {
2487		ret = btrfs_tree_mod_log_insert_key(lower, slot,
2488					    BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2489		BUG_ON(ret < 0);
2490	}
2491	btrfs_set_node_key(lower, key, slot);
2492	btrfs_set_node_blockptr(lower, slot, bytenr);
2493	WARN_ON(trans->transid == 0);
2494	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2495	btrfs_set_header_nritems(lower, nritems + 1);
2496	btrfs_mark_buffer_dirty(lower);
2497}
2498
2499/*
2500 * split the node at the specified level in path in two.
2501 * The path is corrected to point to the appropriate node after the split
2502 *
2503 * Before splitting this tries to make some room in the node by pushing
2504 * left and right, if either one works, it returns right away.
2505 *
2506 * returns 0 on success and < 0 on failure
2507 */
2508static noinline int split_node(struct btrfs_trans_handle *trans,
2509			       struct btrfs_root *root,
2510			       struct btrfs_path *path, int level)
2511{
2512	struct btrfs_fs_info *fs_info = root->fs_info;
2513	struct extent_buffer *c;
2514	struct extent_buffer *split;
2515	struct btrfs_disk_key disk_key;
2516	int mid;
2517	int ret;
2518	u32 c_nritems;
2519
2520	c = path->nodes[level];
2521	WARN_ON(btrfs_header_generation(c) != trans->transid);
2522	if (c == root->node) {
2523		/*
2524		 * trying to split the root, let's make a new one
2525		 *
2526		 * tree mod log: We don't log removal of the old root in
2527		 * insert_new_root, because that root buffer will be kept as a
2528		 * normal node. We are going to log removal of half of the
2529		 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2530		 * holding a tree lock on the buffer, which is why we cannot
2531		 * race with other tree_mod_log users.
2532		 */
2533		ret = insert_new_root(trans, root, path, level + 1);
2534		if (ret)
2535			return ret;
2536	} else {
2537		ret = push_nodes_for_insert(trans, root, path, level);
2538		c = path->nodes[level];
2539		if (!ret && btrfs_header_nritems(c) <
2540		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2541			return 0;
2542		if (ret < 0)
2543			return ret;
2544	}
2545
2546	c_nritems = btrfs_header_nritems(c);
2547	mid = (c_nritems + 1) / 2;
2548	btrfs_node_key(c, &disk_key, mid);
2549
2550	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2551				       &disk_key, level, c->start, 0,
2552				       BTRFS_NESTING_SPLIT);
2553	if (IS_ERR(split))
2554		return PTR_ERR(split);
2555
2556	root_add_used(root, fs_info->nodesize);
2557	ASSERT(btrfs_header_level(c) == level);
2558
2559	ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2560	if (ret) {
2561		btrfs_abort_transaction(trans, ret);
2562		return ret;
2563	}
2564	copy_extent_buffer(split, c,
2565			   btrfs_node_key_ptr_offset(0),
2566			   btrfs_node_key_ptr_offset(mid),
2567			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2568	btrfs_set_header_nritems(split, c_nritems - mid);
2569	btrfs_set_header_nritems(c, mid);
2570
2571	btrfs_mark_buffer_dirty(c);
2572	btrfs_mark_buffer_dirty(split);
2573
2574	insert_ptr(trans, path, &disk_key, split->start,
2575		   path->slots[level + 1] + 1, level + 1);
2576
2577	if (path->slots[level] >= mid) {
2578		path->slots[level] -= mid;
2579		btrfs_tree_unlock(c);
2580		free_extent_buffer(c);
2581		path->nodes[level] = split;
2582		path->slots[level + 1] += 1;
2583	} else {
2584		btrfs_tree_unlock(split);
2585		free_extent_buffer(split);
2586	}
2587	return 0;
2588}
2589
2590/*
2591 * how many bytes are required to store the items in a leaf.  start
2592 * and nr indicate which items in the leaf to check.  This totals up the
2593 * space used both by the item structs and the item data
2594 */
2595static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2596{
2597	struct btrfs_item *start_item;
2598	struct btrfs_item *end_item;
2599	int data_len;
2600	int nritems = btrfs_header_nritems(l);
2601	int end = min(nritems, start + nr) - 1;
2602
2603	if (!nr)
2604		return 0;
2605	start_item = btrfs_item_nr(start);
2606	end_item = btrfs_item_nr(end);
2607	data_len = btrfs_item_offset(l, start_item) +
2608		   btrfs_item_size(l, start_item);
2609	data_len = data_len - btrfs_item_offset(l, end_item);
2610	data_len += sizeof(struct btrfs_item) * nr;
2611	WARN_ON(data_len < 0);
2612	return data_len;
2613}
2614
2615/*
2616 * The space between the end of the leaf items and
2617 * the start of the leaf data.  IOW, how much room
2618 * the leaf has left for both items and data
2619 */
2620noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2621{
2622	struct btrfs_fs_info *fs_info = leaf->fs_info;
2623	int nritems = btrfs_header_nritems(leaf);
2624	int ret;
2625
2626	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2627	if (ret < 0) {
2628		btrfs_crit(fs_info,
2629			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2630			   ret,
2631			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2632			   leaf_space_used(leaf, 0, nritems), nritems);
2633	}
2634	return ret;
2635}
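
/*
 * Worked example, assuming the default 16K nodesize where
 * BTRFS_LEAF_DATA_SIZE() is 16384 - 101 (header) = 16283 bytes: a leaf
 * holding 100 items whose data bodies total 8000 bytes has used
 * 100 * sizeof(struct btrfs_item) (25 bytes each) + 8000 = 10500 bytes,
 * so btrfs_leaf_free_space() returns 16283 - 10500 = 5783.
 */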
2636
2637/*
2638 * min slot controls the lowest index we're willing to push to the
2639 * right.  We'll push up to and including min_slot, but no lower
2640 */
2641static noinline int __push_leaf_right(struct btrfs_path *path,
2642				      int data_size, int empty,
2643				      struct extent_buffer *right,
2644				      int free_space, u32 left_nritems,
2645				      u32 min_slot)
2646{
2647	struct btrfs_fs_info *fs_info = right->fs_info;
2648	struct extent_buffer *left = path->nodes[0];
2649	struct extent_buffer *upper = path->nodes[1];
2650	struct btrfs_map_token token;
2651	struct btrfs_disk_key disk_key;
2652	int slot;
2653	u32 i;
2654	int push_space = 0;
2655	int push_items = 0;
2656	struct btrfs_item *item;
2657	u32 nr;
2658	u32 right_nritems;
2659	u32 data_end;
2660	u32 this_item_size;
2661
2662	if (empty)
2663		nr = 0;
2664	else
2665		nr = max_t(u32, 1, min_slot);
2666
2667	if (path->slots[0] >= left_nritems)
2668		push_space += data_size;
2669
2670	slot = path->slots[1];
2671	i = left_nritems - 1;
2672	while (i >= nr) {
2673		item = btrfs_item_nr(i);
2674
2675		if (!empty && push_items > 0) {
2676			if (path->slots[0] > i)
2677				break;
2678			if (path->slots[0] == i) {
2679				int space = btrfs_leaf_free_space(left);
2680
2681				if (space + push_space * 2 > free_space)
2682					break;
2683			}
2684		}
2685
2686		if (path->slots[0] == i)
2687			push_space += data_size;
2688
2689		this_item_size = btrfs_item_size(left, item);
2690		if (this_item_size + sizeof(*item) + push_space > free_space)
2691			break;
2692
2693		push_items++;
2694		push_space += this_item_size + sizeof(*item);
2695		if (i == 0)
2696			break;
2697		i--;
2698	}
2699
2700	if (push_items == 0)
2701		goto out_unlock;
2702
2703	WARN_ON(!empty && push_items == left_nritems);
2704
2705	/* push left to right */
2706	right_nritems = btrfs_header_nritems(right);
2707
2708	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2709	push_space -= leaf_data_end(left);
2710
2711	/* make room in the right data area */
2712	data_end = leaf_data_end(right);
2713	memmove_extent_buffer(right,
2714			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
2715			      BTRFS_LEAF_DATA_OFFSET + data_end,
2716			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
2717
2718	/* copy from the left data area */
2719	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
2720		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2721		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
2722		     push_space);
2723
2724	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2725			      btrfs_item_nr_offset(0),
2726			      right_nritems * sizeof(struct btrfs_item));
2727
2728	/* copy the items from left to right */
2729	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2730		   btrfs_item_nr_offset(left_nritems - push_items),
2731		   push_items * sizeof(struct btrfs_item));
2732
2733	/* update the item pointers */
2734	btrfs_init_map_token(&token, right);
2735	right_nritems += push_items;
2736	btrfs_set_header_nritems(right, right_nritems);
2737	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2738	for (i = 0; i < right_nritems; i++) {
2739		item = btrfs_item_nr(i);
2740		push_space -= btrfs_token_item_size(&token, item);
2741		btrfs_set_token_item_offset(&token, item, push_space);
2742	}
2743
2744	left_nritems -= push_items;
2745	btrfs_set_header_nritems(left, left_nritems);
2746
2747	if (left_nritems)
2748		btrfs_mark_buffer_dirty(left);
2749	else
2750		btrfs_clean_tree_block(left);
2751
2752	btrfs_mark_buffer_dirty(right);
2753
2754	btrfs_item_key(right, &disk_key, 0);
2755	btrfs_set_node_key(upper, &disk_key, slot + 1);
2756	btrfs_mark_buffer_dirty(upper);
2757
2758	/* then fixup the leaf pointer in the path */
2759	if (path->slots[0] >= left_nritems) {
2760		path->slots[0] -= left_nritems;
2761		if (btrfs_header_nritems(path->nodes[0]) == 0)
2762			btrfs_clean_tree_block(path->nodes[0]);
2763		btrfs_tree_unlock(path->nodes[0]);
2764		free_extent_buffer(path->nodes[0]);
2765		path->nodes[0] = right;
2766		path->slots[1] += 1;
2767	} else {
2768		btrfs_tree_unlock(right);
2769		free_extent_buffer(right);
2770	}
2771	return 0;
2772
2773out_unlock:
2774	btrfs_tree_unlock(right);
2775	free_extent_buffer(right);
2776	return 1;
2777}
2778
2779/*
2780 * push some data in the path leaf to the right, trying to free up at
2781 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
2782 *
2783 * returns 1 if the push failed because the other node didn't have enough
2784 * room, 0 if everything worked out and < 0 if there were major errors.
2785 *
2786 * this will push starting from min_slot to the end of the leaf.  It won't
2787 * push any slot lower than min_slot
2788 */
2789static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2790			   *root, struct btrfs_path *path,
2791			   int min_data_size, int data_size,
2792			   int empty, u32 min_slot)
2793{
2794	struct extent_buffer *left = path->nodes[0];
2795	struct extent_buffer *right;
2796	struct extent_buffer *upper;
2797	int slot;
2798	int free_space;
2799	u32 left_nritems;
2800	int ret;
2801
2802	if (!path->nodes[1])
2803		return 1;
2804
2805	slot = path->slots[1];
2806	upper = path->nodes[1];
2807	if (slot >= btrfs_header_nritems(upper) - 1)
2808		return 1;
2809
2810	btrfs_assert_tree_locked(path->nodes[1]);
2811
2812	right = btrfs_read_node_slot(upper, slot + 1);
2813	/*
2814	 * If slot + 1 is not valid or we failed to read the right node,
2815	 * it's no big deal, just return.
2816	 */
2817	if (IS_ERR(right))
2818		return 1;
2819
2820	__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
2821
2822	free_space = btrfs_leaf_free_space(right);
2823	if (free_space < data_size)
2824		goto out_unlock;
2825
2826	/* cow and double check */
2827	ret = btrfs_cow_block(trans, root, right, upper,
2828			      slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
2829	if (ret)
2830		goto out_unlock;
2831
2832	free_space = btrfs_leaf_free_space(right);
2833	if (free_space < data_size)
2834		goto out_unlock;
2835
2836	left_nritems = btrfs_header_nritems(left);
2837	if (left_nritems == 0)
2838		goto out_unlock;
2839
2840	if (check_sibling_keys(left, right)) {
2841		ret = -EUCLEAN;
2842		btrfs_tree_unlock(right);
2843		free_extent_buffer(right);
2844		return ret;
2845	}
2846	if (path->slots[0] == left_nritems && !empty) {
2847		/* Key greater than all keys in the leaf, right neighbor has
2848		 * enough room for it and we're not emptying our leaf to delete
2849		 * it, therefore use right neighbor to insert the new item and
2850		 * no need to touch/dirty our left leaf. */
2851		btrfs_tree_unlock(left);
2852		free_extent_buffer(left);
2853		path->nodes[0] = right;
2854		path->slots[0] = 0;
2855		path->slots[1]++;
2856		return 0;
2857	}
2858
2859	return __push_leaf_right(path, min_data_size, empty,
2860				right, free_space, left_nritems, min_slot);
2861out_unlock:
2862	btrfs_tree_unlock(right);
2863	free_extent_buffer(right);
2864	return 1;
2865}
2866
2867/*
2868 * push some data in the path leaf to the left, trying to free up at
2869 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
2870 *
2871 * max_slot can put a limit on how far into the leaf we'll push items.  The
2872 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
2873 * items
2874 */
2875static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
2876				     int empty, struct extent_buffer *left,
2877				     int free_space, u32 right_nritems,
2878				     u32 max_slot)
2879{
2880	struct btrfs_fs_info *fs_info = left->fs_info;
2881	struct btrfs_disk_key disk_key;
2882	struct extent_buffer *right = path->nodes[0];
2883	int i;
2884	int push_space = 0;
2885	int push_items = 0;
2886	struct btrfs_item *item;
2887	u32 old_left_nritems;
2888	u32 nr;
2889	int ret = 0;
2890	u32 this_item_size;
2891	u32 old_left_item_size;
2892	struct btrfs_map_token token;
2893
2894	if (empty)
2895		nr = min(right_nritems, max_slot);
2896	else
2897		nr = min(right_nritems - 1, max_slot);
2898
2899	for (i = 0; i < nr; i++) {
2900		item = btrfs_item_nr(i);
2901
2902		if (!empty && push_items > 0) {
2903			if (path->slots[0] < i)
2904				break;
2905			if (path->slots[0] == i) {
2906				int space = btrfs_leaf_free_space(right);
2907
2908				if (space + push_space * 2 > free_space)
2909					break;
2910			}
2911		}
2912
2913		if (path->slots[0] == i)
2914			push_space += data_size;
2915
2916		this_item_size = btrfs_item_size(right, item);
2917		if (this_item_size + sizeof(*item) + push_space > free_space)
2918			break;
2919
2920		push_items++;
2921		push_space += this_item_size + sizeof(*item);
2922	}
2923
2924	if (push_items == 0) {
2925		ret = 1;
2926		goto out;
2927	}
2928	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
2929
2930	/* push data from right to left */
2931	copy_extent_buffer(left, right,
2932			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
2933			   btrfs_item_nr_offset(0),
2934			   push_items * sizeof(struct btrfs_item));
2935
2936	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
2937		     btrfs_item_offset_nr(right, push_items - 1);
2938
2939	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
2940		     leaf_data_end(left) - push_space,
2941		     BTRFS_LEAF_DATA_OFFSET +
2942		     btrfs_item_offset_nr(right, push_items - 1),
2943		     push_space);
2944	old_left_nritems = btrfs_header_nritems(left);
2945	BUG_ON(old_left_nritems <= 0);
2946
2947	btrfs_init_map_token(&token, left);
2948	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2949	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2950		u32 ioff;
2951
2952		item = btrfs_item_nr(i);
2953
2954		ioff = btrfs_token_item_offset(&token, item);
2955		btrfs_set_token_item_offset(&token, item,
2956		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
2957	}
2958	btrfs_set_header_nritems(left, old_left_nritems + push_items);
2959
2960	/* fixup right node */
2961	if (push_items > right_nritems)
2962		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
2963		       right_nritems);
2964
2965	if (push_items < right_nritems) {
2966		push_space = btrfs_item_offset_nr(right, push_items - 1) -
2967						  leaf_data_end(right);
2968		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
2969				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2970				      BTRFS_LEAF_DATA_OFFSET +
2971				      leaf_data_end(right), push_space);
2972
2973		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2974			      btrfs_item_nr_offset(push_items),
2975			     (btrfs_header_nritems(right) - push_items) *
2976			     sizeof(struct btrfs_item));
2977	}
2978
2979	btrfs_init_map_token(&token, right);
2980	right_nritems -= push_items;
2981	btrfs_set_header_nritems(right, right_nritems);
2982	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2983	for (i = 0; i < right_nritems; i++) {
2984		item = btrfs_item_nr(i);
2985
2986		push_space = push_space - btrfs_token_item_size(&token, item);
2987		btrfs_set_token_item_offset(&token, item, push_space);
2988	}
2989
2990	btrfs_mark_buffer_dirty(left);
2991	if (right_nritems)
2992		btrfs_mark_buffer_dirty(right);
2993	else
2994		btrfs_clean_tree_block(right);
2995
2996	btrfs_item_key(right, &disk_key, 0);
2997	fixup_low_keys(path, &disk_key, 1);
2998
2999	/* then fixup the leaf pointer in the path */
3000	if (path->slots[0] < push_items) {
3001		path->slots[0] += old_left_nritems;
3002		btrfs_tree_unlock(path->nodes[0]);
3003		free_extent_buffer(path->nodes[0]);
3004		path->nodes[0] = left;
3005		path->slots[1] -= 1;
3006	} else {
3007		btrfs_tree_unlock(left);
3008		free_extent_buffer(left);
3009		path->slots[0] -= push_items;
3010	}
3011	BUG_ON(path->slots[0] < 0);
3012	return ret;
3013out:
3014	btrfs_tree_unlock(left);
3015	free_extent_buffer(left);
3016	return ret;
3017}
3018
3019/*
3020 * push some data in the path leaf to the left, trying to free up at
3021 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3022 *
3023 * max_slot can put a limit on how far into the leaf we'll push items.  The
3024 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3025 * items
3026 */
3027static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3028			  *root, struct btrfs_path *path, int min_data_size,
3029			  int data_size, int empty, u32 max_slot)
3030{
3031	struct extent_buffer *right = path->nodes[0];
3032	struct extent_buffer *left;
3033	int slot;
3034	int free_space;
3035	u32 right_nritems;
3036	int ret = 0;
3037
3038	slot = path->slots[1];
3039	if (slot == 0)
3040		return 1;
3041	if (!path->nodes[1])
3042		return 1;
3043
3044	right_nritems = btrfs_header_nritems(right);
3045	if (right_nritems == 0)
3046		return 1;
3047
3048	btrfs_assert_tree_locked(path->nodes[1]);
3049
3050	left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3051	/*
3052	 * If slot - 1 is not valid or we failed to read the left node,
3053	 * it's no big deal, just return.
3054	 */
3055	if (IS_ERR(left))
3056		return 1;
3057
3058	__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3059
3060	free_space = btrfs_leaf_free_space(left);
3061	if (free_space < data_size) {
3062		ret = 1;
3063		goto out;
3064	}
3065
3066	/* cow and double check */
3067	ret = btrfs_cow_block(trans, root, left,
3068			      path->nodes[1], slot - 1, &left,
3069			      BTRFS_NESTING_LEFT_COW);
3070	if (ret) {
3071		/* we hit -ENOSPC, but it isn't fatal here */
3072		if (ret == -ENOSPC)
3073			ret = 1;
3074		goto out;
3075	}
3076
3077	free_space = btrfs_leaf_free_space(left);
3078	if (free_space < data_size) {
3079		ret = 1;
3080		goto out;
3081	}
3082
3083	if (check_sibling_keys(left, right)) {
3084		ret = -EUCLEAN;
3085		goto out;
3086	}
3087	return __push_leaf_left(path, min_data_size,
3088			       empty, left, free_space, right_nritems,
3089			       max_slot);
3090out:
3091	btrfs_tree_unlock(left);
3092	free_extent_buffer(left);
3093	return ret;
3094}
3095
3096/*
3097 * split the path's leaf in two, making sure there is at least data_size
3098 * available for the resulting leaf level of the path.
3099 */
3100static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3101				    struct btrfs_path *path,
3102				    struct extent_buffer *l,
3103				    struct extent_buffer *right,
3104				    int slot, int mid, int nritems)
3105{
3106	struct btrfs_fs_info *fs_info = trans->fs_info;
3107	int data_copy_size;
3108	int rt_data_off;
3109	int i;
3110	struct btrfs_disk_key disk_key;
3111	struct btrfs_map_token token;
3112
3113	nritems = nritems - mid;
3114	btrfs_set_header_nritems(right, nritems);
3115	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
3116
3117	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3118			   btrfs_item_nr_offset(mid),
3119			   nritems * sizeof(struct btrfs_item));
3120
3121	copy_extent_buffer(right, l,
3122		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3123		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3124		     leaf_data_end(l), data_copy_size);
3125
3126	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
3127
3128	btrfs_init_map_token(&token, right);
3129	for (i = 0; i < nritems; i++) {
3130		struct btrfs_item *item = btrfs_item_nr(i);
3131		u32 ioff;
3132
3133		ioff = btrfs_token_item_offset(&token, item);
3134		btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
3135	}
3136
3137	btrfs_set_header_nritems(l, mid);
3138	btrfs_item_key(right, &disk_key, 0);
3139	insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3140
3141	btrfs_mark_buffer_dirty(right);
3142	btrfs_mark_buffer_dirty(l);
3143	BUG_ON(path->slots[0] != slot);
3144
3145	if (mid <= slot) {
3146		btrfs_tree_unlock(path->nodes[0]);
3147		free_extent_buffer(path->nodes[0]);
3148		path->nodes[0] = right;
3149		path->slots[0] -= mid;
3150		path->slots[1] += 1;
3151	} else {
3152		btrfs_tree_unlock(right);
3153		free_extent_buffer(right);
3154	}
3155
3156	BUG_ON(path->slots[0] < 0);
3157}
3158
3159/*
3160 * double splits happen when we need to insert a big item in the middle
3161 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
3162 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3163 *          A                 B                 C
3164 *
3165 * We avoid this by trying to push the items on either side of our target
3166 * into the adjacent leaves.  If all goes well we can avoid the double split
3167 * completely.
3168 */
3169static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3170					  struct btrfs_root *root,
3171					  struct btrfs_path *path,
3172					  int data_size)
3173{
3174	int ret;
3175	int progress = 0;
3176	int slot;
3177	u32 nritems;
3178	int space_needed = data_size;
3179
3180	slot = path->slots[0];
3181	if (slot < btrfs_header_nritems(path->nodes[0]))
3182		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3183
3184	/*
3185	 * try to push all the items after our slot into the
3186	 * right leaf
3187	 */
3188	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3189	if (ret < 0)
3190		return ret;
3191
3192	if (ret == 0)
3193		progress++;
3194
3195	nritems = btrfs_header_nritems(path->nodes[0]);
3196	/*
3197	 * our goal is to get our slot at the start or end of a leaf.  If
3198	 * we've done so we're done
3199	 */
3200	if (path->slots[0] == 0 || path->slots[0] == nritems)
3201		return 0;
3202
3203	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3204		return 0;
3205
3206	/* try to push all the items before our slot into the left leaf */
3207	slot = path->slots[0];
3208	space_needed = data_size;
3209	if (slot > 0)
3210		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3211	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3212	if (ret < 0)
3213		return ret;
3214
3215	if (ret == 0)
3216		progress++;
3217
3218	if (progress)
3219		return 0;
3220	return 1;
3221}
3222
3223/*
3224 * split the path's leaf in two, making sure there is at least data_size
3225 * available for the resulting leaf level of the path.
3226 *
3227 * returns 0 if all went well and < 0 on failure.
3228 */
3229static noinline int split_leaf(struct btrfs_trans_handle *trans,
3230			       struct btrfs_root *root,
3231			       const struct btrfs_key *ins_key,
3232			       struct btrfs_path *path, int data_size,
3233			       int extend)
3234{
3235	struct btrfs_disk_key disk_key;
3236	struct extent_buffer *l;
3237	u32 nritems;
3238	int mid;
3239	int slot;
3240	struct extent_buffer *right;
3241	struct btrfs_fs_info *fs_info = root->fs_info;
3242	int ret = 0;
3243	int wret;
3244	int split;
3245	int num_doubles = 0;
3246	int tried_avoid_double = 0;
3247
3248	l = path->nodes[0];
3249	slot = path->slots[0];
3250	if (extend && data_size + btrfs_item_size_nr(l, slot) +
3251	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3252		return -EOVERFLOW;
3253
3254	/* first try to make some room by pushing left and right */
3255	if (data_size && path->nodes[1]) {
3256		int space_needed = data_size;
3257
3258		if (slot < btrfs_header_nritems(l))
3259			space_needed -= btrfs_leaf_free_space(l);
3260
3261		wret = push_leaf_right(trans, root, path, space_needed,
3262				       space_needed, 0, 0);
3263		if (wret < 0)
3264			return wret;
3265		if (wret) {
3266			space_needed = data_size;
3267			if (slot > 0)
3268				space_needed -= btrfs_leaf_free_space(l);
3269			wret = push_leaf_left(trans, root, path, space_needed,
3270					      space_needed, 0, (u32)-1);
3271			if (wret < 0)
3272				return wret;
3273		}
3274		l = path->nodes[0];
3275
3276		/* did the pushes work? */
3277		if (btrfs_leaf_free_space(l) >= data_size)
3278			return 0;
3279	}
3280
3281	if (!path->nodes[1]) {
3282		ret = insert_new_root(trans, root, path, 1);
3283		if (ret)
3284			return ret;
3285	}
3286again:
3287	split = 1;
3288	l = path->nodes[0];
3289	slot = path->slots[0];
3290	nritems = btrfs_header_nritems(l);
3291	mid = (nritems + 1) / 2;
3292
3293	if (mid <= slot) {
3294		if (nritems == 1 ||
3295		    leaf_space_used(l, mid, nritems - mid) + data_size >
3296			BTRFS_LEAF_DATA_SIZE(fs_info)) {
3297			if (slot >= nritems) {
3298				split = 0;
3299			} else {
3300				mid = slot;
3301				if (mid != nritems &&
3302				    leaf_space_used(l, mid, nritems - mid) +
3303				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3304					if (data_size && !tried_avoid_double)
3305						goto push_for_double;
3306					split = 2;
3307				}
3308			}
3309		}
3310	} else {
3311		if (leaf_space_used(l, 0, mid) + data_size >
3312			BTRFS_LEAF_DATA_SIZE(fs_info)) {
3313			if (!extend && data_size && slot == 0) {
3314				split = 0;
3315			} else if ((extend || !data_size) && slot == 0) {
3316				mid = 1;
3317			} else {
3318				mid = slot;
3319				if (mid != nritems &&
3320				    leaf_space_used(l, mid, nritems - mid) +
3321				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3322					if (data_size && !tried_avoid_double)
3323						goto push_for_double;
3324					split = 2;
3325				}
3326			}
3327		}
3328	}
3329
3330	if (split == 0)
3331		btrfs_cpu_key_to_disk(&disk_key, ins_key);
3332	else
3333		btrfs_item_key(l, &disk_key, mid);
3334
3335	/*
3336	 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3337	 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3338	 * subclasses, which is 8 at the time of this patch, and we've maxed it
3339	 * out.  In the future we could add a
3340	 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3341	 * use BTRFS_NESTING_NEW_ROOT.
3342	 */
3343	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3344				       &disk_key, 0, l->start, 0,
3345				       num_doubles ? BTRFS_NESTING_NEW_ROOT :
3346				       BTRFS_NESTING_SPLIT);
3347	if (IS_ERR(right))
3348		return PTR_ERR(right);
3349
3350	root_add_used(root, fs_info->nodesize);
3351
3352	if (split == 0) {
3353		if (mid <= slot) {
3354			btrfs_set_header_nritems(right, 0);
3355			insert_ptr(trans, path, &disk_key,
3356				   right->start, path->slots[1] + 1, 1);
3357			btrfs_tree_unlock(path->nodes[0]);
3358			free_extent_buffer(path->nodes[0]);
3359			path->nodes[0] = right;
3360			path->slots[0] = 0;
3361			path->slots[1] += 1;
3362		} else {
3363			btrfs_set_header_nritems(right, 0);
3364			insert_ptr(trans, path, &disk_key,
3365				   right->start, path->slots[1], 1);
3366			btrfs_tree_unlock(path->nodes[0]);
3367			free_extent_buffer(path->nodes[0]);
3368			path->nodes[0] = right;
3369			path->slots[0] = 0;
3370			if (path->slots[1] == 0)
3371				fixup_low_keys(path, &disk_key, 1);
3372		}
3373		/*
3374		 * We create a new leaf 'right' for the required ins_len and
3375		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3376		 * the new item's content into 'right'.
3377		 */
3378		return ret;
3379	}
3380
3381	copy_for_split(trans, path, l, right, slot, mid, nritems);
3382
3383	if (split == 2) {
3384		BUG_ON(num_doubles != 0);
3385		num_doubles++;
3386		goto again;
3387	}
3388
3389	return 0;
3390
3391push_for_double:
3392	push_for_double_split(trans, root, path, data_size);
3393	tried_avoid_double = 1;
3394	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3395		return 0;
3396	goto again;
3397}
3398
3399static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3400					 struct btrfs_root *root,
3401					 struct btrfs_path *path, int ins_len)
3402{
3403	struct btrfs_key key;
3404	struct extent_buffer *leaf;
3405	struct btrfs_file_extent_item *fi;
3406	u64 extent_len = 0;
3407	u32 item_size;
3408	int ret;
3409
3410	leaf = path->nodes[0];
3411	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3412
3413	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3414	       key.type != BTRFS_EXTENT_CSUM_KEY);
3415
3416	if (btrfs_leaf_free_space(leaf) >= ins_len)
3417		return 0;
3418
3419	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3420	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3421		fi = btrfs_item_ptr(leaf, path->slots[0],
3422				    struct btrfs_file_extent_item);
3423		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3424	}
3425	btrfs_release_path(path);
3426
3427	path->keep_locks = 1;
3428	path->search_for_split = 1;
3429	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3430	path->search_for_split = 0;
3431	if (ret > 0)
3432		ret = -EAGAIN;
3433	if (ret < 0)
3434		goto err;
3435
3436	ret = -EAGAIN;
3437	leaf = path->nodes[0];
3438	/* if our item isn't there, return now */
3439	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3440		goto err;
3441
3442	/* the leaf has changed, it now has room.  return now */
3443	if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3444		goto err;
3445
3446	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3447		fi = btrfs_item_ptr(leaf, path->slots[0],
3448				    struct btrfs_file_extent_item);
3449		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3450			goto err;
3451	}
3452
3453	ret = split_leaf(trans, root, &key, path, ins_len, 1);
3454	if (ret)
3455		goto err;
3456
3457	path->keep_locks = 0;
3458	btrfs_unlock_up_safe(path, 1);
3459	return 0;
3460err:
3461	path->keep_locks = 0;
3462	return ret;
3463}
3464
3465static noinline int split_item(struct btrfs_path *path,
3466			       const struct btrfs_key *new_key,
3467			       unsigned long split_offset)
3468{
3469	struct extent_buffer *leaf;
3470	struct btrfs_item *item;
3471	struct btrfs_item *new_item;
3472	int slot;
3473	char *buf;
3474	u32 nritems;
3475	u32 item_size;
3476	u32 orig_offset;
3477	struct btrfs_disk_key disk_key;
3478
3479	leaf = path->nodes[0];
3480	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3481
3482	item = btrfs_item_nr(path->slots[0]);
3483	orig_offset = btrfs_item_offset(leaf, item);
3484	item_size = btrfs_item_size(leaf, item);
3485
3486	buf = kmalloc(item_size, GFP_NOFS);
3487	if (!buf)
3488		return -ENOMEM;
3489
3490	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3491			    path->slots[0]), item_size);
3492
3493	slot = path->slots[0] + 1;
3494	nritems = btrfs_header_nritems(leaf);
3495	if (slot != nritems) {
3496		/* shift the items */
3497		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3498				btrfs_item_nr_offset(slot),
3499				(nritems - slot) * sizeof(struct btrfs_item));
3500	}
3501
3502	btrfs_cpu_key_to_disk(&disk_key, new_key);
3503	btrfs_set_item_key(leaf, &disk_key, slot);
3504
3505	new_item = btrfs_item_nr(slot);
3506
3507	btrfs_set_item_offset(leaf, new_item, orig_offset);
3508	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3509
3510	btrfs_set_item_offset(leaf, item,
3511			      orig_offset + item_size - split_offset);
3512	btrfs_set_item_size(leaf, item, split_offset);
3513
3514	btrfs_set_header_nritems(leaf, nritems + 1);
3515
3516	/* write the data for the start of the original item */
3517	write_extent_buffer(leaf, buf,
3518			    btrfs_item_ptr_offset(leaf, path->slots[0]),
3519			    split_offset);
3520
3521	/* write the data for the new item */
3522	write_extent_buffer(leaf, buf + split_offset,
3523			    btrfs_item_ptr_offset(leaf, slot),
3524			    item_size - split_offset);
3525	btrfs_mark_buffer_dirty(leaf);
3526
3527	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3528	kfree(buf);
3529	return 0;
3530}
3531
3532/*
3533 * This function splits a single item into two items,
3534 * giving 'new_key' to the new item and splitting the
3535 * old one at split_offset (from the start of the item).
3536 *
3537 * The path may be released by this operation.  After
3538 * the split, the path is pointing to the old item.  The
3539 * new item is going to be in the same node as the old one.
3540 *
3541 * Note, the item being split must be small enough to live alone on
3542 * a tree block with room for one extra struct btrfs_item
3543 *
3544 * This allows us to split the item in place, keeping a lock on the
3545 * leaf the entire time.
3546 */
3547int btrfs_split_item(struct btrfs_trans_handle *trans,
3548		     struct btrfs_root *root,
3549		     struct btrfs_path *path,
3550		     const struct btrfs_key *new_key,
3551		     unsigned long split_offset)
3552{
3553	int ret;
3554	ret = setup_leaf_for_split(trans, root, path,
3555				   sizeof(struct btrfs_item));
3556	if (ret)
3557		return ret;
3558
3559	ret = split_item(path, new_key, split_offset);
3560	return ret;
3561}
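
/*
 * Illustrative sketch, not part of the original source: split the item
 * at path->slots[0] so its first @split_offset bytes stay in place and
 * the tail becomes a new item keyed by @new_key (@found_key is
 * hypothetical):
 *
 *	struct btrfs_key new_key = found_key;
 *
 *	new_key.offset = found_key.offset + split_offset;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 */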
3562
3563/*
3564 * This function duplicates an item, giving 'new_key' to the new item.
3565 * It guarantees both items live in the same tree leaf and the new item
3566 * is contiguous with the original item.
3567 *
3568 * This allows us to split a file extent in place, keeping a lock on the
3569 * leaf the entire time.
3570 */
3571int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3572			 struct btrfs_root *root,
3573			 struct btrfs_path *path,
3574			 const struct btrfs_key *new_key)
3575{
3576	struct extent_buffer *leaf;
3577	int ret;
3578	u32 item_size;
3579
3580	leaf = path->nodes[0];
3581	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3582	ret = setup_leaf_for_split(trans, root, path,
3583				   item_size + sizeof(struct btrfs_item));
3584	if (ret)
3585		return ret;
3586
3587	path->slots[0]++;
3588	setup_items_for_insert(root, path, new_key, &item_size, 1);
3589	leaf = path->nodes[0];
3590	memcpy_extent_buffer(leaf,
3591			     btrfs_item_ptr_offset(leaf, path->slots[0]),
3592			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3593			     item_size);
3594	return 0;
3595}
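
/*
 * Illustrative sketch, not part of the original source: clone a file
 * extent item under a new offset (@key and @end are hypothetical); on
 * success the path points at the new copy:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset = end;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret == 0)
 *		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_file_extent_item);
 */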
3596
3597/*
3598 * make the item pointed to by the path smaller.  new_size indicates
3599 * how small to make it, and from_end tells us if we just chop bytes
3600 * off the end of the item or if we shift the item to chop bytes off
3601 * the front.
3602 */
3603void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3604{
3605	int slot;
3606	struct extent_buffer *leaf;
3607	struct btrfs_item *item;
3608	u32 nritems;
3609	unsigned int data_end;
3610	unsigned int old_data_start;
3611	unsigned int old_size;
3612	unsigned int size_diff;
3613	int i;
3614	struct btrfs_map_token token;
3615
3616	leaf = path->nodes[0];
3617	slot = path->slots[0];
3618
3619	old_size = btrfs_item_size_nr(leaf, slot);
3620	if (old_size == new_size)
3621		return;
3622
3623	nritems = btrfs_header_nritems(leaf);
3624	data_end = leaf_data_end(leaf);
3625
3626	old_data_start = btrfs_item_offset_nr(leaf, slot);
3627
3628	size_diff = old_size - new_size;
3629
3630	BUG_ON(slot < 0);
3631	BUG_ON(slot >= nritems);
3632
3633	/*
3634	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3635	 */
3636	/* first correct the data pointers */
3637	btrfs_init_map_token(&token, leaf);
3638	for (i = slot; i < nritems; i++) {
3639		u32 ioff;
3640		item = btrfs_item_nr(i);
3641
3642		ioff = btrfs_token_item_offset(&token, item);
3643		btrfs_set_token_item_offset(&token, item, ioff + size_diff);
3644	}
3645
3646	/* shift the data */
3647	if (from_end) {
3648		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3649			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3650			      data_end, old_data_start + new_size - data_end);
3651	} else {
3652		struct btrfs_disk_key disk_key;
3653		u64 offset;
3654
3655		btrfs_item_key(leaf, &disk_key, slot);
3656
3657		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3658			unsigned long ptr;
3659			struct btrfs_file_extent_item *fi;
3660
3661			fi = btrfs_item_ptr(leaf, slot,
3662					    struct btrfs_file_extent_item);
3663			fi = (struct btrfs_file_extent_item *)(
3664			     (unsigned long)fi - size_diff);
3665
3666			if (btrfs_file_extent_type(leaf, fi) ==
3667			    BTRFS_FILE_EXTENT_INLINE) {
3668				ptr = btrfs_item_ptr_offset(leaf, slot);
3669				memmove_extent_buffer(leaf, ptr,
3670				      (unsigned long)fi,
3671				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
3672			}
3673		}
3674
3675		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3676			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3677			      data_end, old_data_start - data_end);
3678
3679		offset = btrfs_disk_key_offset(&disk_key);
3680		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3681		btrfs_set_item_key(leaf, &disk_key, slot);
3682		if (slot == 0)
3683			fixup_low_keys(path, &disk_key, 1);
3684	}
3685
3686	item = btrfs_item_nr(slot);
3687	btrfs_set_item_size(leaf, item, new_size);
3688	btrfs_mark_buffer_dirty(leaf);
3689
3690	if (btrfs_leaf_free_space(leaf) < 0) {
3691		btrfs_print_leaf(leaf);
3692		BUG();
3693	}
3694}
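
/*
 * Illustrative sketch (editor's addition): using the from_end = 1 mode of
 * btrfs_truncate_item() to drop "bytes_to_drop" bytes from the tail of the
 * item the path points to.  The helper name and parameter are hypothetical.
 */
static void __maybe_unused example_chop_item_tail(struct btrfs_path *path,
						  u32 bytes_to_drop)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (bytes_to_drop >= old_size)
		return;	/* refuse to shrink the item to nothing */

	/* from_end = 1: keep the item's start, discard its tail. */
	btrfs_truncate_item(path, old_size - bytes_to_drop, 1);
}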
3695
3696/*
3697 * make the item pointed to by the path bigger, data_size is the added size.
3698 */
3699void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3700{
3701	int slot;
3702	struct extent_buffer *leaf;
3703	struct btrfs_item *item;
3704	u32 nritems;
3705	unsigned int data_end;
3706	unsigned int old_data;
3707	unsigned int old_size;
3708	int i;
3709	struct btrfs_map_token token;
3710
3711	leaf = path->nodes[0];
3712
3713	nritems = btrfs_header_nritems(leaf);
3714	data_end = leaf_data_end(leaf);
3715
3716	if (btrfs_leaf_free_space(leaf) < data_size) {
3717		btrfs_print_leaf(leaf);
3718		BUG();
3719	}
3720	slot = path->slots[0];
3721	old_data = btrfs_item_end_nr(leaf, slot);
3722
3723	BUG_ON(slot < 0);
3724	if (slot >= nritems) {
3725		btrfs_print_leaf(leaf);
3726		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
3727			   slot, nritems);
3728		BUG();
3729	}
3730
3731	/*
3732	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3733	 */
3734	/* first correct the data pointers */
3735	btrfs_init_map_token(&token, leaf);
3736	for (i = slot; i < nritems; i++) {
3737		u32 ioff;
3738		item = btrfs_item_nr(i);
3739
3740		ioff = btrfs_token_item_offset(&token, item);
3741		btrfs_set_token_item_offset(&token, item, ioff - data_size);
3742	}
3743
3744	/* shift the data */
3745	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3746		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
3747		      data_end, old_data - data_end);
3748
3749	data_end = old_data;
3750	old_size = btrfs_item_size_nr(leaf, slot);
3751	item = btrfs_item_nr(slot);
3752	btrfs_set_item_size(leaf, item, old_size + data_size);
3753	btrfs_mark_buffer_dirty(leaf);
3754
3755	if (btrfs_leaf_free_space(leaf) < 0) {
3756		btrfs_print_leaf(leaf);
3757		BUG();
3758	}
3759}
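
/*
 * Illustrative sketch (editor's addition): btrfs_extend_item() BUGs when
 * the leaf lacks room, so a caller normally reserves space up front by
 * searching with a nonzero ins_len; this hypothetical helper only shows
 * the free-space guard.
 */
static int __maybe_unused example_grow_item(struct btrfs_path *path, u32 grow)
{
	struct extent_buffer *leaf = path->nodes[0];

	if (btrfs_leaf_free_space(leaf) < grow)
		return -ENOSPC;	/* caller must make room (split/push) first */

	btrfs_extend_item(path, grow);
	return 0;
}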
3760
3761/**
3762 * setup_items_for_insert - Helper called before inserting one or more items
3763 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
3764 * in a function that doesn't call btrfs_search_slot
3765 *
3766 * @root:	root we are inserting items to
3767 * @path:	points to the leaf/slot where we are going to insert new items
3768 * @cpu_key:	array of keys for items to be inserted
3769 * @data_size:	size of the body of each item we are going to insert
3770 * @nr:		size of @cpu_key/@data_size arrays
3771 */
3772void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
3773			    const struct btrfs_key *cpu_key, u32 *data_size,
3774			    int nr)
3775{
3776	struct btrfs_fs_info *fs_info = root->fs_info;
3777	struct btrfs_item *item;
3778	int i;
3779	u32 nritems;
3780	unsigned int data_end;
3781	struct btrfs_disk_key disk_key;
3782	struct extent_buffer *leaf;
3783	int slot;
3784	struct btrfs_map_token token;
3785	u32 total_size;
3786	u32 total_data = 0;
3787
3788	for (i = 0; i < nr; i++)
3789		total_data += data_size[i];
3790	total_size = total_data + (nr * sizeof(struct btrfs_item));
3791
3792	if (path->slots[0] == 0) {
3793		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3794		fixup_low_keys(path, &disk_key, 1);
3795	}
3796	btrfs_unlock_up_safe(path, 1);
3797
3798	leaf = path->nodes[0];
3799	slot = path->slots[0];
3800
3801	nritems = btrfs_header_nritems(leaf);
3802	data_end = leaf_data_end(leaf);
3803
3804	if (btrfs_leaf_free_space(leaf) < total_size) {
3805		btrfs_print_leaf(leaf);
3806		btrfs_crit(fs_info, "not enough freespace need %u have %d",
3807			   total_size, btrfs_leaf_free_space(leaf));
3808		BUG();
3809	}
3810
3811	btrfs_init_map_token(&token, leaf);
3812	if (slot != nritems) {
3813		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3814
3815		if (old_data < data_end) {
3816			btrfs_print_leaf(leaf);
3817			btrfs_crit(fs_info,
3818		"item at slot %d with data offset %u beyond data end of leaf %u",
3819				   slot, old_data, data_end);
3820			BUG();
3821		}
3822		/*
3823		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3824		 */
3825		/* first correct the data pointers */
3826		for (i = slot; i < nritems; i++) {
3827			u32 ioff;
3828
3829			item = btrfs_item_nr(i);
3830			ioff = btrfs_token_item_offset(&token, item);
3831			btrfs_set_token_item_offset(&token, item,
3832						    ioff - total_data);
3833		}
3834		/* shift the items */
3835		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3836			      btrfs_item_nr_offset(slot),
3837			      (nritems - slot) * sizeof(struct btrfs_item));
3838
3839		/* shift the data */
3840		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3841			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
3842			      data_end, old_data - data_end);
3843		data_end = old_data;
3844	}
3845
3846	/* setup the item for the new data */
3847	for (i = 0; i < nr; i++) {
3848		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3849		btrfs_set_item_key(leaf, &disk_key, slot + i);
3850		item = btrfs_item_nr(slot + i);
3851		data_end -= data_size[i];
3852		btrfs_set_token_item_offset(&token, item, data_end);
3853		btrfs_set_token_item_size(&token, item, data_size[i]);
3854	}
3855
3856	btrfs_set_header_nritems(leaf, nritems + nr);
3857	btrfs_mark_buffer_dirty(leaf);
3858
3859	if (btrfs_leaf_free_space(leaf) < 0) {
3860		btrfs_print_leaf(leaf);
3861		BUG();
3862	}
3863}
3864
3865/*
3866 * Given a key and some data, insert items into the tree.
3867 * This does all the path init required, making room in the tree if needed.
3868 */
3869int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3870			    struct btrfs_root *root,
3871			    struct btrfs_path *path,
3872			    const struct btrfs_key *cpu_key, u32 *data_size,
3873			    int nr)
3874{
3875	int ret = 0;
3876	int slot;
3877	int i;
3878	u32 total_size = 0;
3879	u32 total_data = 0;
3880
3881	for (i = 0; i < nr; i++)
3882		total_data += data_size[i];
3883
3884	total_size = total_data + (nr * sizeof(struct btrfs_item));
3885	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3886	if (ret == 0)
3887		return -EEXIST;
3888	if (ret < 0)
3889		return ret;
3890
3891	slot = path->slots[0];
3892	BUG_ON(slot < 0);
3893
3894	setup_items_for_insert(root, path, cpu_key, data_size, nr);
3895	return 0;
3896}
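
/*
 * Illustrative sketch (editor's addition): reserving two adjacent empty
 * items with btrfs_insert_empty_items() and filling their bodies.  Keys
 * must arrive pre-sorted; all names here are hypothetical.
 */
static int __maybe_unused example_insert_two_items(
		struct btrfs_trans_handle *trans, struct btrfs_root *root,
		struct btrfs_path *path, const struct btrfs_key keys[2],
		void *bodies[2], u32 sizes[2])
{
	struct extent_buffer *leaf;
	int ret;
	int i;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++)
		write_extent_buffer(leaf, bodies[i],
				    btrfs_item_ptr_offset(leaf,
							  path->slots[0] + i),
				    sizes[i]);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
	return 0;
}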
3897
3898/*
3899 * Given a key and some data, insert an item into the tree.
3900 * This does all the path init required, making room in the tree if needed.
3901 */
3902int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3903		      const struct btrfs_key *cpu_key, void *data,
3904		      u32 data_size)
3905{
3906	int ret = 0;
3907	struct btrfs_path *path;
3908	struct extent_buffer *leaf;
3909	unsigned long ptr;
3910
3911	path = btrfs_alloc_path();
3912	if (!path)
3913		return -ENOMEM;
3914	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3915	if (!ret) {
3916		leaf = path->nodes[0];
3917		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3918		write_extent_buffer(leaf, data, ptr, data_size);
3919		btrfs_mark_buffer_dirty(leaf);
3920	}
3921	btrfs_free_path(path);
3922	return ret;
3923}
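
/*
 * Illustrative sketch (editor's addition): inserting one small fixed-size
 * item through btrfs_insert_item(), which allocates and frees the path
 * itself.  Key values and payload are made up for the example.
 */
static int __maybe_unused example_insert_u64_item(
		struct btrfs_trans_handle *trans, struct btrfs_root *root,
		u64 objectid, u8 type, u64 offset, u64 value)
{
	struct btrfs_key key;
	__le64 payload = cpu_to_le64(value);

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* Returns -EEXIST if an item with this exact key already exists. */
	return btrfs_insert_item(trans, root, &key, &payload, sizeof(payload));
}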
3924
3925/*
3926 * delete the pointer from a given node.
3927 *
3928 * the tree should have been previously balanced so the deletion does not
3929 * empty a node.
3930 */
3931static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
3932		    int level, int slot)
3933{
3934	struct extent_buffer *parent = path->nodes[level];
3935	u32 nritems;
3936	int ret;
3937
3938	nritems = btrfs_header_nritems(parent);
3939	if (slot != nritems - 1) {
3940		if (level) {
3941			ret = btrfs_tree_mod_log_insert_move(parent, slot,
3942					slot + 1, nritems - slot - 1);
3943			BUG_ON(ret < 0);
3944		}
3945		memmove_extent_buffer(parent,
3946			      btrfs_node_key_ptr_offset(slot),
3947			      btrfs_node_key_ptr_offset(slot + 1),
3948			      sizeof(struct btrfs_key_ptr) *
3949			      (nritems - slot - 1));
3950	} else if (level) {
3951		ret = btrfs_tree_mod_log_insert_key(parent, slot,
3952				BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
3953		BUG_ON(ret < 0);
3954	}
3955
3956	nritems--;
3957	btrfs_set_header_nritems(parent, nritems);
3958	if (nritems == 0 && parent == root->node) {
3959		BUG_ON(btrfs_header_level(root->node) != 1);
3960		/* just turn the root into a leaf and break */
3961		btrfs_set_header_level(root->node, 0);
3962	} else if (slot == 0) {
3963		struct btrfs_disk_key disk_key;
3964
3965		btrfs_node_key(parent, &disk_key, 0);
3966		fixup_low_keys(path, &disk_key, level + 1);
3967	}
3968	btrfs_mark_buffer_dirty(parent);
3969}
3970
3971/*
3972 * a helper function to delete the leaf pointed to by path->slots[1] and
3973 * path->nodes[1].
3974 *
3975 * This deletes the pointer in path->nodes[1] and frees the leaf
3976 * block extent.
3977 *
3978 * The path must have already been setup for deleting the leaf, including
3979 * all the proper balancing.  path->nodes[1] must be locked.
3980 */
3981static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3982				    struct btrfs_root *root,
3983				    struct btrfs_path *path,
3984				    struct extent_buffer *leaf)
3985{
3986	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3987	del_ptr(root, path, 1, path->slots[1]);
3988
3989	/*
3990	 * btrfs_free_extent is expensive, we want to make sure we
3991	 * aren't holding any locks when we call it
3992	 */
3993	btrfs_unlock_up_safe(path, 0);
3994
3995	root_sub_used(root, leaf->len);
3996
3997	atomic_inc(&leaf->refs);
3998	btrfs_free_tree_block(trans, root, leaf, 0, 1);
3999	free_extent_buffer_stale(leaf);
4000}
4001/*
4002 * delete the item at the leaf level in path.  If that empties
4003 * the leaf, remove it from the tree
4004 */
4005int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4006		    struct btrfs_path *path, int slot, int nr)
4007{
4008	struct btrfs_fs_info *fs_info = root->fs_info;
4009	struct extent_buffer *leaf;
4010	struct btrfs_item *item;
4011	u32 last_off;
4012	u32 dsize = 0;
4013	int ret = 0;
4014	int wret;
4015	int i;
4016	u32 nritems;
4017
4018	leaf = path->nodes[0];
4019	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4020
4021	for (i = 0; i < nr; i++)
4022		dsize += btrfs_item_size_nr(leaf, slot + i);
4023
4024	nritems = btrfs_header_nritems(leaf);
4025
4026	if (slot + nr != nritems) {
4027		int data_end = leaf_data_end(leaf);
4028		struct btrfs_map_token token;
4029
4030		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4031			      data_end + dsize,
4032			      BTRFS_LEAF_DATA_OFFSET + data_end,
4033			      last_off - data_end);
4034
4035		btrfs_init_map_token(&token, leaf);
4036		for (i = slot + nr; i < nritems; i++) {
4037			u32 ioff;
4038
4039			item = btrfs_item_nr(i);
4040			ioff = btrfs_token_item_offset(&token, item);
4041			btrfs_set_token_item_offset(&token, item, ioff + dsize);
4042		}
4043
4044		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4045			      btrfs_item_nr_offset(slot + nr),
4046			      sizeof(struct btrfs_item) *
4047			      (nritems - slot - nr));
4048	}
4049	btrfs_set_header_nritems(leaf, nritems - nr);
4050	nritems -= nr;
4051
4052	/* delete the leaf if we've emptied it */
4053	if (nritems == 0) {
4054		if (leaf == root->node) {
4055			btrfs_set_header_level(leaf, 0);
4056		} else {
4057			btrfs_clean_tree_block(leaf);
4058			btrfs_del_leaf(trans, root, path, leaf);
4059		}
4060	} else {
4061		int used = leaf_space_used(leaf, 0, nritems);
4062		if (slot == 0) {
4063			struct btrfs_disk_key disk_key;
4064
4065			btrfs_item_key(leaf, &disk_key, 0);
4066			fixup_low_keys(path, &disk_key, 1);
4067		}
4068
4069		/* delete the leaf if it is mostly empty */
4070		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4071			/* push_leaf_left fixes the path.
4072			 * make sure the path still points to our leaf
4073			 * for possible call to del_ptr below
4074			 */
4075			slot = path->slots[1];
4076			atomic_inc(&leaf->refs);
4077
4078			wret = push_leaf_left(trans, root, path, 1, 1,
4079					      1, (u32)-1);
4080			if (wret < 0 && wret != -ENOSPC)
4081				ret = wret;
4082
4083			if (path->nodes[0] == leaf &&
4084			    btrfs_header_nritems(leaf)) {
4085				wret = push_leaf_right(trans, root, path, 1,
4086						       1, 1, 0);
4087				if (wret < 0 && wret != -ENOSPC)
4088					ret = wret;
4089			}
4090
4091			if (btrfs_header_nritems(leaf) == 0) {
4092				path->slots[1] = slot;
4093				btrfs_del_leaf(trans, root, path, leaf);
4094				free_extent_buffer(leaf);
4095				ret = 0;
4096			} else {
4097				/* if we're still in the path, make sure
4098				 * we're dirty.  Otherwise, one of the
4099				 * push_leaf functions must have already
4100				 * dirtied this buffer
4101				 */
4102				if (path->nodes[0] == leaf)
4103					btrfs_mark_buffer_dirty(leaf);
4104				free_extent_buffer(leaf);
4105			}
4106		} else {
4107			btrfs_mark_buffer_dirty(leaf);
4108		}
4109	}
4110	return ret;
4111}
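
/*
 * Illustrative sketch (editor's addition): deleting the contiguous run of
 * items with a given objectid that starts at the current path slot, in a
 * single btrfs_del_items() call.  The helper name is hypothetical.
 */
static int __maybe_unused example_del_run(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 objectid)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_key key;
	int slot = path->slots[0];
	int nr = 0;

	/* Count how many consecutive items in this leaf share @objectid. */
	while (slot + nr < btrfs_header_nritems(leaf)) {
		btrfs_item_key_to_cpu(leaf, &key, slot + nr);
		if (key.objectid != objectid)
			break;
		nr++;
	}
	if (!nr)
		return 0;

	/* One call batches the memmoves and any leaf rebalancing. */
	return btrfs_del_items(trans, root, path, slot, nr);
}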
4112
4113/*
4114 * search the tree again to find a leaf with lesser keys
4115 * returns 0 if it found something or 1 if there are no lesser leaves.
4116 * returns < 0 on io errors.
4117 *
4118 * This may release the path, and so you may lose any locks held at the
4119 * time you call it.
4120 */
4121int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4122{
4123	struct btrfs_key key;
4124	struct btrfs_disk_key found_key;
4125	int ret;
4126
4127	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4128
4129	if (key.offset > 0) {
4130		key.offset--;
4131	} else if (key.type > 0) {
4132		key.type--;
4133		key.offset = (u64)-1;
4134	} else if (key.objectid > 0) {
4135		key.objectid--;
4136		key.type = (u8)-1;
4137		key.offset = (u64)-1;
4138	} else {
4139		return 1;
4140	}
4141
4142	btrfs_release_path(path);
4143	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4144	if (ret < 0)
4145		return ret;
4146	btrfs_item_key(path->nodes[0], &found_key, 0);
4147	ret = comp_keys(&found_key, &key);
4148	/*
4149	 * We might have had an item with the previous key in the tree right
4150	 * before we released our path. And after we released our path, that
4151	 * item might have been pushed to the first slot (0) of the leaf we
4152	 * were holding due to a tree balance. Alternatively, an item with the
4153	 * previous key can exist as the only element of a leaf (big fat item).
4154	 * Therefore account for these 2 cases, so that our callers (like
4155	 * btrfs_previous_item) don't miss an existing item with a key matching
4156	 * the previous key we computed above.
4157	 */
4158	if (ret <= 0)
4159		return 0;
4160	return 1;
4161}
4162
4163/*
4164 * A helper function to walk down the tree starting at min_key, and looking
4165 * for nodes or leaves that have a minimum transaction id.
4166 * This is used by the btree defrag code and tree logging.
4167 *
4168 * This does not cow, but it does stuff the starting key it finds back
4169 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4170 * key and get a writable path.
4171 *
4172 * This honors path->lowest_level to prevent descent past a given level
4173 * of the tree.
4174 *
4175 * min_trans indicates the oldest transaction that you are interested
4176 * in walking through.  Any nodes or leaves older than min_trans are
4177 * skipped over (without reading them).
4178 *
4179 * returns zero if something useful was found, < 0 on error and 1 if there
4180 * was nothing in the tree that matched the search criteria.
4181 */
4182int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4183			 struct btrfs_path *path,
4184			 u64 min_trans)
4185{
4186	struct extent_buffer *cur;
4187	struct btrfs_key found_key;
4188	int slot;
4189	int sret;
4190	u32 nritems;
4191	int level;
4192	int ret = 1;
4193	int keep_locks = path->keep_locks;
4194
4195	path->keep_locks = 1;
4196again:
4197	cur = btrfs_read_lock_root_node(root);
4198	level = btrfs_header_level(cur);
4199	WARN_ON(path->nodes[level]);
4200	path->nodes[level] = cur;
4201	path->locks[level] = BTRFS_READ_LOCK;
4202
4203	if (btrfs_header_generation(cur) < min_trans) {
4204		ret = 1;
4205		goto out;
4206	}
4207	while (1) {
4208		nritems = btrfs_header_nritems(cur);
4209		level = btrfs_header_level(cur);
4210		sret = btrfs_bin_search(cur, min_key, &slot);
4211		if (sret < 0) {
4212			ret = sret;
4213			goto out;
4214		}
4215
4216		/* at the lowest level, we're done, setup the path and exit */
4217		if (level == path->lowest_level) {
4218			if (slot >= nritems)
4219				goto find_next_key;
4220			ret = 0;
4221			path->slots[level] = slot;
4222			btrfs_item_key_to_cpu(cur, &found_key, slot);
4223			goto out;
4224		}
4225		if (sret && slot > 0)
4226			slot--;
4227		/*
4228		 * check this node pointer against the min_trans parameters.
4229		 * If it is too old, skip to the next one.
4230		 */
4231		while (slot < nritems) {
4232			u64 gen;
4233
4234			gen = btrfs_node_ptr_generation(cur, slot);
4235			if (gen < min_trans) {
4236				slot++;
4237				continue;
4238			}
4239			break;
4240		}
4241find_next_key:
4242		/*
4243		 * we didn't find a candidate key in this node, walk forward
4244		 * and find another one
4245		 */
4246		if (slot >= nritems) {
4247			path->slots[level] = slot;
4248			sret = btrfs_find_next_key(root, path, min_key, level,
4249						  min_trans);
4250			if (sret == 0) {
4251				btrfs_release_path(path);
4252				goto again;
4253			} else {
4254				goto out;
4255			}
4256		}
4257		/* save our key for returning back */
4258		btrfs_node_key_to_cpu(cur, &found_key, slot);
4259		path->slots[level] = slot;
4260		if (level == path->lowest_level) {
4261			ret = 0;
4262			goto out;
4263		}
4264		cur = btrfs_read_node_slot(cur, slot);
4265		if (IS_ERR(cur)) {
4266			ret = PTR_ERR(cur);
4267			goto out;
4268		}
4269
4270		btrfs_tree_read_lock(cur);
4271
4272		path->locks[level - 1] = BTRFS_READ_LOCK;
4273		path->nodes[level - 1] = cur;
4274		unlock_up(path, level, 1, 0, NULL);
4275	}
4276out:
4277	path->keep_locks = keep_locks;
4278	if (ret == 0) {
4279		btrfs_unlock_up_safe(path, path->lowest_level + 1);
4280		memcpy(min_key, &found_key, sizeof(found_key));
4281	}
4282	return ret;
4283}
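
/*
 * Illustrative sketch (editor's addition): scanning for leaf items whose
 * blocks are newer than @min_trans via btrfs_search_forward().  The visit
 * callback and the simplistic key advance are hypothetical; a production
 * walker would also handle type/objectid rollover.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
		u64 min_trans,
		void (*visit)(struct extent_buffer *leaf, int slot))
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		/* On success, min_key is stuffed with the key found. */
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1: nothing left, < 0: error */
			break;

		visit(path->nodes[0], path->slots[0]);
		btrfs_release_path(path);

		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}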
4284
4285/*
4286 * this is similar to btrfs_next_leaf, but does not try to preserve
4287 * and fixup the path.  It looks for and returns the next key in the
4288 * tree based on the current path and the min_trans parameters.
4289 *
4290 * 0 is returned if another key is found, < 0 if there are any errors
4291 * and 1 is returned if there are no higher keys in the tree
4292 *
4293 * path->keep_locks should be set to 1 on the search made before
4294 * calling this function.
4295 */
4296int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4297			struct btrfs_key *key, int level, u64 min_trans)
4298{
4299	int slot;
4300	struct extent_buffer *c;
4301
4302	WARN_ON(!path->keep_locks && !path->skip_locking);
4303	while (level < BTRFS_MAX_LEVEL) {
4304		if (!path->nodes[level])
4305			return 1;
4306
4307		slot = path->slots[level] + 1;
4308		c = path->nodes[level];
4309next:
4310		if (slot >= btrfs_header_nritems(c)) {
4311			int ret;
4312			int orig_lowest;
4313			struct btrfs_key cur_key;
4314			if (level + 1 >= BTRFS_MAX_LEVEL ||
4315			    !path->nodes[level + 1])
4316				return 1;
4317
4318			if (path->locks[level + 1] || path->skip_locking) {
4319				level++;
4320				continue;
4321			}
4322
4323			slot = btrfs_header_nritems(c) - 1;
4324			if (level == 0)
4325				btrfs_item_key_to_cpu(c, &cur_key, slot);
4326			else
4327				btrfs_node_key_to_cpu(c, &cur_key, slot);
4328
4329			orig_lowest = path->lowest_level;
4330			btrfs_release_path(path);
4331			path->lowest_level = level;
4332			ret = btrfs_search_slot(NULL, root, &cur_key, path,
4333						0, 0);
4334			path->lowest_level = orig_lowest;
4335			if (ret < 0)
4336				return ret;
4337
4338			c = path->nodes[level];
4339			slot = path->slots[level];
4340			if (ret == 0)
4341				slot++;
4342			goto next;
4343		}
4344
4345		if (level == 0)
4346			btrfs_item_key_to_cpu(c, key, slot);
4347		else {
4348			u64 gen = btrfs_node_ptr_generation(c, slot);
4349
4350			if (gen < min_trans) {
4351				slot++;
4352				goto next;
4353			}
4354			btrfs_node_key_to_cpu(c, key, slot);
4355		}
4356		return 0;
4357	}
4358	return 1;
4359}
4360
4361/*
4362 * search the tree again to find a leaf with greater keys
4363 * returns 0 if it found something or 1 if there are no greater leaves.
4364 * returns < 0 on io errors.
4365 */
4366int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4367{
4368	return btrfs_next_old_leaf(root, path, 0);
4369}
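
/*
 * Illustrative sketch (editor's addition): the usual forward-iteration
 * pattern, calling btrfs_next_leaf() whenever the current leaf runs out of
 * items.  Starting key and visit callback are hypothetical.
 */
static int __maybe_unused example_iterate_items(struct btrfs_root *root,
		const struct btrfs_key *start,
		void (*visit)(struct extent_buffer *leaf, int slot))
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		visit(path->nodes[0], path->slots[0]);
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}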
4370
4371int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4372			u64 time_seq)
4373{
4374	int slot;
4375	int level;
4376	struct extent_buffer *c;
4377	struct extent_buffer *next;
4378	struct btrfs_key key;
4379	u32 nritems;
4380	int ret;
4381	int i;
4382
4383	nritems = btrfs_header_nritems(path->nodes[0]);
4384	if (nritems == 0)
4385		return 1;
4386
4387	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4388again:
4389	level = 1;
4390	next = NULL;
4391	btrfs_release_path(path);
4392
4393	path->keep_locks = 1;
4394
4395	if (time_seq)
4396		ret = btrfs_search_old_slot(root, &key, path, time_seq);
4397	else
4398		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4399	path->keep_locks = 0;
4400
4401	if (ret < 0)
4402		return ret;
4403
4404	nritems = btrfs_header_nritems(path->nodes[0]);
4405	/*
4406	 * by releasing the path above we dropped all our locks.  A balance
4407	 * could have added more items next to the key that used to be
4408	 * at the very end of the block.  So, check again here and
4409	 * advance the path if there are now more items available.
4410	 */
4411	if (nritems > 0 && path->slots[0] < nritems - 1) {
4412		if (ret == 0)
4413			path->slots[0]++;
4414		ret = 0;
4415		goto done;
4416	}
4417	/*
4418	 * So the above check misses one case:
4419	 * - after releasing the path above, someone has removed the item that
4420	 *   used to be at the very end of the block, and balance between leaves
4421	 *   gets another one with bigger key.offset to replace it.
4422	 *
4423	 * This one should be returned as well, or we can get leaf corruption
4424	 * later (esp. in __btrfs_drop_extents()).
4425	 *
4426	 * A bit more explanation of this check:
4427	 * with ret > 0, the key isn't found, the path points to the slot
4428	 * where it should be inserted, so the path->slots[0] item must be the
4429	 * bigger one.
4430	 */
4431	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4432		ret = 0;
4433		goto done;
4434	}
4435
4436	while (level < BTRFS_MAX_LEVEL) {
4437		if (!path->nodes[level]) {
4438			ret = 1;
4439			goto done;
4440		}
4441
4442		slot = path->slots[level] + 1;
4443		c = path->nodes[level];
4444		if (slot >= btrfs_header_nritems(c)) {
4445			level++;
4446			if (level == BTRFS_MAX_LEVEL) {
4447				ret = 1;
4448				goto done;
4449			}
4450			continue;
4451		}
4452
4453
4454		/*
4455		 * Our current level is where we're going to start from, and to
4456		 * make sure lockdep doesn't complain we need to drop our locks
4457		 * and nodes from 0 to our current level.
4458		 */
4459		for (i = 0; i < level; i++) {
4460			if (path->locks[i]) {
4461				btrfs_tree_read_unlock(path->nodes[i]);
4462				path->locks[i] = 0;
4463			}
4464			free_extent_buffer(path->nodes[i]);
4465			path->nodes[i] = NULL;
4466		}
4467
4468		next = c;
 
4469		ret = read_block_for_search(root, path, &next, level,
4470					    slot, &key);
4471		if (ret == -EAGAIN)
4472			goto again;
4473
4474		if (ret < 0) {
4475			btrfs_release_path(path);
4476			goto done;
4477		}
4478
4479		if (!path->skip_locking) {
4480			ret = btrfs_try_tree_read_lock(next);
4481			if (!ret && time_seq) {
4482				/*
4483				 * If we don't get the lock, we may be racing
4484				 * with push_leaf_left, holding that lock while
4485				 * itself waiting for the leaf we've currently
4486				 * locked. To solve this situation, we give up
4487				 * on our lock and cycle.
4488				 */
4489				free_extent_buffer(next);
4490				btrfs_release_path(path);
4491				cond_resched();
4492				goto again;
4493			}
4494			if (!ret)
4495				btrfs_tree_read_lock(next);
4496		}
4497		break;
4498	}
4499	path->slots[level] = slot;
4500	while (1) {
4501		level--;
4502		path->nodes[level] = next;
4503		path->slots[level] = 0;
4504		if (!path->skip_locking)
4505			path->locks[level] = BTRFS_READ_LOCK;
4506		if (!level)
4507			break;
4508
4509		ret = read_block_for_search(root, path, &next, level,
4510					    0, &key);
4511		if (ret == -EAGAIN)
4512			goto again;
4513
4514		if (ret < 0) {
4515			btrfs_release_path(path);
4516			goto done;
4517		}
4518
4519		if (!path->skip_locking)
4520			btrfs_tree_read_lock(next);
4521	}
4522	ret = 0;
4523done:
4524	unlock_up(path, 0, 1, 0, NULL);
4525
4526	return ret;
4527}
4528
4529/*
4530 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4531 * searching until it gets past min_objectid or finds an item of 'type'
4532 *
4533 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4534 */
4535int btrfs_previous_item(struct btrfs_root *root,
4536			struct btrfs_path *path, u64 min_objectid,
4537			int type)
4538{
4539	struct btrfs_key found_key;
4540	struct extent_buffer *leaf;
4541	u32 nritems;
4542	int ret;
4543
4544	while (1) {
4545		if (path->slots[0] == 0) {
4546			ret = btrfs_prev_leaf(root, path);
4547			if (ret != 0)
4548				return ret;
4549		} else {
4550			path->slots[0]--;
4551		}
4552		leaf = path->nodes[0];
4553		nritems = btrfs_header_nritems(leaf);
4554		if (nritems == 0)
4555			return 1;
4556		if (path->slots[0] == nritems)
4557			path->slots[0]--;
4558
4559		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4560		if (found_key.objectid < min_objectid)
4561			break;
4562		if (found_key.type == type)
4563			return 0;
4564		if (found_key.objectid == min_objectid &&
4565		    found_key.type < type)
4566			break;
4567	}
4568	return 1;
4569}
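
/*
 * Illustrative sketch (editor's addition): positioning a path on the last
 * item of @type for @objectid by searching past all possible keys for that
 * objectid and stepping back with btrfs_previous_item().  The helper name
 * is hypothetical.
 */
static int __maybe_unused example_find_last_of_type(struct btrfs_root *root,
		struct btrfs_path *path, u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	/* Aim just past the largest possible key for this objectid. */
	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* 0: found, the path now points at it; 1: no such item. */
	return btrfs_previous_item(root, path, objectid, type);
}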
4570
4571/*
4572 * search in the extent tree to find a previous Metadata/Data extent item
4573 * with min objectid.
4574 *
4575 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4576 */
4577int btrfs_previous_extent_item(struct btrfs_root *root,
4578			struct btrfs_path *path, u64 min_objectid)
4579{
4580	struct btrfs_key found_key;
4581	struct extent_buffer *leaf;
4582	u32 nritems;
4583	int ret;
4584
4585	while (1) {
4586		if (path->slots[0] == 0) {
4587			ret = btrfs_prev_leaf(root, path);
4588			if (ret != 0)
4589				return ret;
4590		} else {
4591			path->slots[0]--;
4592		}
4593		leaf = path->nodes[0];
4594		nritems = btrfs_header_nritems(leaf);
4595		if (nritems == 0)
4596			return 1;
4597		if (path->slots[0] == nritems)
4598			path->slots[0]--;
4599
4600		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4601		if (found_key.objectid < min_objectid)
4602			break;
4603		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4604		    found_key.type == BTRFS_METADATA_ITEM_KEY)
4605			return 0;
4606		if (found_key.objectid == min_objectid &&
4607		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
4608			break;
4609	}
4610	return 1;
4611}