Linux v6.2: fs/btrfs/delayed-ref.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.   This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
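/*
 * Illustration (editor's note, not upstream): a single COW of a subvolume
 * leaf can touch many extents.  Rather than updating the extent tree for
 * each one while still inside btrfs_search_slot(), every modification is
 * recorded as a btrfs_delayed_ref_node hanging off a per-extent
 * btrfs_delayed_ref_head, and the heads live in an rbtree in
 * btrfs_delayed_ref_root.  btrfs_run_delayed_refs() later replays the net
 * result into the extent allocation tree.
 */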

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}
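/*
 * Worked example (editor's note, hypothetical numbers): if
 * delayed_refs_rsv->size is 12MiB while delayed_refs_rsv->reserved is
 * 4MiB and the global reserve holds 6MiB, then size (12MiB) >= reserved
 * sum (10MiB) and we return true: outstanding delayed refs could outgrow
 * what is actually reserved, so callers should start throttling.
 */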

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
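/*
 * Illustration (editor's note, hypothetical numbers): with an average
 * delayed ref runtime of 100,000ns (100us), 10,000 queued entries give
 * val = 1s (NSEC_PER_SEC) and we return 1; 5,000 entries cross the
 * half-second mark and return 2.  Below that, throttling falls back to
 * the space check above.
 */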

/*
 * Release a ref head's reservation.
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
	u64 released = 0;

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
 * it'll calculate the additional size and add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
						    trans->delayed_ref_updates);
	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
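/*
 * Worked example (editor's note, hypothetical numbers): with
 * delayed_refs_rsv->size = 100MiB and ->reserved = 70MiB, the rsv is
 * short by delta = 30MiB.  Migrating 50MiB from src moves 30MiB into the
 * rsv (marking it full) and returns the remaining 20MiB to the space
 * info via to_free.
 */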

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
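/*
 * Ordering illustration (editor's note): within one head's ref_tree the
 * refs sort first by type, then by root or parent (plus objectid and
 * offset for data refs), and finally by seq when check_seq is set, so
 * otherwise-identical refs end up adjacent in modification order.  That
 * adjacency is what makes the merge pass below a simple neighbour scan.
 */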

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}
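/*
 * Lookup illustration (editor's note, hypothetical numbers): with heads
 * at bytenrs 4096 and 8192, find_ref_head(dr, 5000, false) returns NULL,
 * while find_ref_head(dr, 5000, true) returns the 8192 head because
 * return_bigger promotes the miss to the next higher entry.
 */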

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
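/*
 * Merge illustration (editor's note): an ADD_DELAYED_REF with ref_mod 1
 * followed by an otherwise identical DROP_DELAYED_REF with ref_mod 1
 * nets out to ref_mod 0, so both nodes are dropped and nothing is
 * replayed into the extent tree for that pair.
 */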

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}
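/*
 * Example (editor's note): if the existing node is an ADD with ref_mod 2
 * and we insert a DROP with ref_mod 3 for the same ref, tree_insert()
 * finds the existing node, the action flips to DROP with ref_mod
 * 3 - 2 = 1, and insert_delayed_ref() returns 1 (merged).
 */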

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}
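/*
 * Sign-flip example (editor's note): a data head with total_ref_mod -1
 * (net drop, so its csum bytes are counted in pending_csums) that
 * receives an update with ref_mod +1 moves to total_ref_mod 0, so
 * pending_csums shrinks by num_bytes and the matching delayed refs rsv
 * reservation is released.
 */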

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}
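/*
 * count_mod summary (editor's note):
 *   BTRFS_UPDATE_DELAYED_HEAD           ->  0 (no net ref change)
 *   BTRFS_DROP_DELAYED_REF              -> -1
 *   BTRFS_ADD_DELAYED_REF / _EXTENT     -> +1, with _EXTENT additionally
 *   setting must_insert_reserved so the reserved-space accounting gets
 *   fixed up when the extent is finally inserted (or never is).
 */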

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated;
 *		this can be either one of the well-known metadata trees or
 *		the subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}
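/*
 * Typical call site (sketch, editor's note): callers first fill in a
 * struct btrfs_ref describing the extent, e.g. via the
 * btrfs_init_generic_ref() and btrfs_init_tree_ref() helpers declared in
 * delayed-ref.h, then hand it to btrfs_add_delayed_tree_ref() together
 * with an optional extent_op while the transaction is still open.
 */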

/*
 * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/sort.h>
 
   9#include "ctree.h"
  10#include "delayed-ref.h"
  11#include "transaction.h"
  12#include "qgroup.h"
  13#include "space-info.h"
  14#include "tree-mod-log.h"
 
  15
  16struct kmem_cache *btrfs_delayed_ref_head_cachep;
  17struct kmem_cache *btrfs_delayed_tree_ref_cachep;
  18struct kmem_cache *btrfs_delayed_data_ref_cachep;
  19struct kmem_cache *btrfs_delayed_extent_op_cachep;
  20/*
  21 * delayed back reference update tracking.  For subvolume trees
  22 * we queue up extent allocations and backref maintenance for
  23 * delayed processing.   This avoids deep call chains where we
  24 * add extents in the middle of btrfs_search_slot, and it allows
  25 * us to buffer up frequently modified backrefs in an rb tree instead
  26 * of hammering updates on the extent allocation tree.
  27 */
  28
  29bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
  30{
  31	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
  32	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
  33	bool ret = false;
  34	u64 reserved;
  35
  36	spin_lock(&global_rsv->lock);
  37	reserved = global_rsv->reserved;
  38	spin_unlock(&global_rsv->lock);
  39
  40	/*
  41	 * Since the global reserve is just kind of magic we don't really want
  42	 * to rely on it to save our bacon, so if our size is more than the
  43	 * delayed_refs_rsv and the global rsv then it's time to think about
  44	 * bailing.
  45	 */
  46	spin_lock(&delayed_refs_rsv->lock);
  47	reserved += delayed_refs_rsv->reserved;
  48	if (delayed_refs_rsv->size >= reserved)
  49		ret = true;
  50	spin_unlock(&delayed_refs_rsv->lock);
  51	return ret;
  52}
  53
  54int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
  55{
  56	u64 num_entries =
  57		atomic_read(&trans->transaction->delayed_refs.num_entries);
  58	u64 avg_runtime;
  59	u64 val;
  60
  61	smp_mb();
  62	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
  63	val = num_entries * avg_runtime;
  64	if (val >= NSEC_PER_SEC)
  65		return 1;
  66	if (val >= NSEC_PER_SEC / 2)
  67		return 2;
  68
  69	return btrfs_check_space_for_delayed_refs(trans->fs_info);
  70}
  71
  72/**
  73 * Release a ref head's reservation
  74 *
  75 * @fs_info:  the filesystem
  76 * @nr:       number of items to drop
  77 *
  78 * This drops the delayed ref head's count from the delayed refs rsv and frees
  79 * any excess reservation we had.
  80 */
  81void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
  82{
  83	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
  84	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
  85	u64 released = 0;
  86
 
 
 
 
 
 
 
 
 
 
 
  87	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
  88	if (released)
  89		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
  90					      0, released, 0);
  91}
  92
  93/*
  94 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
  95 * @trans - the trans that may have generated delayed refs
  96 *
  97 * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
  98 * it'll calculate the additional size and add it to the delayed_refs_rsv.
  99 */
 100void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
 101{
 102	struct btrfs_fs_info *fs_info = trans->fs_info;
 103	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 104	u64 num_bytes;
 105
 106	if (!trans->delayed_ref_updates)
 107		return;
 108
 109	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
 110						    trans->delayed_ref_updates);
 
 
 
 
 
 
 
 
 
 
 
 111	spin_lock(&delayed_rsv->lock);
 112	delayed_rsv->size += num_bytes;
 113	delayed_rsv->full = 0;
 114	spin_unlock(&delayed_rsv->lock);
 115	trans->delayed_ref_updates = 0;
 116}
 117
 118/**
 119 * Transfer bytes to our delayed refs rsv
 120 *
 121 * @fs_info:   the filesystem
 122 * @src:       source block rsv to transfer from
 123 * @num_bytes: number of bytes to transfer
 124 *
 125 * This transfers up to the num_bytes amount from the src rsv to the
 126 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 127 */
 128void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
 129				       struct btrfs_block_rsv *src,
 130				       u64 num_bytes)
 131{
 132	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
 133	u64 to_free = 0;
 134
 135	spin_lock(&src->lock);
 136	src->reserved -= num_bytes;
 137	src->size -= num_bytes;
 138	spin_unlock(&src->lock);
 139
 140	spin_lock(&delayed_refs_rsv->lock);
 141	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
 142		u64 delta = delayed_refs_rsv->size -
 143			delayed_refs_rsv->reserved;
 144		if (num_bytes > delta) {
 145			to_free = num_bytes - delta;
 146			num_bytes = delta;
 147		}
 148	} else {
 149		to_free = num_bytes;
 150		num_bytes = 0;
 151	}
 152
 153	if (num_bytes)
 154		delayed_refs_rsv->reserved += num_bytes;
 155	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
 156		delayed_refs_rsv->full = 1;
 157	spin_unlock(&delayed_refs_rsv->lock);
 158
 159	if (num_bytes)
 160		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 161					      0, num_bytes, 1);
 162	if (to_free)
 163		btrfs_space_info_free_bytes_may_use(fs_info,
 164				delayed_refs_rsv->space_info, to_free);
 165}
 166
 167/**
 168 * Refill based on our delayed refs usage
 169 *
 170 * @fs_info: the filesystem
 171 * @flush:   control how we can flush for this reservation.
 172 *
 173 * This will refill the delayed block_rsv up to 1 items size worth of space and
 174 * will return -ENOSPC if we can't make the reservation.
 175 */
 176int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
 177				  enum btrfs_reserve_flush_enum flush)
 178{
 179	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
 180	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
 181	u64 num_bytes = 0;
 182	int ret = -ENOSPC;
 183
 184	spin_lock(&block_rsv->lock);
 185	if (block_rsv->reserved < block_rsv->size) {
 186		num_bytes = block_rsv->size - block_rsv->reserved;
 187		num_bytes = min(num_bytes, limit);
 188	}
 189	spin_unlock(&block_rsv->lock);
 190
 191	if (!num_bytes)
 192		return 0;
 193
 194	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
 195					   num_bytes, flush);
 196	if (ret)
 197		return ret;
 198	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
 199	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 200				      0, num_bytes, 1);
 201	return 0;
 202}
 203
 204/*
 205 * compare two delayed tree backrefs with same bytenr and type
 206 */
 207static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
 208			  struct btrfs_delayed_tree_ref *ref2)
 209{
 210	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
 211		if (ref1->root < ref2->root)
 212			return -1;
 213		if (ref1->root > ref2->root)
 214			return 1;
 215	} else {
 216		if (ref1->parent < ref2->parent)
 217			return -1;
 218		if (ref1->parent > ref2->parent)
 219			return 1;
 220	}
 221	return 0;
 222}
 223
 224/*
 225 * compare two delayed data backrefs with same bytenr and type
 226 */
 227static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
 228			  struct btrfs_delayed_data_ref *ref2)
 229{
 230	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 231		if (ref1->root < ref2->root)
 232			return -1;
 233		if (ref1->root > ref2->root)
 234			return 1;
 235		if (ref1->objectid < ref2->objectid)
 236			return -1;
 237		if (ref1->objectid > ref2->objectid)
 238			return 1;
 239		if (ref1->offset < ref2->offset)
 240			return -1;
 241		if (ref1->offset > ref2->offset)
 242			return 1;
 243	} else {
 244		if (ref1->parent < ref2->parent)
 245			return -1;
 246		if (ref1->parent > ref2->parent)
 247			return 1;
 248	}
 249	return 0;
 250}
 251
 252static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 253		     struct btrfs_delayed_ref_node *ref2,
 254		     bool check_seq)
 255{
 256	int ret = 0;
 257
 258	if (ref1->type < ref2->type)
 259		return -1;
 260	if (ref1->type > ref2->type)
 261		return 1;
 262	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
 263	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
 264		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
 265				     btrfs_delayed_node_to_tree_ref(ref2));
 266	else
 267		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
 268				     btrfs_delayed_node_to_data_ref(ref2));
 269	if (ret)
 270		return ret;
 271	if (check_seq) {
 272		if (ref1->seq < ref2->seq)
 273			return -1;
 274		if (ref1->seq > ref2->seq)
 275			return 1;
 276	}
 277	return 0;
 278}
 279
 280/* insert a new ref to head ref rbtree */
 281static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
 282						   struct rb_node *node)
 283{
 284	struct rb_node **p = &root->rb_root.rb_node;
 285	struct rb_node *parent_node = NULL;
 286	struct btrfs_delayed_ref_head *entry;
 287	struct btrfs_delayed_ref_head *ins;
 288	u64 bytenr;
 289	bool leftmost = true;
 290
 291	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 292	bytenr = ins->bytenr;
 293	while (*p) {
 294		parent_node = *p;
 295		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
 296				 href_node);
 297
 298		if (bytenr < entry->bytenr) {
 299			p = &(*p)->rb_left;
 300		} else if (bytenr > entry->bytenr) {
 301			p = &(*p)->rb_right;
 302			leftmost = false;
 303		} else {
 304			return entry;
 305		}
 306	}
 307
 308	rb_link_node(node, parent_node, p);
 309	rb_insert_color_cached(node, root, leftmost);
 310	return NULL;
 311}
 312
 313static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 314		struct btrfs_delayed_ref_node *ins)
 315{
 316	struct rb_node **p = &root->rb_root.rb_node;
 317	struct rb_node *node = &ins->ref_node;
 318	struct rb_node *parent_node = NULL;
 319	struct btrfs_delayed_ref_node *entry;
 320	bool leftmost = true;
 321
 322	while (*p) {
 323		int comp;
 324
 325		parent_node = *p;
 326		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
 327				 ref_node);
 328		comp = comp_refs(ins, entry, true);
 329		if (comp < 0) {
 330			p = &(*p)->rb_left;
 331		} else if (comp > 0) {
 332			p = &(*p)->rb_right;
 333			leftmost = false;
 334		} else {
 335			return entry;
 336		}
 337	}
 338
 339	rb_link_node(node, parent_node, p);
 340	rb_insert_color_cached(node, root, leftmost);
 341	return NULL;
 342}
 343
 344static struct btrfs_delayed_ref_head *find_first_ref_head(
 345		struct btrfs_delayed_ref_root *dr)
 346{
 347	struct rb_node *n;
 348	struct btrfs_delayed_ref_head *entry;
 349
 350	n = rb_first_cached(&dr->href_root);
 351	if (!n)
 352		return NULL;
 353
 354	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
 355
 356	return entry;
 357}
 358
 359/*
 360 * Find a head entry based on bytenr. This returns the delayed ref head if it
 361 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 362 * is given, the next bigger entry is returned if no exact match is found.
 363 */
 364static struct btrfs_delayed_ref_head *find_ref_head(
 365		struct btrfs_delayed_ref_root *dr, u64 bytenr,
 366		bool return_bigger)
 367{
 368	struct rb_root *root = &dr->href_root.rb_root;
 369	struct rb_node *n;
 370	struct btrfs_delayed_ref_head *entry;
 371
 372	n = root->rb_node;
 373	entry = NULL;
 374	while (n) {
 375		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
 376
 377		if (bytenr < entry->bytenr)
 378			n = n->rb_left;
 379		else if (bytenr > entry->bytenr)
 380			n = n->rb_right;
 381		else
 382			return entry;
 383	}
 384	if (entry && return_bigger) {
 385		if (bytenr > entry->bytenr) {
 386			n = rb_next(&entry->href_node);
 387			if (!n)
 388				return NULL;
 389			entry = rb_entry(n, struct btrfs_delayed_ref_head,
 390					 href_node);
 391		}
 392		return entry;
 393	}
 394	return NULL;
 395}
 396
 397int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
 398			   struct btrfs_delayed_ref_head *head)
 399{
 400	lockdep_assert_held(&delayed_refs->lock);
 401	if (mutex_trylock(&head->mutex))
 402		return 0;
 403
 404	refcount_inc(&head->refs);
 405	spin_unlock(&delayed_refs->lock);
 406
 407	mutex_lock(&head->mutex);
 408	spin_lock(&delayed_refs->lock);
 409	if (RB_EMPTY_NODE(&head->href_node)) {
 410		mutex_unlock(&head->mutex);
 411		btrfs_put_delayed_ref_head(head);
 412		return -EAGAIN;
 413	}
 414	btrfs_put_delayed_ref_head(head);
 415	return 0;
 416}
 417
 418static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 419				    struct btrfs_delayed_ref_root *delayed_refs,
 420				    struct btrfs_delayed_ref_head *head,
 421				    struct btrfs_delayed_ref_node *ref)
 422{
 423	lockdep_assert_held(&head->lock);
 424	rb_erase_cached(&ref->ref_node, &head->ref_tree);
 425	RB_CLEAR_NODE(&ref->ref_node);
 426	if (!list_empty(&ref->add_list))
 427		list_del(&ref->add_list);
 428	ref->in_tree = 0;
 429	btrfs_put_delayed_ref(ref);
 430	atomic_dec(&delayed_refs->num_entries);
 431}
 432
 433static bool merge_ref(struct btrfs_trans_handle *trans,
 434		      struct btrfs_delayed_ref_root *delayed_refs,
 435		      struct btrfs_delayed_ref_head *head,
 436		      struct btrfs_delayed_ref_node *ref,
 437		      u64 seq)
 438{
 439	struct btrfs_delayed_ref_node *next;
 440	struct rb_node *node = rb_next(&ref->ref_node);
 441	bool done = false;
 442
 443	while (!done && node) {
 444		int mod;
 445
 446		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 447		node = rb_next(node);
 448		if (seq && next->seq >= seq)
 449			break;
 450		if (comp_refs(ref, next, false))
 451			break;
 452
 453		if (ref->action == next->action) {
 454			mod = next->ref_mod;
 455		} else {
 456			if (ref->ref_mod < next->ref_mod) {
 457				swap(ref, next);
 458				done = true;
 459			}
 460			mod = -next->ref_mod;
 461		}
 462
 463		drop_delayed_ref(trans, delayed_refs, head, next);
 464		ref->ref_mod += mod;
 465		if (ref->ref_mod == 0) {
 466			drop_delayed_ref(trans, delayed_refs, head, ref);
 467			done = true;
 468		} else {
 469			/*
 470			 * Can't have multiples of the same ref on a tree block.
 471			 */
 472			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
 473				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
 474		}
 475	}
 476
 477	return done;
 478}
 479
 480void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 481			      struct btrfs_delayed_ref_root *delayed_refs,
 482			      struct btrfs_delayed_ref_head *head)
 483{
 484	struct btrfs_fs_info *fs_info = trans->fs_info;
 485	struct btrfs_delayed_ref_node *ref;
 486	struct rb_node *node;
 487	u64 seq = 0;
 488
 489	lockdep_assert_held(&head->lock);
 490
 491	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 492		return;
 493
 494	/* We don't have too many refs to merge for data. */
 495	if (head->is_data)
 496		return;
 497
 498	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 499again:
 500	for (node = rb_first_cached(&head->ref_tree); node;
 501	     node = rb_next(node)) {
 502		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 503		if (seq && ref->seq >= seq)
 504			continue;
 505		if (merge_ref(trans, delayed_refs, head, ref, seq))
 506			goto again;
 507	}
 508}
 509
 510int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
 511{
 512	int ret = 0;
 513	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 514
 515	if (min_seq != 0 && seq >= min_seq) {
 516		btrfs_debug(fs_info,
 517			    "holding back delayed_ref %llu, lowest is %llu",
 518			    seq, min_seq);
 519		ret = 1;
 520	}
 521
 522	return ret;
 523}
 524
 525struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 526		struct btrfs_delayed_ref_root *delayed_refs)
 527{
 528	struct btrfs_delayed_ref_head *head;
 529
 530again:
 531	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
 532			     true);
 533	if (!head && delayed_refs->run_delayed_start != 0) {
 534		delayed_refs->run_delayed_start = 0;
 535		head = find_first_ref_head(delayed_refs);
 536	}
 537	if (!head)
 538		return NULL;
 539
 540	while (head->processing) {
 541		struct rb_node *node;
 542
 543		node = rb_next(&head->href_node);
 544		if (!node) {
 545			if (delayed_refs->run_delayed_start == 0)
 546				return NULL;
 547			delayed_refs->run_delayed_start = 0;
 548			goto again;
 549		}
 550		head = rb_entry(node, struct btrfs_delayed_ref_head,
 551				href_node);
 552	}
 553
 554	head->processing = 1;
 555	WARN_ON(delayed_refs->num_heads_ready == 0);
 556	delayed_refs->num_heads_ready--;
 557	delayed_refs->run_delayed_start = head->bytenr +
 558		head->num_bytes;
 559	return head;
 560}
 561
 562void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 563			   struct btrfs_delayed_ref_head *head)
 564{
 565	lockdep_assert_held(&delayed_refs->lock);
 566	lockdep_assert_held(&head->lock);
 567
 568	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
 569	RB_CLEAR_NODE(&head->href_node);
 570	atomic_dec(&delayed_refs->num_entries);
 571	delayed_refs->num_heads--;
 572	if (head->processing == 0)
 573		delayed_refs->num_heads_ready--;
 574}
 575
 576/*
 577 * Helper to insert the ref_node to the tail or merge with tail.
 578 *
 579 * Return 0 for insert.
 580 * Return >0 for merge.
 581 */
 582static int insert_delayed_ref(struct btrfs_trans_handle *trans,
 583			      struct btrfs_delayed_ref_root *root,
 584			      struct btrfs_delayed_ref_head *href,
 585			      struct btrfs_delayed_ref_node *ref)
 586{
 587	struct btrfs_delayed_ref_node *exist;
 588	int mod;
 589	int ret = 0;
 590
 591	spin_lock(&href->lock);
 592	exist = tree_insert(&href->ref_tree, ref);
 593	if (!exist)
 594		goto inserted;
 595
 596	/* Now we are sure we can merge */
 597	ret = 1;
 598	if (exist->action == ref->action) {
 599		mod = ref->ref_mod;
 600	} else {
 601		/* Need to change action */
 602		if (exist->ref_mod < ref->ref_mod) {
 603			exist->action = ref->action;
 604			mod = -exist->ref_mod;
 605			exist->ref_mod = ref->ref_mod;
 606			if (ref->action == BTRFS_ADD_DELAYED_REF)
 607				list_add_tail(&exist->add_list,
 608					      &href->ref_add_list);
 609			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
 610				ASSERT(!list_empty(&exist->add_list));
 611				list_del(&exist->add_list);
 612			} else {
 613				ASSERT(0);
 614			}
 615		} else
 616			mod = -ref->ref_mod;
 617	}
 618	exist->ref_mod += mod;
 619
 620	/* remove existing tail if its ref_mod is zero */
 621	if (exist->ref_mod == 0)
 622		drop_delayed_ref(trans, root, href, exist);
 623	spin_unlock(&href->lock);
 624	return ret;
 625inserted:
 626	if (ref->action == BTRFS_ADD_DELAYED_REF)
 627		list_add_tail(&ref->add_list, &href->ref_add_list);
 628	atomic_inc(&root->num_entries);
 629	spin_unlock(&href->lock);
 630	return ret;
 631}
 632
 633/*
 634 * helper function to update the accounting in the head ref
 635 * existing and update must have the same bytenr
 636 */
 637static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 638			 struct btrfs_delayed_ref_head *existing,
 639			 struct btrfs_delayed_ref_head *update)
 640{
 641	struct btrfs_delayed_ref_root *delayed_refs =
 642		&trans->transaction->delayed_refs;
 643	struct btrfs_fs_info *fs_info = trans->fs_info;
 644	int old_ref_mod;
 645
 646	BUG_ON(existing->is_data != update->is_data);
 647
 648	spin_lock(&existing->lock);
 649	if (update->must_insert_reserved) {
 650		/*
 651		 * If the extent was freed and then reallocated before the
 652		 * delayed ref entries were processed, we can end up with
 653		 * an existing head ref without the must_insert_reserved
 654		 * flag set.
 655		 * Set it again here.
 656		 */
 657		existing->must_insert_reserved = update->must_insert_reserved;
 658
 659		/*
 660		 * Update num_bytes to make sure the accounting
 661		 * is done correctly.
 662		 */
 663		existing->num_bytes = update->num_bytes;
 664
 665	}
 666
 667	if (update->extent_op) {
 668		if (!existing->extent_op) {
 669			existing->extent_op = update->extent_op;
 670		} else {
 671			if (update->extent_op->update_key) {
 672				memcpy(&existing->extent_op->key,
 673				       &update->extent_op->key,
 674				       sizeof(update->extent_op->key));
 675				existing->extent_op->update_key = true;
 676			}
 677			if (update->extent_op->update_flags) {
 678				existing->extent_op->flags_to_set |=
 679					update->extent_op->flags_to_set;
 680				existing->extent_op->update_flags = true;
 681			}
 682			btrfs_free_delayed_extent_op(update->extent_op);
 683		}
 684	}
 685	/*
 686	 * Update the reference mod on the head to reflect this new operation.
 687	 * We only need the lock here because the existing head could be
 688	 * processed concurrently; refs we have just added are not yet visible.
 689	 */
 690	old_ref_mod = existing->total_ref_mod;
 691	existing->ref_mod += update->ref_mod;
 692	existing->total_ref_mod += update->ref_mod;
 693
 694	/*
 695	 * If we are going from a positive ref mod to a negative or vice
 696	 * versa we need to make sure to adjust pending_csums accordingly.
 697	 */
 698	if (existing->is_data) {
 699		u64 csum_leaves =
 700			btrfs_csum_bytes_to_leaves(fs_info,
 701						   existing->num_bytes);
 702
 703		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
 704			delayed_refs->pending_csums -= existing->num_bytes;
 705			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
 706		}
 707		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
 708			delayed_refs->pending_csums += existing->num_bytes;
 709			trans->delayed_ref_updates += csum_leaves;
 710		}
 711	}
 712
 713	spin_unlock(&existing->lock);
 714}
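
    /*
     * Example of the sign-flip handling above, for a 1MiB data extent: when
     * total_ref_mod goes from >= 0 to negative the extent is now expected to
     * be freed, so 1MiB is added to pending_csums and the csum leaves needed
     * to delete its checksum items are added to trans->delayed_ref_updates
     * (picked up later by btrfs_update_delayed_refs_rsv()).  A later update
     * that brings total_ref_mod back to >= 0 subtracts the 1MiB again and
     * releases the csum leaf reservation.
     */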
 715
 716static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 717				  struct btrfs_qgroup_extent_record *qrecord,
 718				  u64 bytenr, u64 num_bytes, u64 ref_root,
 719				  u64 reserved, int action, bool is_data,
 720				  bool is_system)
 721{
 722	int count_mod = 1;
 723	int must_insert_reserved = 0;
 724
 725	/* If reserved is provided, it must be a data extent. */
 726	BUG_ON(!is_data && reserved);
 727
 728	/*
 729	 * The head node stores the sum of all the mods, so dropping a ref
 730	 * should drop the sum in the head node by one.
 731	 */
 732	if (action == BTRFS_UPDATE_DELAYED_HEAD)
 733		count_mod = 0;
 734	else if (action == BTRFS_DROP_DELAYED_REF)
 735		count_mod = -1;
 736
 737	/*
 738	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
 739	 * accounting when the extent is finally added, or if a later
 740	 * modification deletes the delayed ref without ever inserting the
 741	 * extent into the extent allocation tree.  ref->must_insert_reserved
 742	 * is the flag used to record that accounting mods are required.
 743	 *
 744	 * Once we record must_insert_reserved, switch the action to
 745	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
 746	 */
 747	if (action == BTRFS_ADD_DELAYED_EXTENT)
 748		must_insert_reserved = 1;
 749	else
 750		must_insert_reserved = 0;
 751
 752	refcount_set(&head_ref->refs, 1);
 753	head_ref->bytenr = bytenr;
 754	head_ref->num_bytes = num_bytes;
 755	head_ref->ref_mod = count_mod;
 756	head_ref->must_insert_reserved = must_insert_reserved;
 757	head_ref->is_data = is_data;
 758	head_ref->is_system = is_system;
 759	head_ref->ref_tree = RB_ROOT_CACHED;
 760	INIT_LIST_HEAD(&head_ref->ref_add_list);
 761	RB_CLEAR_NODE(&head_ref->href_node);
 762	head_ref->processing = 0;
 763	head_ref->total_ref_mod = count_mod;
 764	spin_lock_init(&head_ref->lock);
 765	mutex_init(&head_ref->mutex);
 766
 767	if (qrecord) {
 768		if (ref_root && reserved) {
 769			qrecord->data_rsv = reserved;
 770			qrecord->data_rsv_refroot = ref_root;
 771		}
 772		qrecord->bytenr = bytenr;
 773		qrecord->num_bytes = num_bytes;
 774		qrecord->old_roots = NULL;
 775	}
 776}
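
    /*
     * So a head created for BTRFS_ADD_DELAYED_EXTENT or BTRFS_ADD_DELAYED_REF
     * starts with ref_mod == total_ref_mod == 1, a BTRFS_DROP_DELAYED_REF
     * head starts at -1, and a BTRFS_UPDATE_DELAYED_HEAD head starts at 0
     * since it modifies no reference counts.
     */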
 777
 778/*
 779 * Helper function to actually insert a head node into the rbtree.
 780 * This does all the dirty work in terms of maintaining the correct
 781 * overall modification count.
 782 */
 783static noinline struct btrfs_delayed_ref_head *
 784add_delayed_ref_head(struct btrfs_trans_handle *trans,
 785		     struct btrfs_delayed_ref_head *head_ref,
 786		     struct btrfs_qgroup_extent_record *qrecord,
 787		     int action, int *qrecord_inserted_ret)
 788{
 789	struct btrfs_delayed_ref_head *existing;
 790	struct btrfs_delayed_ref_root *delayed_refs;
 791	int qrecord_inserted = 0;
 792
 793	delayed_refs = &trans->transaction->delayed_refs;
 794
 795	/* Record qgroup extent info if provided */
 796	if (qrecord) {
 797		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
 798					delayed_refs, qrecord))
 799			kfree(qrecord);
 800		else
 801			qrecord_inserted = 1;
 802	}
 803
 804	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
 805
 806	existing = htree_insert(&delayed_refs->href_root,
 807				&head_ref->href_node);
 808	if (existing) {
 809		update_existing_head_ref(trans, existing, head_ref);
 810		/*
 811		 * We've updated the existing ref, free the newly
 812		 * allocated ref.
 813		 */
 814		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 815		head_ref = existing;
 816	} else {
 817		if (head_ref->is_data && head_ref->ref_mod < 0) {
 818			delayed_refs->pending_csums += head_ref->num_bytes;
 819			trans->delayed_ref_updates +=
 820				btrfs_csum_bytes_to_leaves(trans->fs_info,
 821							   head_ref->num_bytes);
 822		}
 823		delayed_refs->num_heads++;
 824		delayed_refs->num_heads_ready++;
 825		atomic_inc(&delayed_refs->num_entries);
 826		trans->delayed_ref_updates++;
 827	}
 828	if (qrecord_inserted_ret)
 829		*qrecord_inserted_ret = qrecord_inserted;
 830
 831	return head_ref;
 832}
 833
 834/*
 835 * init_delayed_ref_common - Initialize the structure which represents a
 836 *			     modification to an extent.
 837 *
 838 * @fs_info:    The btrfs_fs_info of the mounted filesystem.
 839 *
 840 * @ref:	The structure which is going to be initialized.
 841 *
 842 * @bytenr:	The logical address of the extent for which a modification is
 843 *		going to be recorded.
 844 *
 845 * @num_bytes:  Size of the extent whose modification is being recorded.
 846 *
 847 * @ref_root:	The id of the root where this modification has originated; it
 848 *		can be either one of the well-known metadata trees or the
 849 *		subvolume id which references this extent.
 850 *
 851 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 852 *		BTRFS_ADD_DELAYED_EXTENT.
 853 *
 854 * @ref_type:	Holds the type of the extent which is being recorded, can be
 855 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 856 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 857 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
 858 */
 859static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
 860				    struct btrfs_delayed_ref_node *ref,
 861				    u64 bytenr, u64 num_bytes, u64 ref_root,
 862				    int action, u8 ref_type)
 863{
 864	u64 seq = 0;
 865
 866	if (action == BTRFS_ADD_DELAYED_EXTENT)
 867		action = BTRFS_ADD_DELAYED_REF;
 868
 869	if (is_fstree(ref_root))
 870		seq = atomic64_read(&fs_info->tree_mod_seq);
 871
 872	refcount_set(&ref->refs, 1);
 873	ref->bytenr = bytenr;
 874	ref->num_bytes = num_bytes;
 875	ref->ref_mod = 1;
 876	ref->action = action;
 877	ref->is_head = 0;
 878	ref->in_tree = 1;
 879	ref->seq = seq;
 880	ref->type = ref_type;
 881	RB_CLEAR_NODE(&ref->ref_node);
 882	INIT_LIST_HEAD(&ref->add_list);
 883}
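
    /*
     * The seq assigned above is what ties delayed refs to the tree mod log:
     * while a tree mod log user with an older sequence number is still
     * active, refs carrying a newer seq are deferred by the delayed ref
     * runner so that backref walks see a consistent view.
     */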
 884
 885/*
 886 * Add a delayed tree ref.  This does all of the accounting required
 887 * to make sure the delayed ref is eventually processed before this
 888 * transaction commits.
 889 */
 890int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 891			       struct btrfs_ref *generic_ref,
 892			       struct btrfs_delayed_extent_op *extent_op)
 893{
 894	struct btrfs_fs_info *fs_info = trans->fs_info;
 895	struct btrfs_delayed_tree_ref *ref;
 896	struct btrfs_delayed_ref_head *head_ref;
 897	struct btrfs_delayed_ref_root *delayed_refs;
 898	struct btrfs_qgroup_extent_record *record = NULL;
 899	int qrecord_inserted;
 900	bool is_system;
 901	int action = generic_ref->action;
 902	int level = generic_ref->tree_ref.level;
 903	int ret;
 904	u64 bytenr = generic_ref->bytenr;
 905	u64 num_bytes = generic_ref->len;
 906	u64 parent = generic_ref->parent;
 907	u8 ref_type;
 908
 909	is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
 910
 911	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
 912	BUG_ON(extent_op && extent_op->is_data);
 913	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
 914	if (!ref)
 915		return -ENOMEM;
 916
 917	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 918	if (!head_ref) {
 919		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 920		return -ENOMEM;
 921	}
 922
 923	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
 924	    is_fstree(generic_ref->real_root) &&
 925	    is_fstree(generic_ref->tree_ref.root) &&
 926	    !generic_ref->skip_qgroup) {
 927		record = kzalloc(sizeof(*record), GFP_NOFS);
 928		if (!record) {
 929			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 930			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 931			return -ENOMEM;
 932		}
 933	}
 934
 935	if (parent)
 936		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
 937	else
 938		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
 939
 940	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
 941				generic_ref->tree_ref.root, action, ref_type);
 942	ref->root = generic_ref->tree_ref.root;
 943	ref->parent = parent;
 944	ref->level = level;
 945
 946	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
 947			      generic_ref->tree_ref.root, 0, action, false,
 948			      is_system);
 949	head_ref->extent_op = extent_op;
 950
 951	delayed_refs = &trans->transaction->delayed_refs;
 952	spin_lock(&delayed_refs->lock);
 953
 954	/*
 955	 * insert both the head node and the new ref without dropping
 956	 * the spin lock
 957	 */
 958	head_ref = add_delayed_ref_head(trans, head_ref, record,
 959					action, &qrecord_inserted);
 960
 961	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 962	spin_unlock(&delayed_refs->lock);
 963
 964	/*
 965	 * Need to update the delayed_refs_rsv with any changes we may have
 966	 * made.
 967	 */
 968	btrfs_update_delayed_refs_rsv(trans);
 969
 970	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
 971				   action == BTRFS_ADD_DELAYED_EXTENT ?
 972				   BTRFS_ADD_DELAYED_REF : action);
 973	if (ret > 0)
 974		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 975
 976	if (qrecord_inserted)
 977		return btrfs_qgroup_trace_extent_post(trans, record);
 978
 979	return 0;
 980}
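
    /*
     * A minimal caller sketch (hypothetical names, for illustration only;
     * real callers typically build the btrfs_ref with the
     * btrfs_init_generic_ref() and btrfs_init_tree_ref() helpers).  Only the
     * fields consumed above are shown:
     *
     *	struct btrfs_ref ref = {
     *		.type = BTRFS_REF_METADATA,
     *		.action = BTRFS_ADD_DELAYED_EXTENT,
     *		.bytenr = new_block_bytenr,
     *		.len = fs_info->nodesize,
     *		.parent = 0,
     *		.real_root = root_objectid,
     *	};
     *
     *	ref.tree_ref.root = root_objectid;
     *	ref.tree_ref.level = level;
     *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
     *
     * new_block_bytenr, root_objectid and level are placeholders here.
     */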
 981
 982/*
 983 * Add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 984 */
 985int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 986			       struct btrfs_ref *generic_ref,
 987			       u64 reserved)
 988{
 989	struct btrfs_fs_info *fs_info = trans->fs_info;
 990	struct btrfs_delayed_data_ref *ref;
 991	struct btrfs_delayed_ref_head *head_ref;
 992	struct btrfs_delayed_ref_root *delayed_refs;
 993	struct btrfs_qgroup_extent_record *record = NULL;
 994	int qrecord_inserted;
 995	int action = generic_ref->action;
 996	int ret;
 997	u64 bytenr = generic_ref->bytenr;
 998	u64 num_bytes = generic_ref->len;
 999	u64 parent = generic_ref->parent;
1000	u64 ref_root = generic_ref->data_ref.ref_root;
1001	u64 owner = generic_ref->data_ref.ino;
1002	u64 offset = generic_ref->data_ref.offset;
1003	u8 ref_type;
1004
1005	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
1006	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
1007	if (!ref)
1008		return -ENOMEM;
1009
1010	if (parent)
1011		ref_type = BTRFS_SHARED_DATA_REF_KEY;
1012	else
1013		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
1014	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1015				ref_root, action, ref_type);
1016	ref->root = ref_root;
1017	ref->parent = parent;
1018	ref->objectid = owner;
1019	ref->offset = offset;
1020
1022	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1023	if (!head_ref) {
1024		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1025		return -ENOMEM;
1026	}
1027
1028	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1029	    is_fstree(ref_root) &&
1030	    is_fstree(generic_ref->real_root) &&
1031	    !generic_ref->skip_qgroup) {
1032		record = kzalloc(sizeof(*record), GFP_NOFS);
1033		if (!record) {
1034			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1035			kmem_cache_free(btrfs_delayed_ref_head_cachep,
1036					head_ref);
1037			return -ENOMEM;
1038		}
1039	}
1040
1041	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
1042			      reserved, action, true, false);
1043	head_ref->extent_op = NULL;
1044
1045	delayed_refs = &trans->transaction->delayed_refs;
1046	spin_lock(&delayed_refs->lock);
1047
1048	/*
1049	 * insert both the head node and the new ref without dropping
1050	 * the spin lock
1051	 */
1052	head_ref = add_delayed_ref_head(trans, head_ref, record,
1053					action, &qrecord_inserted);
1054
1055	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
1056	spin_unlock(&delayed_refs->lock);
1057
1058	/*
1059	 * Need to update the delayed_refs_rsv with any changes we may have
1060	 * made.
1061	 */
1062	btrfs_update_delayed_refs_rsv(trans);
1063
1064	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
1065				   action == BTRFS_ADD_DELAYED_EXTENT ?
1066				   BTRFS_ADD_DELAYED_REF : action);
1067	if (ret > 0)
1068		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1069
1071	if (qrecord_inserted)
1072		return btrfs_qgroup_trace_extent_post(trans, record);
1073	return 0;
1074}
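
    /*
     * A matching sketch for the data path (again with hypothetical names;
     * real callers typically use btrfs_init_generic_ref() and
     * btrfs_init_data_ref()):
     *
     *	struct btrfs_ref ref = {
     *		.type = BTRFS_REF_DATA,
     *		.action = BTRFS_ADD_DELAYED_REF,
     *		.bytenr = extent_bytenr,
     *		.len = extent_len,
     *		.real_root = root_objectid,
     *	};
     *
     *	ref.data_ref.ref_root = root_objectid;
     *	ref.data_ref.ino = inode_num;
     *	ref.data_ref.offset = file_offset;
     *	ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
     *
     * @reserved is non-zero only when a freshly allocated data extent still
     * carries a qgroup data reservation to hand over (see
     * init_delayed_ref_head() above).
     */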
1075
1076int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1077				u64 bytenr, u64 num_bytes,
1078				struct btrfs_delayed_extent_op *extent_op)
1079{
1080	struct btrfs_delayed_ref_head *head_ref;
1081	struct btrfs_delayed_ref_root *delayed_refs;
1082
1083	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1084	if (!head_ref)
1085		return -ENOMEM;
1086
1087	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
1088			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
1089			      false);
1090	head_ref->extent_op = extent_op;
1091
1092	delayed_refs = &trans->transaction->delayed_refs;
1093	spin_lock(&delayed_refs->lock);
1094
1095	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
1096			     NULL);
1097
1098	spin_unlock(&delayed_refs->lock);
1099
1100	/*
1101	 * Need to update the delayed_refs_rsv with any changes we may have
1102	 * made.
1103	 */
1104	btrfs_update_delayed_refs_rsv(trans);
1105	return 0;
1106}
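
    /*
     * An extent_op queued this way carries only metadata fix-ups for the
     * extent item: when the head is run, update_key stores a new key in the
     * extent item and update_flags ORs flags_to_set into its flags.  If
     * another extent_op is already pending for the same head, the two are
     * merged in update_existing_head_ref() above.
     */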
1107
1108/*
1109 * This does a simple search for the head node for a given extent.  Returns the
1110 * head node if found, or NULL if not.
1111 */
1112struct btrfs_delayed_ref_head *
1113btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
1114{
1115	lockdep_assert_held(&delayed_refs->lock);
1116
1117	return find_ref_head(delayed_refs, bytenr, false);
1118}
1119
1120void __cold btrfs_delayed_ref_exit(void)
1121{
1122	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1123	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
1124	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
1125	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1126}
1127
1128int __init btrfs_delayed_ref_init(void)
1129{
1130	btrfs_delayed_ref_head_cachep = kmem_cache_create(
1131				"btrfs_delayed_ref_head",
1132				sizeof(struct btrfs_delayed_ref_head), 0,
1133				SLAB_MEM_SPREAD, NULL);
1134	if (!btrfs_delayed_ref_head_cachep)
1135		goto fail;
1136
1137	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
1138				"btrfs_delayed_tree_ref",
1139				sizeof(struct btrfs_delayed_tree_ref), 0,
1140				SLAB_MEM_SPREAD, NULL);
1141	if (!btrfs_delayed_tree_ref_cachep)
1142		goto fail;
1143
1144	btrfs_delayed_data_ref_cachep = kmem_cache_create(
1145				"btrfs_delayed_data_ref",
1146				sizeof(struct btrfs_delayed_data_ref), 0,
1147				SLAB_MEM_SPREAD, NULL);
1148	if (!btrfs_delayed_data_ref_cachep)
1149		goto fail;
1150
1151	btrfs_delayed_extent_op_cachep = kmem_cache_create(
1152				"btrfs_delayed_extent_op",
1153				sizeof(struct btrfs_delayed_extent_op), 0,
1154				SLAB_MEM_SPREAD, NULL);
1155	if (!btrfs_delayed_extent_op_cachep)
1156		goto fail;
1157
1158	return 0;
1159fail:
1160	btrfs_delayed_ref_exit();
1161	return -ENOMEM;
1162}