v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/sort.h>
   9#include "messages.h"
  10#include "ctree.h"
  11#include "delayed-ref.h"
  12#include "extent-tree.h"
  13#include "transaction.h"
  14#include "qgroup.h"
  15#include "space-info.h"
  16#include "tree-mod-log.h"
  17#include "fs.h"
  18
  19struct kmem_cache *btrfs_delayed_ref_head_cachep;
  20struct kmem_cache *btrfs_delayed_ref_node_cachep;
  21struct kmem_cache *btrfs_delayed_extent_op_cachep;
  22/*
  23 * delayed back reference update tracking.  For subvolume trees
  24 * we queue up extent allocations and backref maintenance for
  25 * delayed processing.   This avoids deep call chains where we
  26 * add extents in the middle of btrfs_search_slot, and it allows
  27 * us to buffer up frequently modified backrefs in an rb tree instead
  28 * of hammering updates on the extent allocation tree.
  29 */
  30
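/*
 * Check whether the delayed refs block reserve needs more space than what is
 * currently reserved in it plus the global block reserve.  Returns true when
 * it does, i.e. when the caller should consider flushing delayed refs before
 * queueing more work.
 */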
  31bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
  32{
  33	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
  34	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
  35	bool ret = false;
  36	u64 reserved;
  37
  38	spin_lock(&global_rsv->lock);
  39	reserved = global_rsv->reserved;
  40	spin_unlock(&global_rsv->lock);
  41
  42	/*
  43	 * Since the global reserve is just kind of magic we don't really want
  44	 * to rely on it to save our bacon, so if our size is more than the
  45	 * delayed_refs_rsv and the global rsv then it's time to think about
  46	 * bailing.
  47	 */
  48	spin_lock(&delayed_refs_rsv->lock);
  49	reserved += delayed_refs_rsv->reserved;
  50	if (delayed_refs_rsv->size >= reserved)
  51		ret = true;
  52	spin_unlock(&delayed_refs_rsv->lock);
  53	return ret;
  54}
  55
  56/*
  57 * Release a ref head's reservation.
  58 *
  59 * @fs_info:  the filesystem
  60 * @nr_refs:  number of delayed refs to drop
  61 * @nr_csums: number of csum items to drop
  62 *
  63 * Drops the delayed ref head's count from the delayed refs rsv and frees any
  64 * excess reservation we had.
  65 */
  66void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
  67{
  68	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
  69	u64 num_bytes;
  70	u64 released;
  71
  72	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
  73	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
  74
  75	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
  76	if (released)
  77		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
  78					      0, released, 0);
  79}
  80
  81/*
  82 * Adjust the size of the delayed refs rsv.
  83 *
  84 * This is to be called anytime we may have adjusted trans->delayed_ref_updates
  85 * or trans->delayed_ref_csum_deletions. It will calculate the additional size and
  86 * add it to the delayed_refs_rsv.
  87 */
  88void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
  89{
  90	struct btrfs_fs_info *fs_info = trans->fs_info;
  91	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
  92	struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
  93	u64 num_bytes;
  94	u64 reserved_bytes;
  95
  96	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
  97	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
  98						       trans->delayed_ref_csum_deletions);
  99
 100	if (num_bytes == 0)
 101		return;
 102
 103	/*
 104	 * Try to take num_bytes from the transaction's local delayed reserve.
 105	 * If not possible, try to take as much as it's available. If the local
 106	 * reserve doesn't have enough reserved space, the delayed refs reserve
 107	 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
 108	 * by someone or if a transaction commit is triggered before that, the
 109	 * global block reserve will be used. We want to minimize using the
 110	 * global block reserve for cases we can account for in advance, to
 111	 * avoid exhausting it and reach -ENOSPC during a transaction commit.
 112	 */
 113	spin_lock(&local_rsv->lock);
 114	reserved_bytes = min(num_bytes, local_rsv->reserved);
 115	local_rsv->reserved -= reserved_bytes;
 116	local_rsv->full = (local_rsv->reserved >= local_rsv->size);
 117	spin_unlock(&local_rsv->lock);
 118
 119	spin_lock(&delayed_rsv->lock);
 120	delayed_rsv->size += num_bytes;
 121	delayed_rsv->reserved += reserved_bytes;
 122	delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
 123	spin_unlock(&delayed_rsv->lock);
 124	trans->delayed_ref_updates = 0;
 125	trans->delayed_ref_csum_deletions = 0;
 126}
 127
 128/*
 129 * Adjust the size of the delayed refs block reserve for 1 block group item
 130 * insertion, used after allocating a block group.
 131 */
 132void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
 133{
 134	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 135
 136	spin_lock(&delayed_rsv->lock);
 137	/*
 138	 * Inserting a block group item does not require changing the free space
 139	 * tree, only the extent tree or the block group tree, so this is all we
 140	 * need.
 141	 */
 142	delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
 143	delayed_rsv->full = false;
 144	spin_unlock(&delayed_rsv->lock);
 145}
 146
 147/*
 148 * Adjust the size of the delayed refs block reserve to release space for 1
 149 * block group item insertion.
 150 */
 151void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
 152{
 153	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 154	const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 155	u64 released;
 156
 157	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
 158	if (released > 0)
 159		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 160					      0, released, 0);
 161}
 162
 163/*
 164 * Adjust the size of the delayed refs block reserve for 1 block group item
 165 * update.
 166 */
 167void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
 168{
 169	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 170
 171	spin_lock(&delayed_rsv->lock);
 172	/*
 173	 * Updating a block group item does not result in new nodes/leaves and
 174	 * does not require changing the free space tree, only the extent tree
 175	 * or the block group tree, so this is all we need.
 176	 */
 177	delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
 178	delayed_rsv->full = false;
 179	spin_unlock(&delayed_rsv->lock);
 180}
 181
 182/*
 183 * Adjust the size of the delayed refs block reserve to release space for 1
 184 * block group item update.
 185 */
 186void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
 187{
 188	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 189	const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 190	u64 released;
 191
 192	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
 193	if (released > 0)
 194		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 195					      0, released, 0);
 196}
 197
 198/*
 199 * Refill based on our delayed refs usage.
 200 *
 201 * @fs_info: the filesystem
 202 * @flush:   control how we can flush for this reservation.
 203 *
 204 * This will refill the delayed block_rsv up to 1 items size worth of space and
 205 * will return -ENOSPC if we can't make the reservation.
 206 */
 207int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
 208				  enum btrfs_reserve_flush_enum flush)
 209{
 210	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
 211	struct btrfs_space_info *space_info = block_rsv->space_info;
 212	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
 213	u64 num_bytes = 0;
 214	u64 refilled_bytes;
 215	u64 to_free;
 216	int ret = -ENOSPC;
 217
 218	spin_lock(&block_rsv->lock);
 219	if (block_rsv->reserved < block_rsv->size) {
 220		num_bytes = block_rsv->size - block_rsv->reserved;
 221		num_bytes = min(num_bytes, limit);
 222	}
 223	spin_unlock(&block_rsv->lock);
 224
 225	if (!num_bytes)
 226		return 0;
 227
 228	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
 229	if (ret)
 230		return ret;
 231
 232	/*
 233	 * We may have raced with someone else, so check again whether the block
 234	 * reserve is still not full and release any excess space.
 235	 */
 236	spin_lock(&block_rsv->lock);
 237	if (block_rsv->reserved < block_rsv->size) {
 238		u64 needed = block_rsv->size - block_rsv->reserved;
 239
 240		if (num_bytes >= needed) {
 241			block_rsv->reserved += needed;
 242			block_rsv->full = true;
 243			to_free = num_bytes - needed;
 244			refilled_bytes = needed;
 245		} else {
 246			block_rsv->reserved += num_bytes;
 247			to_free = 0;
 248			refilled_bytes = num_bytes;
 249		}
 250	} else {
 251		to_free = num_bytes;
 252		refilled_bytes = 0;
 253	}
 254	spin_unlock(&block_rsv->lock);
 255
 256	if (to_free > 0)
 257		btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
 258
 259	if (refilled_bytes > 0)
 260		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
 261					      refilled_bytes, 1);
 262	return 0;
 263}
 264
 265/*
 266 * compare two delayed data backrefs with same bytenr and type
 267 */
 268static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
 269			  struct btrfs_delayed_ref_node *ref2)
 270{
 271	if (ref1->data_ref.objectid < ref2->data_ref.objectid)
 272		return -1;
 273	if (ref1->data_ref.objectid > ref2->data_ref.objectid)
 274		return 1;
 275	if (ref1->data_ref.offset < ref2->data_ref.offset)
 276		return -1;
 277	if (ref1->data_ref.offset > ref2->data_ref.offset)
 278		return 1;
 279	return 0;
 280}
 281
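/*
 * Compare two delayed ref nodes.  Ordering is by type, then by parent bytenr
 * for shared refs or by root (plus objectid/offset for data refs) for keyed
 * refs, and finally by sequence number when @check_seq is set.
 */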
 282static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 283		     struct btrfs_delayed_ref_node *ref2,
 284		     bool check_seq)
 285{
 286	int ret = 0;
 287
 288	if (ref1->type < ref2->type)
 289		return -1;
 290	if (ref1->type > ref2->type)
 291		return 1;
 292	if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
 293	    ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
 294		if (ref1->parent < ref2->parent)
 295			return -1;
 296		if (ref1->parent > ref2->parent)
 297			return 1;
 298	} else {
 299		if (ref1->ref_root < ref2->ref_root)
 300			return -1;
 301		if (ref1->ref_root > ref2->ref_root)
 302			return 1;
 303		if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
 304			ret = comp_data_refs(ref1, ref2);
 305	}
 306	if (ret)
 307		return ret;
 308	if (check_seq) {
 309		if (ref1->seq < ref2->seq)
 310			return -1;
 311		if (ref1->seq > ref2->seq)
 312			return 1;
 313	}
 314	return 0;
 315}
 316
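/*
 * Insert @ins into a head's ref tree, keyed by comp_refs().  Returns the
 * already existing node if an equal ref is found, so the caller can merge
 * into it, or NULL when the insertion succeeded.
 */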
 317static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 318		struct btrfs_delayed_ref_node *ins)
 319{
 320	struct rb_node **p = &root->rb_root.rb_node;
 321	struct rb_node *node = &ins->ref_node;
 322	struct rb_node *parent_node = NULL;
 323	struct btrfs_delayed_ref_node *entry;
 324	bool leftmost = true;
 325
 326	while (*p) {
 327		int comp;
 328
 329		parent_node = *p;
 330		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
 331				 ref_node);
 332		comp = comp_refs(ins, entry, true);
 333		if (comp < 0) {
 334			p = &(*p)->rb_left;
 335		} else if (comp > 0) {
 336			p = &(*p)->rb_right;
 337			leftmost = false;
 338		} else {
 339			return entry;
 340		}
 341	}
 342
 343	rb_link_node(node, parent_node, p);
 344	rb_insert_color_cached(node, root, leftmost);
 345	return NULL;
 346}
 347
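/* Return the tracked ref head with the lowest bytenr, or NULL if there is none. */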
 348static struct btrfs_delayed_ref_head *find_first_ref_head(
 349		struct btrfs_delayed_ref_root *dr)
 350{
 351	unsigned long from = 0;
 352
 353	lockdep_assert_held(&dr->lock);
 354
 355	return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
 356}
 357
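/*
 * Take a ref head's mutex.  If it is contended, temporarily drop
 * delayed_refs->lock (holding an extra reference on the head) while blocking
 * on the mutex and then retake it.  Returns false if the head stopped being
 * tracked in the meantime, in which case the mutex is not held on return.
 */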
 358static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
 359				   struct btrfs_delayed_ref_head *head)
 360{
 361	lockdep_assert_held(&delayed_refs->lock);
 362	if (mutex_trylock(&head->mutex))
 363		return true;
 364
 365	refcount_inc(&head->refs);
 366	spin_unlock(&delayed_refs->lock);
 367
 368	mutex_lock(&head->mutex);
 369	spin_lock(&delayed_refs->lock);
 370	if (!head->tracked) {
 371		mutex_unlock(&head->mutex);
 372		btrfs_put_delayed_ref_head(head);
 373		return false;
 374	}
 375	btrfs_put_delayed_ref_head(head);
 376	return true;
 377}
 378
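/*
 * Remove a single ref node from its head: erase it from the head's ref tree
 * and add list, drop our reference on it and release its delayed refs
 * reserve space.
 */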
 379static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
 380				    struct btrfs_delayed_ref_root *delayed_refs,
 381				    struct btrfs_delayed_ref_head *head,
 382				    struct btrfs_delayed_ref_node *ref)
 383{
 384	lockdep_assert_held(&head->lock);
 385	rb_erase_cached(&ref->ref_node, &head->ref_tree);
 386	RB_CLEAR_NODE(&ref->ref_node);
 387	if (!list_empty(&ref->add_list))
 388		list_del(&ref->add_list);
 389	btrfs_put_delayed_ref(ref);
 390	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
 391}
 392
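/*
 * Try to merge @ref with the nodes following it in the head's ref tree.
 * Nodes that compare equal (ignoring seq) get their ref_mod values folded
 * together, and refs that cancel out to zero are dropped.  Merging stops at
 * the first node whose seq is still protected by the tree mod log.  Returns
 * true if @ref itself was dropped or swapped, telling the caller to restart.
 */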
 393static bool merge_ref(struct btrfs_fs_info *fs_info,
 394		      struct btrfs_delayed_ref_root *delayed_refs,
 395		      struct btrfs_delayed_ref_head *head,
 396		      struct btrfs_delayed_ref_node *ref,
 397		      u64 seq)
 398{
 399	struct btrfs_delayed_ref_node *next;
 400	struct rb_node *node = rb_next(&ref->ref_node);
 401	bool done = false;
 402
 403	while (!done && node) {
 404		int mod;
 405
 406		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 407		node = rb_next(node);
 408		if (seq && next->seq >= seq)
 409			break;
 410		if (comp_refs(ref, next, false))
 411			break;
 412
 413		if (ref->action == next->action) {
 414			mod = next->ref_mod;
 415		} else {
 416			if (ref->ref_mod < next->ref_mod) {
 417				swap(ref, next);
 418				done = true;
 419			}
 420			mod = -next->ref_mod;
 421		}
 422
 423		drop_delayed_ref(fs_info, delayed_refs, head, next);
 424		ref->ref_mod += mod;
 425		if (ref->ref_mod == 0) {
 426			drop_delayed_ref(fs_info, delayed_refs, head, ref);
 427			done = true;
 428		} else {
 429			/*
 430			 * Can't have multiples of the same ref on a tree block.
 431			 */
 432			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
 433				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
 434		}
 435	}
 436
 437	return done;
 438}
 439
 440void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
 441			      struct btrfs_delayed_ref_root *delayed_refs,
 442			      struct btrfs_delayed_ref_head *head)
 443{
 444	struct btrfs_delayed_ref_node *ref;
 445	struct rb_node *node;
 446	u64 seq = 0;
 447
 448	lockdep_assert_held(&head->lock);
 449
 450	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 451		return;
 452
 453	/* We don't have too many refs to merge for data. */
 454	if (head->is_data)
 455		return;
 456
 457	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 458again:
 459	for (node = rb_first_cached(&head->ref_tree); node;
 460	     node = rb_next(node)) {
 461		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 462		if (seq && ref->seq >= seq)
 463			continue;
 464		if (merge_ref(fs_info, delayed_refs, head, ref, seq))
 465			goto again;
 466	}
 467}
 468
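/*
 * Return 1 if a delayed ref with the given sequence number must be held back
 * because the tree mod log still has a lower sequence number in use, 0
 * otherwise.
 */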
 469int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
 470{
 471	int ret = 0;
 472	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 473
 474	if (min_seq != 0 && seq >= min_seq) {
 475		btrfs_debug(fs_info,
 476			    "holding back delayed_ref %llu, lowest is %llu",
 477			    seq, min_seq);
 478		ret = 1;
 479	}
 480
 481	return ret;
 482}
 483
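/*
 * Select the next delayed ref head to run.  Scan the head xarray starting at
 * run_delayed_start (wrapping around to the beginning once), skip heads that
 * are already being processed, mark the chosen head as processing and take
 * its mutex.  Returns NULL when there is nothing left to run, or
 * ERR_PTR(-EAGAIN) if the chosen head was freed while waiting for its mutex.
 */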
 484struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 485		const struct btrfs_fs_info *fs_info,
 486		struct btrfs_delayed_ref_root *delayed_refs)
 487{
 488	struct btrfs_delayed_ref_head *head;
 489	unsigned long start_index;
 490	unsigned long found_index;
 491	bool found_head = false;
 492	bool locked;
 493
 494	spin_lock(&delayed_refs->lock);
 495again:
 496	start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
 497	xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
 498		if (!head->processing) {
 499			found_head = true;
 500			break;
 501		}
 502	}
 503	if (!found_head) {
 504		if (delayed_refs->run_delayed_start == 0) {
 505			spin_unlock(&delayed_refs->lock);
 506			return NULL;
 507		}
 508		delayed_refs->run_delayed_start = 0;
 509		goto again;
 510	}
 511
 512	head->processing = true;
 513	WARN_ON(delayed_refs->num_heads_ready == 0);
 514	delayed_refs->num_heads_ready--;
 515	delayed_refs->run_delayed_start = head->bytenr +
 516		head->num_bytes;
 517
 518	locked = btrfs_delayed_ref_lock(delayed_refs, head);
 519	spin_unlock(&delayed_refs->lock);
 520
 521	/*
 522	 * We may have dropped the spin lock to get the head mutex lock, and
 523	 * that might have given someone else time to free the head.  If that's
 524	 * true, it has been removed from our list and we can move on.
 525	 */
 526	if (!locked)
 527		return ERR_PTR(-EAGAIN);
 528
 529	return head;
 530}
 531
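/*
 * Undo btrfs_select_ref_head(): mark the head as no longer being processed,
 * make it selectable again and drop its mutex.
 */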
 532void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 533			     struct btrfs_delayed_ref_head *head)
 534{
 535	spin_lock(&delayed_refs->lock);
 536	head->processing = false;
 537	delayed_refs->num_heads_ready++;
 538	spin_unlock(&delayed_refs->lock);
 539	btrfs_delayed_ref_unlock(head);
 540}
 541
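/*
 * Remove a ref head from the delayed refs root: erase it from the xarray,
 * mark it untracked and update the head counters.  Both delayed_refs->lock
 * and the head's lock must be held.
 */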
 542void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
 543			   struct btrfs_delayed_ref_root *delayed_refs,
 544			   struct btrfs_delayed_ref_head *head)
 545{
 546	const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
 547
 548	lockdep_assert_held(&delayed_refs->lock);
 549	lockdep_assert_held(&head->lock);
 550
 551	xa_erase(&delayed_refs->head_refs, index);
 552	head->tracked = false;
 553	delayed_refs->num_heads--;
 554	if (!head->processing)
 555		delayed_refs->num_heads_ready--;
 556}
 557
 558/*
 559 * Helper to insert the ref_node to the tail or merge with tail.
 560 *
 561 * Return false if the ref was inserted.
 562 * Return true if the ref was merged into an existing one (and therefore can be
 563 * freed by the caller).
 564 */
 565static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
 566			       struct btrfs_delayed_ref_head *href,
 567			       struct btrfs_delayed_ref_node *ref)
 568{
 569	struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
 570	struct btrfs_delayed_ref_node *exist;
 571	int mod;
 572
 573	spin_lock(&href->lock);
 574	exist = tree_insert(&href->ref_tree, ref);
 575	if (!exist) {
 576		if (ref->action == BTRFS_ADD_DELAYED_REF)
 577			list_add_tail(&ref->add_list, &href->ref_add_list);
 578		spin_unlock(&href->lock);
 579		trans->delayed_ref_updates++;
 580		return false;
 581	}
 582
 583	/* Now we are sure we can merge */
 584	if (exist->action == ref->action) {
 585		mod = ref->ref_mod;
 586	} else {
 587		/* Need to change action */
 588		if (exist->ref_mod < ref->ref_mod) {
 589			exist->action = ref->action;
 590			mod = -exist->ref_mod;
 591			exist->ref_mod = ref->ref_mod;
 592			if (ref->action == BTRFS_ADD_DELAYED_REF)
 593				list_add_tail(&exist->add_list,
 594					      &href->ref_add_list);
 595			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
 596				ASSERT(!list_empty(&exist->add_list));
 597				list_del_init(&exist->add_list);
 598			} else {
 599				ASSERT(0);
 600			}
 601		} else
 602			mod = -ref->ref_mod;
 603	}
 604	exist->ref_mod += mod;
 605
 606	/* remove existing tail if its ref_mod is zero */
 607	if (exist->ref_mod == 0)
 608		drop_delayed_ref(trans->fs_info, root, href, exist);
 609	spin_unlock(&href->lock);
 610	return true;
 611}
 612
 613/*
 614 * helper function to update the accounting in the head ref;
 615 * existing and update must have the same bytenr
 616 */
 617static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 618			 struct btrfs_delayed_ref_head *existing,
 619			 struct btrfs_delayed_ref_head *update)
 620{
 621	struct btrfs_delayed_ref_root *delayed_refs =
 622		&trans->transaction->delayed_refs;
 623	struct btrfs_fs_info *fs_info = trans->fs_info;
 624	int old_ref_mod;
 625
 626	BUG_ON(existing->is_data != update->is_data);
 627
 628	spin_lock(&existing->lock);
 629
 630	/*
 631	 * When freeing an extent, we may not know the owning root when we
 632	 * first create the head_ref. However, some deref before the last deref
 633	 * will know it, so we just need to update the head_ref accordingly.
 634	 */
 635	if (!existing->owning_root)
 636		existing->owning_root = update->owning_root;
 637
 638	if (update->must_insert_reserved) {
 639		/* if the extent was freed and then
 640		 * reallocated before the delayed ref
 641		 * entries were processed, we can end up
 642		 * with an existing head ref without
 643		 * the must_insert_reserved flag set.
 644		 * Set it again here
 645		 */
 646		existing->must_insert_reserved = update->must_insert_reserved;
 647		existing->owning_root = update->owning_root;
 648
 649		/*
 650		 * update the num_bytes so we make sure the accounting
 651		 * is done correctly
 652		 */
 653		existing->num_bytes = update->num_bytes;
 654
 655	}
 656
 657	if (update->extent_op) {
 658		if (!existing->extent_op) {
 659			existing->extent_op = update->extent_op;
 660		} else {
 661			if (update->extent_op->update_key) {
 662				memcpy(&existing->extent_op->key,
 663				       &update->extent_op->key,
 664				       sizeof(update->extent_op->key));
 665				existing->extent_op->update_key = true;
 666			}
 667			if (update->extent_op->update_flags) {
 668				existing->extent_op->flags_to_set |=
 669					update->extent_op->flags_to_set;
 670				existing->extent_op->update_flags = true;
 671			}
 672			btrfs_free_delayed_extent_op(update->extent_op);
 673		}
 674	}
 675	/*
 676	 * update the reference mod on the head to reflect this new operation,
 677	 * only need the lock for this case because we could be processing it
 678	 * currently, for refs we just added we know we're a-ok.
 679	 */
 680	old_ref_mod = existing->total_ref_mod;
 681	existing->ref_mod += update->ref_mod;
 682	existing->total_ref_mod += update->ref_mod;
 683
 684	/*
 685	 * If we are going from a positive ref mod to a negative or vice
 686	 * versa we need to make sure to adjust pending_csums accordingly.
 687	 * We reserve bytes for csum deletion when adding or updating a ref head
 688	 * see add_delayed_ref_head() for more details.
 689	 */
 690	if (existing->is_data) {
 691		u64 csum_leaves =
 692			btrfs_csum_bytes_to_leaves(fs_info,
 693						   existing->num_bytes);
 694
 695		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
 696			delayed_refs->pending_csums -= existing->num_bytes;
 697			btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
 698		}
 699		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
 700			delayed_refs->pending_csums += existing->num_bytes;
 701			trans->delayed_ref_csum_deletions += csum_leaves;
 702		}
 703	}
 704
 705	spin_unlock(&existing->lock);
 706}
 707
 708static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 709				  struct btrfs_ref *generic_ref,
 710				  struct btrfs_qgroup_extent_record *qrecord,
 711				  u64 reserved)
 712{
 713	int count_mod = 1;
 714	bool must_insert_reserved = false;
 715
 716	/* If reserved is provided, it must be a data extent. */
 717	BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
 718
 719	switch (generic_ref->action) {
 720	case BTRFS_ADD_DELAYED_REF:
 721		/* count_mod is already set to 1. */
 722		break;
 723	case BTRFS_UPDATE_DELAYED_HEAD:
 724		count_mod = 0;
 725		break;
 726	case BTRFS_DROP_DELAYED_REF:
 727		/*
 728		 * The head node stores the sum of all the mods, so dropping a ref
 729		 * should drop the sum in the head node by one.
 730		 */
 731		count_mod = -1;
 732		break;
 733	case BTRFS_ADD_DELAYED_EXTENT:
 734		/*
 735		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
 736		 * reserved accounting when the extent is finally added, or if a
 737		 * later modification deletes the delayed ref without ever
 738		 * inserting the extent into the extent allocation tree.
 739		 * ref->must_insert_reserved is the flag used to record that
 740		 * accounting mods are required.
 741		 *
 742		 * Once we record must_insert_reserved, switch the action to
 743		 * BTRFS_ADD_DELAYED_REF because other special casing is not
 744		 * required.
 745		 */
 746		must_insert_reserved = true;
 747		break;
 748	}
 749
 750	refcount_set(&head_ref->refs, 1);
 751	head_ref->bytenr = generic_ref->bytenr;
 752	head_ref->num_bytes = generic_ref->num_bytes;
 753	head_ref->ref_mod = count_mod;
 754	head_ref->reserved_bytes = reserved;
 755	head_ref->must_insert_reserved = must_insert_reserved;
 756	head_ref->owning_root = generic_ref->owning_root;
 757	head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
 758	head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
 759	head_ref->ref_tree = RB_ROOT_CACHED;
 760	INIT_LIST_HEAD(&head_ref->ref_add_list);
 761	head_ref->tracked = false;
 762	head_ref->processing = false;
 763	head_ref->total_ref_mod = count_mod;
 764	spin_lock_init(&head_ref->lock);
 765	mutex_init(&head_ref->mutex);
 766
 767	/* If not metadata set an impossible level to help debugging. */
 768	if (generic_ref->type == BTRFS_REF_METADATA)
 769		head_ref->level = generic_ref->tree_ref.level;
 770	else
 771		head_ref->level = U8_MAX;
 772
 773	if (qrecord) {
 774		if (generic_ref->ref_root && reserved) {
 775			qrecord->data_rsv = reserved;
 776			qrecord->data_rsv_refroot = generic_ref->ref_root;
 777		}
 778		qrecord->num_bytes = generic_ref->num_bytes;
 779		qrecord->old_roots = NULL;
 780	}
 781}
 782
 783/*
 784 * helper function to actually insert a head node into the xarray of head refs.
 785 * this does all the dirty work in terms of maintaining the correct
 786 * overall modification count.
 787 *
 788 * Returns an error pointer in case of an error.
 789 */
 790static noinline struct btrfs_delayed_ref_head *
 791add_delayed_ref_head(struct btrfs_trans_handle *trans,
 792		     struct btrfs_delayed_ref_head *head_ref,
 793		     struct btrfs_qgroup_extent_record *qrecord,
 794		     int action, bool *qrecord_inserted_ret)
 795{
 796	struct btrfs_fs_info *fs_info = trans->fs_info;
 797	struct btrfs_delayed_ref_head *existing;
 798	struct btrfs_delayed_ref_root *delayed_refs;
 799	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
 800	bool qrecord_inserted = false;
 801
 802	delayed_refs = &trans->transaction->delayed_refs;
 803	lockdep_assert_held(&delayed_refs->lock);
 804
 805#if BITS_PER_LONG == 32
 806	if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
 807		if (qrecord)
 808			xa_release(&delayed_refs->dirty_extents, index);
 809		btrfs_err_rl(fs_info,
 810"delayed ref head %llu is beyond 32bit page cache and xarray index limit",
 811			     head_ref->bytenr);
 812		btrfs_err_32bit_limit(fs_info);
 813		return ERR_PTR(-EOVERFLOW);
 814	}
 815#endif
 816
 817	/* Record qgroup extent info if provided */
 818	if (qrecord) {
 819		int ret;
 820
 821		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
 822						       head_ref->bytenr);
 823		if (ret) {
 824			/* Clean up if insertion fails or item exists. */
 825			xa_release(&delayed_refs->dirty_extents, index);
 826			/* Caller responsible for freeing qrecord on error. */
 827			if (ret < 0)
 828				return ERR_PTR(ret);
 829			kfree(qrecord);
 830		} else {
 831			qrecord_inserted = true;
 832		}
 833	}
 834
 835	trace_add_delayed_ref_head(fs_info, head_ref, action);
 836
 837	existing = xa_load(&delayed_refs->head_refs, index);
 838	if (existing) {
 839		update_existing_head_ref(trans, existing, head_ref);
 840		/*
 841		 * we've updated the existing ref, free the newly
 842		 * allocated ref
 843		 */
 844		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 845		head_ref = existing;
 846	} else {
 847		existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
 848		if (xa_is_err(existing)) {
 849			/* Memory was preallocated by the caller. */
 850			ASSERT(xa_err(existing) != -ENOMEM);
 851			return ERR_PTR(xa_err(existing));
 852		} else if (WARN_ON(existing)) {
 853			/*
 854			 * Shouldn't happen, we just did a lookup before under
 855			 * delayed_refs->lock.
 856			 */
 857			return ERR_PTR(-EEXIST);
 858		}
 859		head_ref->tracked = true;
 860		/*
 861		 * We reserve the amount of bytes needed to delete csums when
 862		 * adding the ref head and not when adding individual drop refs
 863		 * since the csum items are deleted only after running the last
 864		 * delayed drop ref (the data extent's ref count drops to 0).
 865		 */
 866		if (head_ref->is_data && head_ref->ref_mod < 0) {
 867			delayed_refs->pending_csums += head_ref->num_bytes;
 868			trans->delayed_ref_csum_deletions +=
 869				btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
 870		}
 871		delayed_refs->num_heads++;
 872		delayed_refs->num_heads_ready++;
 873	}
 874	if (qrecord_inserted_ret)
 875		*qrecord_inserted_ret = qrecord_inserted;
 876
 877	return head_ref;
 878}
 879
 880/*
 881 * Initialize the structure which represents a modification to an extent.
 882 *
 883 * @fs_info:    Internal to the mounted filesystem mount structure.
 884 *
 885 * @ref:	The structure which is going to be initialized.
 886 *
 887 * @bytenr:	The logical address of the extent for which a modification is
 888 *		going to be recorded.
 889 *
 890 * @num_bytes:  Size of the extent whose modification is being recorded.
 891 *
 892 * @ref_root:	The id of the root where this modification has originated, this
 893 *		can be either one of the well-known metadata trees or the
 894 *		subvolume id which references this extent.
 895 *
 896 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 897 *		BTRFS_ADD_DELAYED_EXTENT
 898 *
 899 * @ref_type:	Holds the type of the extent which is being recorded, can be
 900 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 901 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 902 *		BTRFS_EXTENT_DATA_REF_KEY when recording data extent
 903 */
 904static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
 905				    struct btrfs_delayed_ref_node *ref,
 906				    struct btrfs_ref *generic_ref)
 907{
 908	int action = generic_ref->action;
 909	u64 seq = 0;
 910
 911	if (action == BTRFS_ADD_DELAYED_EXTENT)
 912		action = BTRFS_ADD_DELAYED_REF;
 913
 914	if (is_fstree(generic_ref->ref_root))
 915		seq = atomic64_read(&fs_info->tree_mod_seq);
 916
 917	refcount_set(&ref->refs, 1);
 918	ref->bytenr = generic_ref->bytenr;
 919	ref->num_bytes = generic_ref->num_bytes;
 920	ref->ref_mod = 1;
 921	ref->action = action;
 922	ref->seq = seq;
 923	ref->type = btrfs_ref_type(generic_ref);
 924	ref->ref_root = generic_ref->ref_root;
 925	ref->parent = generic_ref->parent;
 926	RB_CLEAR_NODE(&ref->ref_node);
 927	INIT_LIST_HEAD(&ref->add_list);
 928
 929	if (generic_ref->type == BTRFS_REF_DATA)
 930		ref->data_ref = generic_ref->data_ref;
 931	else
 932		ref->tree_ref = generic_ref->tree_ref;
 933}
 934
 935void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
 936			 bool skip_qgroup)
 937{
 938#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 939	/* If @real_root not set, use @root as fallback */
 940	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
 941#endif
 942	generic_ref->tree_ref.level = level;
 943	generic_ref->type = BTRFS_REF_METADATA;
 944	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
 945			     (!mod_root || is_fstree(mod_root))))
 946		generic_ref->skip_qgroup = true;
 947	else
 948		generic_ref->skip_qgroup = false;
 949
 950}
 951
 952void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
 953			 u64 mod_root, bool skip_qgroup)
 954{
 955#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 956	/* If @real_root not set, use @root as fallback */
 957	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
 958#endif
 959	generic_ref->data_ref.objectid = ino;
 960	generic_ref->data_ref.offset = offset;
 961	generic_ref->type = BTRFS_REF_DATA;
 962	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
 963			     (!mod_root || is_fstree(mod_root))))
 964		generic_ref->skip_qgroup = true;
 965	else
 966		generic_ref->skip_qgroup = false;
 967}
 968
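/*
 * Common path for adding a delayed tree or data ref: allocate the ref node
 * and head, optionally set up a qgroup extent record, insert (or merge) the
 * head into the transaction's delayed refs root, attach the ref node to it
 * and adjust the delayed refs reserve.  @reserved is only meaningful for
 * data extents carrying reserved space.
 */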
 969static int add_delayed_ref(struct btrfs_trans_handle *trans,
 970			   struct btrfs_ref *generic_ref,
 971			   struct btrfs_delayed_extent_op *extent_op,
 972			   u64 reserved)
 973{
 974	struct btrfs_fs_info *fs_info = trans->fs_info;
 975	struct btrfs_delayed_ref_node *node;
 976	struct btrfs_delayed_ref_head *head_ref;
 977	struct btrfs_delayed_ref_head *new_head_ref;
 978	struct btrfs_delayed_ref_root *delayed_refs;
 979	struct btrfs_qgroup_extent_record *record = NULL;
 980	const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
 981	bool qrecord_reserved = false;
 982	bool qrecord_inserted;
 983	int action = generic_ref->action;
 984	bool merged;
 985	int ret;
 986
 987	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
 988	if (!node)
 989		return -ENOMEM;
 990
 991	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 992	if (!head_ref) {
 993		ret = -ENOMEM;
 994		goto free_node;
 995	}
 996
 997	delayed_refs = &trans->transaction->delayed_refs;
 998
 999	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1000		record = kzalloc(sizeof(*record), GFP_NOFS);
1001		if (!record) {
1002			ret = -ENOMEM;
1003			goto free_head_ref;
1004		}
1005		if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1006			ret = -ENOMEM;
1007			goto free_record;
1008		}
1009		qrecord_reserved = true;
1010	}
1011
1012	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1013	if (ret) {
1014		if (qrecord_reserved)
1015			xa_release(&delayed_refs->dirty_extents, index);
1016		goto free_record;
1017	}
1018
1019	init_delayed_ref_common(fs_info, node, generic_ref);
1020	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
1021	head_ref->extent_op = extent_op;
1022
1023	spin_lock(&delayed_refs->lock);
1024
1025	/*
1026	 * insert both the head node and the new ref without dropping
1027	 * the spin lock
1028	 */
1029	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
1030					    action, &qrecord_inserted);
1031	if (IS_ERR(new_head_ref)) {
1032		xa_release(&delayed_refs->head_refs, index);
1033		spin_unlock(&delayed_refs->lock);
1034		ret = PTR_ERR(new_head_ref);
1035		goto free_record;
1036	}
1037	head_ref = new_head_ref;
1038
1039	merged = insert_delayed_ref(trans, head_ref, node);
1040	spin_unlock(&delayed_refs->lock);
1041
1042	/*
1043	 * Need to update the delayed_refs_rsv with any changes we may have
1044	 * made.
1045	 */
1046	btrfs_update_delayed_refs_rsv(trans);
1047
1048	if (generic_ref->type == BTRFS_REF_DATA)
1049		trace_add_delayed_data_ref(trans->fs_info, node);
1050	else
1051		trace_add_delayed_tree_ref(trans->fs_info, node);
1052	if (merged)
1053		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1054
1055	if (qrecord_inserted)
1056		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
1057	return 0;
1058
1059free_record:
1060	kfree(record);
1061free_head_ref:
1062	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1063free_node:
1064	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1065	return ret;
1066}
1067
1068/*
1069 * Add a delayed tree ref. This does all of the accounting required to make sure
1070 * the delayed ref is eventually processed before this transaction commits.
1071 */
1072int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1073			       struct btrfs_ref *generic_ref,
1074			       struct btrfs_delayed_extent_op *extent_op)
1075{
1076	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1077	return add_delayed_ref(trans, generic_ref, extent_op, 0);
1078}
1079
1080/*
1081 * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
1082 */
1083int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1084			       struct btrfs_ref *generic_ref,
1085			       u64 reserved)
1086{
1087	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
1088	return add_delayed_ref(trans, generic_ref, NULL, reserved);
1089}
1090
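/*
 * Record a pending update of an extent item's key or flags by adding (or
 * updating) the ref head for @bytenr with a BTRFS_UPDATE_DELAYED_HEAD action
 * and attaching @extent_op to it.  No ref nodes are added.
 */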
1091int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1092				u64 bytenr, u64 num_bytes, u8 level,
1093				struct btrfs_delayed_extent_op *extent_op)
1094{
1095	const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
1096	struct btrfs_delayed_ref_head *head_ref;
1097	struct btrfs_delayed_ref_head *head_ref_ret;
1098	struct btrfs_delayed_ref_root *delayed_refs;
1099	struct btrfs_ref generic_ref = {
1100		.type = BTRFS_REF_METADATA,
1101		.action = BTRFS_UPDATE_DELAYED_HEAD,
1102		.bytenr = bytenr,
1103		.num_bytes = num_bytes,
1104		.tree_ref.level = level,
1105	};
1106	int ret;
1107
1108	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1109	if (!head_ref)
1110		return -ENOMEM;
1111
1112	init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
1113	head_ref->extent_op = extent_op;
1114
1115	delayed_refs = &trans->transaction->delayed_refs;
1116
1117	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1118	if (ret) {
1119		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1120		return ret;
1121	}
1122
1123	spin_lock(&delayed_refs->lock);
1124	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
1125					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
1126	if (IS_ERR(head_ref_ret)) {
1127		xa_release(&delayed_refs->head_refs, index);
1128		spin_unlock(&delayed_refs->lock);
1129		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1130		return PTR_ERR(head_ref_ret);
1131	}
1132	spin_unlock(&delayed_refs->lock);
1133
1134	/*
1135	 * Need to update the delayed_refs_rsv with any changes we may have
1136	 * made.
1137	 */
1138	btrfs_update_delayed_refs_rsv(trans);
1139	return 0;
1140}
1141
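/* Drop a reference on a delayed ref node, freeing it when the count reaches zero. */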
1142void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1143{
1144	if (refcount_dec_and_test(&ref->refs)) {
1145		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1146		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
1147	}
1148}
1149
1150/*
1151 * This does a simple search for the head node for a given extent.  Returns the
1152 * head node if found, or NULL if not.
1153 */
1154struct btrfs_delayed_ref_head *
1155btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
1156			    struct btrfs_delayed_ref_root *delayed_refs,
1157			    u64 bytenr)
1158{
1159	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1160
1161	lockdep_assert_held(&delayed_refs->lock);
1162
1163	return xa_load(&delayed_refs->head_refs, index);
1164}
1165
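/*
 * Comparator used to binary search a head's ref tree for a tree block ref
 * matching @root (keyed ref) or @parent (shared ref).
 */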
1166static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
1167{
1168	int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
1169
1170	if (type < entry->type)
1171		return -1;
1172	if (type > entry->type)
1173		return 1;
1174
1175	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
1176		if (root < entry->ref_root)
1177			return -1;
1178		if (root > entry->ref_root)
1179			return 1;
1180	} else {
1181		if (parent < entry->parent)
1182			return -1;
1183		if (parent > entry->parent)
1184			return 1;
1185	}
1186	return 0;
1187}
1188
1189/*
1190 * Check to see if a given root/parent reference is attached to the head.  This
1191 * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
1192 * indicates the reference exists for the given root or parent.  This is for
1193 * tree blocks only.
1194 *
1195 * @head: the head of the bytenr we're searching.
1196 * @root: the root objectid of the reference if it is a normal reference.
1197 * @parent: the parent if this is a shared backref.
1198 */
1199bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
1200				 u64 root, u64 parent)
1201{
1202	struct rb_node *node;
1203	bool found = false;
1204
1205	lockdep_assert_held(&head->mutex);
1206
1207	spin_lock(&head->lock);
1208	node = head->ref_tree.rb_root.rb_node;
1209	while (node) {
1210		struct btrfs_delayed_ref_node *entry;
1211		int ret;
1212
1213		entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
1214		ret = find_comp(entry, root, parent);
1215		if (ret < 0) {
1216			node = node->rb_left;
1217		} else if (ret > 0) {
1218			node = node->rb_right;
1219		} else {
1220			/*
1221			 * We only want to count ADD actions, as drops mean the
1222			 * ref doesn't exist.
1223			 */
1224			if (entry->action == BTRFS_ADD_DELAYED_REF)
1225				found = true;
1226			break;
1227		}
1228	}
1229	spin_unlock(&head->lock);
1230	return found;
1231}
1232
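/*
 * Drop every remaining delayed ref during transaction abort: each head's ref
 * nodes are dropped, the head is deleted and its accounting cleaned up, and
 * heads with must_insert_reserved set have their bytes moved from the block
 * group's reserved counter to pinned and then unpinned via the error path.
 */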
1233void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
1234{
1235	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1236	struct btrfs_fs_info *fs_info = trans->fs_info;
1237
1238	spin_lock(&delayed_refs->lock);
1239	while (true) {
1240		struct btrfs_delayed_ref_head *head;
1241		struct rb_node *n;
1242		bool pin_bytes = false;
1243
1244		head = find_first_ref_head(delayed_refs);
1245		if (!head)
1246			break;
1247
1248		if (!btrfs_delayed_ref_lock(delayed_refs, head))
1249			continue;
1250
1251		spin_lock(&head->lock);
1252		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
1253			struct btrfs_delayed_ref_node *ref;
1254
1255			ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
1256			drop_delayed_ref(fs_info, delayed_refs, head, ref);
1257		}
1258		if (head->must_insert_reserved)
1259			pin_bytes = true;
1260		btrfs_free_delayed_extent_op(head->extent_op);
1261		btrfs_delete_ref_head(fs_info, delayed_refs, head);
1262		spin_unlock(&head->lock);
1263		spin_unlock(&delayed_refs->lock);
1264		mutex_unlock(&head->mutex);
1265
1266		if (pin_bytes) {
1267			struct btrfs_block_group *bg;
1268
1269			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
1270			if (WARN_ON_ONCE(bg == NULL)) {
1271				/*
1272				 * Unexpected and there's nothing we can do here
1273				 * because we are in a transaction abort path,
1274				 * so any errors can only be ignored or reported
1275				 * while attempting to cleanup all resources.
1276				 */
1277				btrfs_err(fs_info,
1278"block group for delayed ref at %llu was not found while destroying ref head",
1279					  head->bytenr);
1280			} else {
1281				spin_lock(&bg->space_info->lock);
1282				spin_lock(&bg->lock);
1283				bg->pinned += head->num_bytes;
1284				btrfs_space_info_update_bytes_pinned(fs_info,
1285								     bg->space_info,
1286								     head->num_bytes);
1287				bg->reserved -= head->num_bytes;
1288				bg->space_info->bytes_reserved -= head->num_bytes;
1289				spin_unlock(&bg->lock);
1290				spin_unlock(&bg->space_info->lock);
1291
1292				btrfs_put_block_group(bg);
1293			}
1294
1295			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
1296				head->bytenr + head->num_bytes - 1);
1297		}
1298		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1299		btrfs_put_delayed_ref_head(head);
1300		cond_resched();
1301		spin_lock(&delayed_refs->lock);
1302	}
1303	btrfs_qgroup_destroy_extent_records(trans);
1304
1305	spin_unlock(&delayed_refs->lock);
1306}
1307
1308void __cold btrfs_delayed_ref_exit(void)
1309{
1310	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1311	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
1312	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1313}
1314
1315int __init btrfs_delayed_ref_init(void)
1316{
1317	btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1318	if (!btrfs_delayed_ref_head_cachep)
1319		goto fail;
1320
1321	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
1322	if (!btrfs_delayed_ref_node_cachep)
1323		goto fail;
1324
1325	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1326	if (!btrfs_delayed_extent_op_cachep)
1327		goto fail;
1328
1329	return 0;
1330fail:
1331	btrfs_delayed_ref_exit();
1332	return -ENOMEM;
1333}
v4.6
 
  1/*
  2 * Copyright (C) 2009 Oracle.  All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of the GNU General Public
  6 * License v2 as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful,
  9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public
 14 * License along with this program; if not, write to the
 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 021110-1307, USA.
 17 */
 18
 19#include <linux/sched.h>
 20#include <linux/slab.h>
 21#include <linux/sort.h>
 
 22#include "ctree.h"
 23#include "delayed-ref.h"
 
 24#include "transaction.h"
 25#include "qgroup.h"
 
 
 
 26
 27struct kmem_cache *btrfs_delayed_ref_head_cachep;
 28struct kmem_cache *btrfs_delayed_tree_ref_cachep;
 29struct kmem_cache *btrfs_delayed_data_ref_cachep;
 30struct kmem_cache *btrfs_delayed_extent_op_cachep;
 31/*
 32 * delayed back reference update tracking.  For subvolume trees
 33 * we queue up extent allocations and backref maintenance for
 34 * delayed processing.   This avoids deep call chains where we
 35 * add extents in the middle of btrfs_search_slot, and it allows
 36 * us to buffer up frequently modified backrefs in an rb tree instead
 37 * of hammering updates on the extent allocation tree.
 38 */
 39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 40/*
 41 * compare two delayed tree backrefs with same bytenr and type
 
 
 
 
 
 
 42 */
 43static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
 44			  struct btrfs_delayed_tree_ref *ref1, int type)
 45{
 46	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
 47		if (ref1->root < ref2->root)
 48			return -1;
 49		if (ref1->root > ref2->root)
 50			return 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 51	} else {
 52		if (ref1->parent < ref2->parent)
 53			return -1;
 54		if (ref1->parent > ref2->parent)
 55			return 1;
 56	}
 
 
 
 
 
 
 
 
 57	return 0;
 58}
 59
 60/*
 61 * compare two delayed data backrefs with same bytenr and type
 62 */
 63static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
 64			  struct btrfs_delayed_data_ref *ref1)
 65{
 66	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 67		if (ref1->root < ref2->root)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 68			return -1;
 69		if (ref1->root > ref2->root)
 70			return 1;
 71		if (ref1->objectid < ref2->objectid)
 
 72			return -1;
 73		if (ref1->objectid > ref2->objectid)
 74			return 1;
 75		if (ref1->offset < ref2->offset)
 76			return -1;
 77		if (ref1->offset > ref2->offset)
 78			return 1;
 79	} else {
 80		if (ref1->parent < ref2->parent)
 
 81			return -1;
 82		if (ref1->parent > ref2->parent)
 83			return 1;
 84	}
 85	return 0;
 86}
 87
 88/* insert a new ref to head ref rbtree */
 89static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
 90						   struct rb_node *node)
 91{
 92	struct rb_node **p = &root->rb_node;
 
 93	struct rb_node *parent_node = NULL;
 94	struct btrfs_delayed_ref_head *entry;
 95	struct btrfs_delayed_ref_head *ins;
 96	u64 bytenr;
 97
 98	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 99	bytenr = ins->node.bytenr;
100	while (*p) {
 
 
101		parent_node = *p;
102		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
103				 href_node);
104
105		if (bytenr < entry->node.bytenr)
106			p = &(*p)->rb_left;
107		else if (bytenr > entry->node.bytenr)
108			p = &(*p)->rb_right;
109		else
 
110			return entry;
 
111	}
112
113	rb_link_node(node, parent_node, p);
114	rb_insert_color(node, root);
115	return NULL;
116}
117
118/*
119 * find an head entry based on bytenr. This returns the delayed ref
120 * head if it was able to find one, or NULL if nothing was in that spot.
121 * If return_bigger is given, the next bigger entry is returned if no exact
122 * match is found.
123 */
124static struct btrfs_delayed_ref_head *
125find_ref_head(struct rb_root *root, u64 bytenr,
126	      int return_bigger)
127{
128	struct rb_node *n;
129	struct btrfs_delayed_ref_head *entry;
130
131	n = root->rb_node;
132	entry = NULL;
133	while (n) {
134		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
135
136		if (bytenr < entry->node.bytenr)
137			n = n->rb_left;
138		else if (bytenr > entry->node.bytenr)
139			n = n->rb_right;
140		else
141			return entry;
142	}
143	if (entry && return_bigger) {
144		if (bytenr > entry->node.bytenr) {
145			n = rb_next(&entry->href_node);
146			if (!n)
147				n = rb_first(root);
148			entry = rb_entry(n, struct btrfs_delayed_ref_head,
149					 href_node);
150			return entry;
151		}
152		return entry;
153	}
154	return NULL;
155}
156
157int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
158			   struct btrfs_delayed_ref_head *head)
159{
160	struct btrfs_delayed_ref_root *delayed_refs;
161
162	delayed_refs = &trans->transaction->delayed_refs;
163	assert_spin_locked(&delayed_refs->lock);
164	if (mutex_trylock(&head->mutex))
165		return 0;
166
167	atomic_inc(&head->node.refs);
168	spin_unlock(&delayed_refs->lock);
169
170	mutex_lock(&head->mutex);
171	spin_lock(&delayed_refs->lock);
172	if (!head->node.in_tree) {
173		mutex_unlock(&head->mutex);
174		btrfs_put_delayed_ref(&head->node);
175		return -EAGAIN;
176	}
177	btrfs_put_delayed_ref(&head->node);
178	return 0;
179}
180
181static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
182				    struct btrfs_delayed_ref_root *delayed_refs,
183				    struct btrfs_delayed_ref_head *head,
184				    struct btrfs_delayed_ref_node *ref)
185{
186	if (btrfs_delayed_ref_is_head(ref)) {
187		head = btrfs_delayed_node_to_head(ref);
188		rb_erase(&head->href_node, &delayed_refs->href_root);
189	} else {
190		assert_spin_locked(&head->lock);
191		list_del(&ref->list);
192	}
193	ref->in_tree = 0;
194	btrfs_put_delayed_ref(ref);
195	atomic_dec(&delayed_refs->num_entries);
196	if (trans->delayed_ref_updates)
197		trans->delayed_ref_updates--;
198}
199
200static bool merge_ref(struct btrfs_trans_handle *trans,
201		      struct btrfs_delayed_ref_root *delayed_refs,
202		      struct btrfs_delayed_ref_head *head,
203		      struct btrfs_delayed_ref_node *ref,
204		      u64 seq)
205{
206	struct btrfs_delayed_ref_node *next;
 
207	bool done = false;
208
209	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
210				list);
211	while (!done && &next->list != &head->ref_list) {
212		int mod;
213		struct btrfs_delayed_ref_node *next2;
214
215		next2 = list_next_entry(next, list);
216
217		if (next == ref)
218			goto next;
219
 
 
220		if (seq && next->seq >= seq)
221			goto next;
222
223		if (next->type != ref->type)
224			goto next;
225
226		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
227		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
228		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
229				   btrfs_delayed_node_to_tree_ref(next),
230				   ref->type))
231			goto next;
232		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
233		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
234		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
235				   btrfs_delayed_node_to_data_ref(next)))
236			goto next;
237
238		if (ref->action == next->action) {
239			mod = next->ref_mod;
240		} else {
241			if (ref->ref_mod < next->ref_mod) {
242				swap(ref, next);
243				done = true;
244			}
245			mod = -next->ref_mod;
246		}
247
248		drop_delayed_ref(trans, delayed_refs, head, next);
249		ref->ref_mod += mod;
250		if (ref->ref_mod == 0) {
251			drop_delayed_ref(trans, delayed_refs, head, ref);
252			done = true;
253		} else {
254			/*
255			 * Can't have multiples of the same ref on a tree block.
256			 */
257			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
258				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
259		}
260next:
261		next = next2;
262	}
263
264	return done;
265}
266
267void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
268			      struct btrfs_fs_info *fs_info,
269			      struct btrfs_delayed_ref_root *delayed_refs,
270			      struct btrfs_delayed_ref_head *head)
271{
272	struct btrfs_delayed_ref_node *ref;
 
273	u64 seq = 0;
274
275	assert_spin_locked(&head->lock);
276
277	if (list_empty(&head->ref_list))
278		return;
279
280	/* We don't have too many refs to merge for data. */
281	if (head->is_data)
282		return;
283
284	spin_lock(&fs_info->tree_mod_seq_lock);
285	if (!list_empty(&fs_info->tree_mod_seq_list)) {
286		struct seq_list *elem;
287
288		elem = list_first_entry(&fs_info->tree_mod_seq_list,
289					struct seq_list, list);
290		seq = elem->seq;
291	}
292	spin_unlock(&fs_info->tree_mod_seq_lock);
293
294	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
295			       list);
296	while (&ref->list != &head->ref_list) {
297		if (seq && ref->seq >= seq)
298			goto next;
299
300		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
301			if (list_empty(&head->ref_list))
302				break;
303			ref = list_first_entry(&head->ref_list,
304					       struct btrfs_delayed_ref_node,
305					       list);
306			continue;
307		}
308next:
309		ref = list_next_entry(ref, list);
310	}
311}
312
313int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
314			    struct btrfs_delayed_ref_root *delayed_refs,
315			    u64 seq)
316{
317	struct seq_list *elem;
318	int ret = 0;
 
319
320	spin_lock(&fs_info->tree_mod_seq_lock);
321	if (!list_empty(&fs_info->tree_mod_seq_list)) {
322		elem = list_first_entry(&fs_info->tree_mod_seq_list,
323					struct seq_list, list);
324		if (seq >= elem->seq) {
325			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
326				 (u32)(seq >> 32), (u32)seq,
327				 (u32)(elem->seq >> 32), (u32)elem->seq,
328				 delayed_refs);
329			ret = 1;
330		}
331	}
332
333	spin_unlock(&fs_info->tree_mod_seq_lock);
334	return ret;
335}
336
337struct btrfs_delayed_ref_head *
338btrfs_select_ref_head(struct btrfs_trans_handle *trans)
 
339{
340	struct btrfs_delayed_ref_root *delayed_refs;
341	struct btrfs_delayed_ref_head *head;
342	u64 start;
343	bool loop = false;
344
345	delayed_refs = &trans->transaction->delayed_refs;
346
 
347again:
348	start = delayed_refs->run_delayed_start;
349	head = find_ref_head(&delayed_refs->href_root, start, 1);
350	if (!head && !loop) {
351		delayed_refs->run_delayed_start = 0;
352		start = 0;
353		loop = true;
354		head = find_ref_head(&delayed_refs->href_root, start, 1);
355		if (!head)
 
 
356			return NULL;
357	} else if (!head && loop) {
358		return NULL;
359	}
360
361	while (head->processing) {
362		struct rb_node *node;
363
364		node = rb_next(&head->href_node);
365		if (!node) {
366			if (loop)
367				return NULL;
368			delayed_refs->run_delayed_start = 0;
369			start = 0;
370			loop = true;
371			goto again;
372		}
373		head = rb_entry(node, struct btrfs_delayed_ref_head,
374				href_node);
375	}
376
377	head->processing = 1;
378	WARN_ON(delayed_refs->num_heads_ready == 0);
379	delayed_refs->num_heads_ready--;
380	delayed_refs->run_delayed_start = head->node.bytenr +
381		head->node.num_bytes;
 
 
 
 
 
 
 
 
 
 
 
 
382	return head;
383}
384
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
385/*
386 * Helper to insert the ref_node to the tail or merge with tail.
387 *
388 * Return 0 for insert.
389 * Return >0 for merge.
 
390 */
391static int
392add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
393			   struct btrfs_delayed_ref_root *root,
394			   struct btrfs_delayed_ref_head *href,
395			   struct btrfs_delayed_ref_node *ref)
396{
397	struct btrfs_delayed_ref_node *exist;
398	int mod;
399	int ret = 0;
400
401	spin_lock(&href->lock);
402	/* Check whether we can merge the tail node with ref */
403	if (list_empty(&href->ref_list))
404		goto add_tail;
405	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
406			   list);
407	/* No need to compare bytenr nor is_head */
408	if (exist->type != ref->type || exist->seq != ref->seq)
409		goto add_tail;
410
411	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
412	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
413	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
414			   btrfs_delayed_node_to_tree_ref(ref),
415			   ref->type))
416		goto add_tail;
417	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
418	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
419	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
420			   btrfs_delayed_node_to_data_ref(ref)))
421		goto add_tail;
422
423	/* Now we are sure we can merge */
424	ret = 1;
425	if (exist->action == ref->action) {
426		mod = ref->ref_mod;
427	} else {
428		/* Need to change action */
429		if (exist->ref_mod < ref->ref_mod) {
430			exist->action = ref->action;
431			mod = -exist->ref_mod;
432			exist->ref_mod = ref->ref_mod;
433		} else
434			mod = -ref->ref_mod;
435	}
436	exist->ref_mod += mod;
437
438	/* remove existing tail if its ref_mod is zero */
439	if (exist->ref_mod == 0)
440		drop_delayed_ref(trans, root, href, exist);
441	spin_unlock(&href->lock);
442	return ret;
443
444add_tail:
445	list_add_tail(&ref->list, &href->ref_list);
446	atomic_inc(&root->num_entries);
447	trans->delayed_ref_updates++;
448	spin_unlock(&href->lock);
449	return ret;
450}
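/*
 * Caller pattern (illustrative only): on a merge (return > 0) the node that
 * was passed in was not linked into the list, so the caller still owns it
 * and must free it, exactly as add_delayed_tree_ref() and
 * add_delayed_data_ref() below do:
 *
 *	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
 *	if (ret > 0)
 *		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
 */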
451
452/*
453 * helper function to update the accounting in the head ref
454 * existing and update must have the same bytenr
455 */
456static noinline void
457update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
458			 struct btrfs_delayed_ref_node *existing,
459			 struct btrfs_delayed_ref_node *update)
460{
461	struct btrfs_delayed_ref_head *existing_ref;
462	struct btrfs_delayed_ref_head *ref;
463	int old_ref_mod;
464
465	existing_ref = btrfs_delayed_node_to_head(existing);
466	ref = btrfs_delayed_node_to_head(update);
467	BUG_ON(existing_ref->is_data != ref->is_data);
468
469	spin_lock(&existing_ref->lock);
470	if (ref->must_insert_reserved) {
471		/* if the extent was freed and then
472		 * reallocated before the delayed ref
473		 * entries were processed, we can end up
474		 * with an existing head ref without
475		 * the must_insert_reserved flag set.
476		 * Set it again here
477		 */
478		existing_ref->must_insert_reserved = ref->must_insert_reserved;
479
480		/*
481		 * update the num_bytes so we make sure the accounting
482		 * is done correctly
483		 */
484		existing->num_bytes = update->num_bytes;
485
486	}
487
488	if (ref->extent_op) {
489		if (!existing_ref->extent_op) {
490			existing_ref->extent_op = ref->extent_op;
491		} else {
492			if (ref->extent_op->update_key) {
493				memcpy(&existing_ref->extent_op->key,
494				       &ref->extent_op->key,
495				       sizeof(ref->extent_op->key));
496				existing_ref->extent_op->update_key = true;
497			}
498			if (ref->extent_op->update_flags) {
499				existing_ref->extent_op->flags_to_set |=
500					ref->extent_op->flags_to_set;
501				existing_ref->extent_op->update_flags = true;
502			}
503			btrfs_free_delayed_extent_op(ref->extent_op);
504		}
505	}
506	/*
507	 * update the reference mod on the head to reflect this new operation.
508	 * We only need the lock here because this head could be getting
509	 * processed right now; for refs we just added we know we're fine.
510	 */
511	old_ref_mod = existing_ref->total_ref_mod;
512	existing->ref_mod += update->ref_mod;
513	existing_ref->total_ref_mod += update->ref_mod;
514
515	/*
516	 * If we are going from a positive ref mod to a negative one, or vice
517	 * versa, we need to make sure to adjust pending_csums accordingly.
518	 */
519	if (existing_ref->is_data) {
520		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
521			delayed_refs->pending_csums -= existing->num_bytes;
522		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
523			delayed_refs->pending_csums += existing->num_bytes;
524	}
525	spin_unlock(&existing_ref->lock);
526}
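/*
 * Worked example (added for illustration): suppose a data extent's head
 * currently has total_ref_mod == 0 and another drop arrives as a head update
 * with ref_mod == -1.  old_ref_mod is 0 (>= 0) and the new total_ref_mod is
 * -1 (< 0), so the extent's csum items are now expected to be deleted and
 * pending_csums grows by existing->num_bytes.  If a later add brings
 * total_ref_mod back to >= 0, the same amount is subtracted again.
 */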
527
528/*
529 * helper function to actually insert a head node into the rbtree.
530 * this does all the dirty work in terms of maintaining the correct
532 */
533static noinline struct btrfs_delayed_ref_head *
534add_delayed_ref_head(struct btrfs_fs_info *fs_info,
535		     struct btrfs_trans_handle *trans,
536		     struct btrfs_delayed_ref_node *ref,
537		     struct btrfs_qgroup_extent_record *qrecord,
538		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
539		     int action, int is_data)
540{
541	struct btrfs_delayed_ref_head *existing;
542	struct btrfs_delayed_ref_head *head_ref = NULL;
543	struct btrfs_delayed_ref_root *delayed_refs;
544	struct btrfs_qgroup_extent_record *qexisting;
545	int count_mod = 1;
546	int must_insert_reserved = 0;
547
548	/* If reserved is provided, it must be a data extent. */
549	BUG_ON(!is_data && reserved);
550
551	/*
552	 * the head node stores the sum of all the mods, so dropping a ref
553	 * should drop the sum in the head node by one.
554	 */
555	if (action == BTRFS_UPDATE_DELAYED_HEAD)
556		count_mod = 0;
557	else if (action == BTRFS_DROP_DELAYED_REF)
558		count_mod = -1;
559
560	/*
561	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
562	 * the reserved accounting when the extent is finally added, or
563	 * if a later modification deletes the delayed ref without ever
564	 * inserting the extent into the extent allocation tree.
565	 * ref->must_insert_reserved is the flag used to record
566	 * that accounting mods are required.
567	 *
568	 * Once we record must_insert_reserved, switch the action to
569	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
570	 */
571	if (action == BTRFS_ADD_DELAYED_EXTENT)
572		must_insert_reserved = 1;
573	else
574		must_insert_reserved = 0;
575
576	delayed_refs = &trans->transaction->delayed_refs;
577
578	/* first set the basic ref node struct up */
579	atomic_set(&ref->refs, 1);
580	ref->bytenr = bytenr;
581	ref->num_bytes = num_bytes;
582	ref->ref_mod = count_mod;
583	ref->type  = 0;
584	ref->action  = 0;
585	ref->is_head = 1;
586	ref->in_tree = 1;
587	ref->seq = 0;
588
589	head_ref = btrfs_delayed_node_to_head(ref);
590	head_ref->must_insert_reserved = must_insert_reserved;
591	head_ref->is_data = is_data;
592	INIT_LIST_HEAD(&head_ref->ref_list);
593	head_ref->processing = 0;
594	head_ref->total_ref_mod = count_mod;
595	head_ref->qgroup_reserved = 0;
596	head_ref->qgroup_ref_root = 0;
597
598	/* Record qgroup extent info if provided */
599	if (qrecord) {
600		if (ref_root && reserved) {
601			head_ref->qgroup_ref_root = ref_root;
602			head_ref->qgroup_reserved = reserved;
603		}
604
605		qrecord->bytenr = bytenr;
606		qrecord->num_bytes = num_bytes;
607		qrecord->old_roots = NULL;
608
609		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
610							     qrecord);
611		if (qexisting)
612			kfree(qrecord);
613	}
614
615	spin_lock_init(&head_ref->lock);
616	mutex_init(&head_ref->mutex);
617
618	trace_add_delayed_ref_head(ref, head_ref, action);
619
620	existing = htree_insert(&delayed_refs->href_root,
621				&head_ref->href_node);
622	if (existing) {
623		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
624			&& existing->qgroup_reserved);
625		update_existing_head_ref(delayed_refs, &existing->node, ref);
626		/*
627		 * we've updated the existing ref, free the newly
628		 * allocated ref
629		 */
630		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
631		head_ref = existing;
632	} else {
633		if (is_data && count_mod < 0)
634			delayed_refs->pending_csums += num_bytes;
635		delayed_refs->num_heads++;
636		delayed_refs->num_heads_ready++;
637		atomic_inc(&delayed_refs->num_entries);
638		trans->delayed_ref_updates++;
639	}
640	return head_ref;
641}
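/*
 * Quick reference (added for clarity, mirroring the logic above): the action
 * maps to the head's count_mod as follows:
 *
 *	BTRFS_ADD_DELAYED_REF / BTRFS_ADD_DELAYED_EXTENT  ->  +1
 *	BTRFS_DROP_DELAYED_REF                            ->  -1
 *	BTRFS_UPDATE_DELAYED_HEAD                         ->   0
 *
 * Only BTRFS_ADD_DELAYED_EXTENT sets must_insert_reserved on the head.
 */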
642
643/*
644 * helper to insert a delayed tree ref into the ref list of its head ref.
645 */
646static noinline void
647add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
648		     struct btrfs_trans_handle *trans,
649		     struct btrfs_delayed_ref_head *head_ref,
650		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
651		     u64 num_bytes, u64 parent, u64 ref_root, int level,
652		     int action)
653{
654	struct btrfs_delayed_tree_ref *full_ref;
655	struct btrfs_delayed_ref_root *delayed_refs;
656	u64 seq = 0;
657	int ret;
658
659	if (action == BTRFS_ADD_DELAYED_EXTENT)
660		action = BTRFS_ADD_DELAYED_REF;
661
662	if (is_fstree(ref_root))
663		seq = atomic64_read(&fs_info->tree_mod_seq);
664	delayed_refs = &trans->transaction->delayed_refs;
665
666	/* first set the basic ref node struct up */
667	atomic_set(&ref->refs, 1);
668	ref->bytenr = bytenr;
669	ref->num_bytes = num_bytes;
670	ref->ref_mod = 1;
671	ref->action = action;
672	ref->is_head = 0;
673	ref->in_tree = 1;
674	ref->seq = seq;
675
676	full_ref = btrfs_delayed_node_to_tree_ref(ref);
677	full_ref->parent = parent;
678	full_ref->root = ref_root;
679	if (parent)
680		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
681	else
682		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
683	full_ref->level = level;
684
685	trace_add_delayed_tree_ref(ref, full_ref, action);
686
687	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
688
689	/*
690	 * XXX: memory should be freed at the same level it was allocated,
691	 * which is not the case here.  This needs cleanup.
692	 */
693	if (ret > 0)
694		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
695}
696
697/*
698 * helper to insert a delayed data ref into the ref list of its head ref.
699 */
700static noinline void
701add_delayed_data_ref(struct btrfs_fs_info *fs_info,
702		     struct btrfs_trans_handle *trans,
703		     struct btrfs_delayed_ref_head *head_ref,
704		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
705		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
706		     u64 offset, int action)
707{
708	struct btrfs_delayed_data_ref *full_ref;
709	struct btrfs_delayed_ref_root *delayed_refs;
710	u64 seq = 0;
711	int ret;
712
713	if (action == BTRFS_ADD_DELAYED_EXTENT)
714		action = BTRFS_ADD_DELAYED_REF;
715
716	delayed_refs = &trans->transaction->delayed_refs;
717
718	if (is_fstree(ref_root))
719		seq = atomic64_read(&fs_info->tree_mod_seq);
720
721	/* first set the basic ref node struct up */
722	atomic_set(&ref->refs, 1);
723	ref->bytenr = bytenr;
724	ref->num_bytes = num_bytes;
725	ref->ref_mod = 1;
726	ref->action = action;
727	ref->is_head = 0;
728	ref->in_tree = 1;
729	ref->seq = seq;
730
731	full_ref = btrfs_delayed_node_to_data_ref(ref);
732	full_ref->parent = parent;
733	full_ref->root = ref_root;
734	if (parent)
735		ref->type = BTRFS_SHARED_DATA_REF_KEY;
736	else
737		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
738
739	full_ref->objectid = owner;
740	full_ref->offset = offset;
741
742	trace_add_delayed_data_ref(ref, full_ref, action);
743
744	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
745
746	if (ret > 0)
747		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
748}
749
750/*
751 * add a delayed tree ref.  This does all of the accounting required
752 * to make sure the delayed ref is eventually processed before this
753 * transaction commits.
754 */
755int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
756			       struct btrfs_trans_handle *trans,
757			       u64 bytenr, u64 num_bytes, u64 parent,
758			       u64 ref_root,  int level, int action,
759			       struct btrfs_delayed_extent_op *extent_op)
760{
761	struct btrfs_delayed_tree_ref *ref;
762	struct btrfs_delayed_ref_head *head_ref;
763	struct btrfs_delayed_ref_root *delayed_refs;
764	struct btrfs_qgroup_extent_record *record = NULL;
765
766	BUG_ON(extent_op && extent_op->is_data);
767	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
768	if (!ref)
769		return -ENOMEM;
770
771	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
772	if (!head_ref)
773		goto free_ref;
774
775	if (fs_info->quota_enabled && is_fstree(ref_root)) {
776		record = kmalloc(sizeof(*record), GFP_NOFS);
777		if (!record)
778			goto free_head_ref;
779	}
780
781	head_ref->extent_op = extent_op;
782
783	delayed_refs = &trans->transaction->delayed_refs;
784	spin_lock(&delayed_refs->lock);
785
786	/*
787	 * insert both the head node and the new ref without dropping
788	 * the spin lock
789	 */
790	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
791					bytenr, num_bytes, 0, 0, action, 0);
792
793	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
794			     num_bytes, parent, ref_root, level, action);
795	spin_unlock(&delayed_refs->lock);
796
797	return 0;
798
799free_head_ref:
800	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
801free_ref:
802	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
803
804	return -ENOMEM;
805}
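/*
 * Example (editor's sketch; bytenr, num_bytes, ref_root and level are
 * placeholders): a caller that just allocated a tree block and wants the
 * backref inserted once the delayed refs are run could queue:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *					 0, ref_root, level,
 *					 BTRFS_ADD_DELAYED_EXTENT, NULL);
 *
 * A zero parent records a keyed BTRFS_TREE_BLOCK_REF_KEY ref; a non-zero
 * parent records a BTRFS_SHARED_BLOCK_REF_KEY ref instead.
 */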
806
807/*
808 * add a delayed data ref. It is similar to btrfs_add_delayed_tree_ref.
809 */
810int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
811			       struct btrfs_trans_handle *trans,
812			       u64 bytenr, u64 num_bytes,
813			       u64 parent, u64 ref_root,
814			       u64 owner, u64 offset, u64 reserved, int action,
815			       struct btrfs_delayed_extent_op *extent_op)
816{
817	struct btrfs_delayed_data_ref *ref;
818	struct btrfs_delayed_ref_head *head_ref;
819	struct btrfs_delayed_ref_root *delayed_refs;
820	struct btrfs_qgroup_extent_record *record = NULL;
821
822	BUG_ON(extent_op && !extent_op->is_data);
823	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
824	if (!ref)
825		return -ENOMEM;
826
827	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
828	if (!head_ref) {
829		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
830		return -ENOMEM;
831	}
832
833	if (fs_info->quota_enabled && is_fstree(ref_root)) {
834		record = kmalloc(sizeof(*record), GFP_NOFS);
835		if (!record) {
836			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
837			kmem_cache_free(btrfs_delayed_ref_head_cachep,
838					head_ref);
839			return -ENOMEM;
840		}
841	}
842
843	head_ref->extent_op = extent_op;
844
845	delayed_refs = &trans->transaction->delayed_refs;
846	spin_lock(&delayed_refs->lock);
847
848	/*
849	 * insert both the head node and the new ref without dropping
850	 * the spin lock
851	 */
852	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
853					bytenr, num_bytes, ref_root, reserved,
854					action, 1);
855
856	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
857				   num_bytes, parent, ref_root, owner, offset,
858				   action);
859	spin_unlock(&delayed_refs->lock);
860
861	return 0;
862}
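/*
 * Example (editor's sketch; all values are placeholders): dropping one
 * reference to a file extent could be queued as:
 *
 *	ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, num_bytes,
 *					 0, root_objectid, inode_objectid,
 *					 file_offset, 0,
 *					 BTRFS_DROP_DELAYED_REF, NULL);
 *
 * A non-zero reserved value is only valid for data extents and, when quotas
 * are enabled, is recorded as the head's qgroup_reserved.
 */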
863
864int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
865				     struct btrfs_trans_handle *trans,
866				     u64 ref_root, u64 bytenr, u64 num_bytes)
867{
868	struct btrfs_delayed_ref_root *delayed_refs;
869	struct btrfs_delayed_ref_head *ref_head;
870	int ret = 0;
871
872	if (!fs_info->quota_enabled || !is_fstree(ref_root))
873		return 0;
874
875	delayed_refs = &trans->transaction->delayed_refs;
876
877	spin_lock(&delayed_refs->lock);
878	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
879	if (!ref_head) {
880		ret = -ENOENT;
881		goto out;
882	}
883	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
884	ref_head->qgroup_ref_root = ref_root;
885	ref_head->qgroup_reserved = num_bytes;
886out:
887	spin_unlock(&delayed_refs->lock);
888	return ret;
889}
890
891int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
892				struct btrfs_trans_handle *trans,
893				u64 bytenr, u64 num_bytes,
894				struct btrfs_delayed_extent_op *extent_op)
895{
896	struct btrfs_delayed_ref_head *head_ref;
897	struct btrfs_delayed_ref_root *delayed_refs;
898
899	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
900	if (!head_ref)
901		return -ENOMEM;
902
903	head_ref->extent_op = extent_op;
904
905	delayed_refs = &trans->transaction->delayed_refs;
906	spin_lock(&delayed_refs->lock);
907
908	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
909			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
910			     extent_op->is_data);
911
912	spin_unlock(&delayed_refs->lock);
913	return 0;
914}
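/*
 * Example (editor's sketch; assumes the btrfs_alloc_delayed_extent_op()
 * helper declared in delayed-ref.h): updating the flags of an extent item
 * without changing its reference count could be queued as:
 *
 *	extent_op = btrfs_alloc_delayed_extent_op();
 *	if (!extent_op)
 *		return -ENOMEM;
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = true;
 *	extent_op->update_key = false;
 *	extent_op->is_data = is_data;
 *	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr, num_bytes,
 *					  extent_op);
 *
 * The head is queued with BTRFS_UPDATE_DELAYED_HEAD, which does not change
 * its ref_mod.
 */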
915
916/*
917 * this does a simple search for the head node for a given extent.
918 * It must be called with the delayed ref spinlock held, and it returns
919 * the head node if one was found, or NULL if not.
920 */
921struct btrfs_delayed_ref_head *
922btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
923{
924	struct btrfs_delayed_ref_root *delayed_refs;
925
926	delayed_refs = &trans->transaction->delayed_refs;
927	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
928}
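/*
 * Usage sketch (illustrative, not from the original source), following the
 * locking rule stated above:
 *
 *	delayed_refs = &trans->transaction->delayed_refs;
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head)
 *		... inspect or lock the head while still holding the lock ...
 *	spin_unlock(&delayed_refs->lock);
 */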
929
930void btrfs_delayed_ref_exit(void)
931{
932	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
933	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
934	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
935	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
936}
937
938int btrfs_delayed_ref_init(void)
939{
940	btrfs_delayed_ref_head_cachep = kmem_cache_create(
941				"btrfs_delayed_ref_head",
942				sizeof(struct btrfs_delayed_ref_head), 0,
943				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
944	if (!btrfs_delayed_ref_head_cachep)
945		goto fail;
946
947	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
948				"btrfs_delayed_tree_ref",
949				sizeof(struct btrfs_delayed_tree_ref), 0,
950				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
951	if (!btrfs_delayed_tree_ref_cachep)
952		goto fail;
953
954	btrfs_delayed_data_ref_cachep = kmem_cache_create(
955				"btrfs_delayed_data_ref",
956				sizeof(struct btrfs_delayed_data_ref), 0,
957				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
958	if (!btrfs_delayed_data_ref_cachep)
959		goto fail;
960
961	btrfs_delayed_extent_op_cachep = kmem_cache_create(
962				"btrfs_delayed_extent_op",
963				sizeof(struct btrfs_delayed_extent_op), 0,
964				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
965	if (!btrfs_delayed_extent_op_cachep)
966		goto fail;
967
968	return 0;
969fail:
970	btrfs_delayed_ref_exit();
971	return -ENOMEM;
972}