Linux v5.4: fs/btrfs/delayed-inode.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   4 * Written by Miao Xie <miaox@cn.fujitsu.com>
   5 */
   6
   7#include <linux/slab.h>
   8#include <linux/iversion.h>
   9#include "misc.h"
  10#include "delayed-inode.h"
  11#include "disk-io.h"
  12#include "transaction.h"
  13#include "ctree.h"
  14#include "qgroup.h"
  15
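/*
 * Tunables, in numbers of delayed items: a flush is forced and waited on
 * once BTRFS_DELAYED_WRITEBACK items are pending, background work is
 * kicked off above BTRFS_DELAYED_BACKGROUND, and waiters are woken after
 * every BTRFS_DELAYED_BATCH completed items (see
 * btrfs_balance_delayed_items() and finish_one_item() below).
 */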
  16#define BTRFS_DELAYED_WRITEBACK		512
  17#define BTRFS_DELAYED_BACKGROUND	128
  18#define BTRFS_DELAYED_BATCH		16
  19
  20static struct kmem_cache *delayed_node_cache;
  21
  22int __init btrfs_delayed_inode_init(void)
  23{
  24	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  25					sizeof(struct btrfs_delayed_node),
  26					0,
  27					SLAB_MEM_SPREAD,
  28					NULL);
  29	if (!delayed_node_cache)
  30		return -ENOMEM;
  31	return 0;
  32}
  33
  34void __cold btrfs_delayed_inode_exit(void)
  35{
  36	kmem_cache_destroy(delayed_node_cache);
  37}
  38
  39static inline void btrfs_init_delayed_node(
  40				struct btrfs_delayed_node *delayed_node,
  41				struct btrfs_root *root, u64 inode_id)
  42{
  43	delayed_node->root = root;
  44	delayed_node->inode_id = inode_id;
  45	refcount_set(&delayed_node->refs, 0);
  46	delayed_node->ins_root = RB_ROOT_CACHED;
  47	delayed_node->del_root = RB_ROOT_CACHED;
  48	mutex_init(&delayed_node->mutex);
  49	INIT_LIST_HEAD(&delayed_node->n_list);
  50	INIT_LIST_HEAD(&delayed_node->p_list);
  51}
  52
  53static inline int btrfs_is_continuous_delayed_item(
  54					struct btrfs_delayed_item *item1,
  55					struct btrfs_delayed_item *item2)
  56{
  57	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  58	    item1->key.objectid == item2->key.objectid &&
  59	    item1->key.type == item2->key.type &&
  60	    item1->key.offset + 1 == item2->key.offset)
  61		return 1;
  62	return 0;
  63}
  64
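/*
 * Get the delayed node cached in the btrfs inode, or look it up in the
 * root's radix tree.  A reference is taken on the node that is returned;
 * drop it with btrfs_release_delayed_node() when done.
 */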
  65static struct btrfs_delayed_node *btrfs_get_delayed_node(
  66		struct btrfs_inode *btrfs_inode)
  67{
  68	struct btrfs_root *root = btrfs_inode->root;
  69	u64 ino = btrfs_ino(btrfs_inode);
  70	struct btrfs_delayed_node *node;
  71
  72	node = READ_ONCE(btrfs_inode->delayed_node);
  73	if (node) {
  74		refcount_inc(&node->refs);
  75		return node;
  76	}
  77
  78	spin_lock(&root->inode_lock);
  79	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  80
  81	if (node) {
  82		if (btrfs_inode->delayed_node) {
  83			refcount_inc(&node->refs);	/* can be accessed */
  84			BUG_ON(btrfs_inode->delayed_node != node);
  85			spin_unlock(&root->inode_lock);
  86			return node;
  87		}
  88
  89		/*
  90		 * It's possible that we're racing into the middle of removing
  91		 * this node from the radix tree.  In this case, the refcount
  92		 * was zero and it should never go back to one.  Just return
  93		 * NULL like it was never in the radix at all; our release
  94		 * function is in the process of removing it.
  95		 *
  96		 * Some implementations of refcount_inc refuse to bump the
  97		 * refcount once it has hit zero.  If we don't do this dance
  98		 * here, refcount_inc() may decide to just WARN_ONCE() instead
  99		 * of actually bumping the refcount.
 100		 *
 101		 * If this node is properly in the radix, we want to bump the
 102		 * refcount twice, once for the inode and once for this get
 103		 * operation.
 104		 */
 105		if (refcount_inc_not_zero(&node->refs)) {
 106			refcount_inc(&node->refs);
 107			btrfs_inode->delayed_node = node;
 108		} else {
 109			node = NULL;
 110		}
 111
 112		spin_unlock(&root->inode_lock);
 113		return node;
 114	}
 115	spin_unlock(&root->inode_lock);
 116
 117	return NULL;
 118}
 119
 120/* Will return either the node or PTR_ERR(-ENOMEM) */
 121static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 122		struct btrfs_inode *btrfs_inode)
 123{
 124	struct btrfs_delayed_node *node;
 125	struct btrfs_root *root = btrfs_inode->root;
 126	u64 ino = btrfs_ino(btrfs_inode);
 127	int ret;
 128
 129again:
 130	node = btrfs_get_delayed_node(btrfs_inode);
 131	if (node)
 132		return node;
 133
 134	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 135	if (!node)
 136		return ERR_PTR(-ENOMEM);
 137	btrfs_init_delayed_node(node, root, ino);
 138
 139	/* cached in the btrfs inode and can be accessed */
 140	refcount_set(&node->refs, 2);
 141
 142	ret = radix_tree_preload(GFP_NOFS);
 143	if (ret) {
 144		kmem_cache_free(delayed_node_cache, node);
 145		return ERR_PTR(ret);
 146	}
 147
 148	spin_lock(&root->inode_lock);
 149	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 150	if (ret == -EEXIST) {
 151		spin_unlock(&root->inode_lock);
 152		kmem_cache_free(delayed_node_cache, node);
 153		radix_tree_preload_end();
 154		goto again;
 155	}
 156	btrfs_inode->delayed_node = node;
 157	spin_unlock(&root->inode_lock);
 158	radix_tree_preload_end();
 159
 160	return node;
 161}
 162
 163/*
 164 * Call this while holding delayed_node->mutex.
 165 *
 166 * If mod = 1, also add this node to the prepare list.
 167 */
 168static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 169				     struct btrfs_delayed_node *node,
 170				     int mod)
 171{
 172	spin_lock(&root->lock);
 173	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 174		if (!list_empty(&node->p_list))
 175			list_move_tail(&node->p_list, &root->prepare_list);
 176		else if (mod)
 177			list_add_tail(&node->p_list, &root->prepare_list);
 178	} else {
 179		list_add_tail(&node->n_list, &root->node_list);
 180		list_add_tail(&node->p_list, &root->prepare_list);
 181		refcount_inc(&node->refs);	/* inserted into list */
 182		root->nodes++;
 183		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 184	}
 185	spin_unlock(&root->lock);
 186}
 187
 188/* Call this while holding delayed_node->mutex. */
 189static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 190				       struct btrfs_delayed_node *node)
 191{
 192	spin_lock(&root->lock);
 193	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 194		root->nodes--;
 195		refcount_dec(&node->refs);	/* not in the list */
 196		list_del_init(&node->n_list);
 197		if (!list_empty(&node->p_list))
 198			list_del_init(&node->p_list);
 199		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 200	}
 201	spin_unlock(&root->lock);
 202}
 203
 204static struct btrfs_delayed_node *btrfs_first_delayed_node(
 205			struct btrfs_delayed_root *delayed_root)
 206{
 207	struct list_head *p;
 208	struct btrfs_delayed_node *node = NULL;
 209
 210	spin_lock(&delayed_root->lock);
 211	if (list_empty(&delayed_root->node_list))
 212		goto out;
 213
 214	p = delayed_root->node_list.next;
 215	node = list_entry(p, struct btrfs_delayed_node, n_list);
 216	refcount_inc(&node->refs);
 217out:
 218	spin_unlock(&delayed_root->lock);
 219
 220	return node;
 221}
 222
 223static struct btrfs_delayed_node *btrfs_next_delayed_node(
 224						struct btrfs_delayed_node *node)
 225{
 226	struct btrfs_delayed_root *delayed_root;
 227	struct list_head *p;
 228	struct btrfs_delayed_node *next = NULL;
 229
 230	delayed_root = node->root->fs_info->delayed_root;
 231	spin_lock(&delayed_root->lock);
 232	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 233		/* not in the list */
 234		if (list_empty(&delayed_root->node_list))
 235			goto out;
 236		p = delayed_root->node_list.next;
 237	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
 238		goto out;
 239	else
 240		p = node->n_list.next;
 241
 242	next = list_entry(p, struct btrfs_delayed_node, n_list);
 243	refcount_inc(&next->refs);
 244out:
 245	spin_unlock(&delayed_root->lock);
 246
 247	return next;
 248}
 249
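/*
 * Drop one reference on a delayed node, requeueing it if it still has
 * pending items.  When the last reference goes away, the node is removed
 * from the radix tree and freed.
 */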
 250static void __btrfs_release_delayed_node(
 251				struct btrfs_delayed_node *delayed_node,
 252				int mod)
 253{
 254	struct btrfs_delayed_root *delayed_root;
 255
 256	if (!delayed_node)
 257		return;
 258
 259	delayed_root = delayed_node->root->fs_info->delayed_root;
 260
 261	mutex_lock(&delayed_node->mutex);
 262	if (delayed_node->count)
 263		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 264	else
 265		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 266	mutex_unlock(&delayed_node->mutex);
 267
 268	if (refcount_dec_and_test(&delayed_node->refs)) {
 269		struct btrfs_root *root = delayed_node->root;
 270
 271		spin_lock(&root->inode_lock);
 272		/*
 273		 * Once our refcount goes to zero, nobody is allowed to bump it
 274		 * back up.  We can delete it now.
 275		 */
 276		ASSERT(refcount_read(&delayed_node->refs) == 0);
 277		radix_tree_delete(&root->delayed_nodes_tree,
 278				  delayed_node->inode_id);
 279		spin_unlock(&root->inode_lock);
 280		kmem_cache_free(delayed_node_cache, delayed_node);
 281	}
 282}
 283
 284static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 285{
 286	__btrfs_release_delayed_node(node, 0);
 287}
 288
 289static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 290					struct btrfs_delayed_root *delayed_root)
 291{
 292	struct list_head *p;
 293	struct btrfs_delayed_node *node = NULL;
 294
 295	spin_lock(&delayed_root->lock);
 296	if (list_empty(&delayed_root->prepare_list))
 297		goto out;
 298
 299	p = delayed_root->prepare_list.next;
 300	list_del_init(p);
 301	node = list_entry(p, struct btrfs_delayed_node, p_list);
 302	refcount_inc(&node->refs);
 303out:
 304	spin_unlock(&delayed_root->lock);
 305
 306	return node;
 307}
 308
 309static inline void btrfs_release_prepared_delayed_node(
 310					struct btrfs_delayed_node *node)
 311{
 312	__btrfs_release_delayed_node(node, 1);
 313}
 314
 315static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 316{
 317	struct btrfs_delayed_item *item;
 318	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 319	if (item) {
 320		item->data_len = data_len;
 321		item->ins_or_del = 0;
 322		item->bytes_reserved = 0;
 323		item->delayed_node = NULL;
 324		refcount_set(&item->refs, 1);
 325	}
 326	return item;
 327}
 328
 329/*
 330 * __btrfs_lookup_delayed_item - look up the delayed item by key
 331 * @root:	  the rb-root to look up in
 332 * @key:	  the key to look up
 333 * @prev:	  used to store the prev item if the right item isn't found
 334 * @next:	  used to store the next item if the right item isn't found
 335 *
 336 * Note: if we don't find the right item, we return NULL and store the
 337 * prev item and the next item via @prev and @next.
 338 */
 339static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 340				struct rb_root *root,
 341				struct btrfs_key *key,
 342				struct btrfs_delayed_item **prev,
 343				struct btrfs_delayed_item **next)
 344{
 345	struct rb_node *node, *prev_node = NULL;
 346	struct btrfs_delayed_item *delayed_item = NULL;
 347	int ret = 0;
 348
 349	node = root->rb_node;
 350
 351	while (node) {
 352		delayed_item = rb_entry(node, struct btrfs_delayed_item,
 353					rb_node);
 354		prev_node = node;
 355		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 356		if (ret < 0)
 357			node = node->rb_right;
 358		else if (ret > 0)
 359			node = node->rb_left;
 360		else
 361			return delayed_item;
 362	}
 363
 364	if (prev) {
 365		if (!prev_node)
 366			*prev = NULL;
 367		else if (ret < 0)
 368			*prev = delayed_item;
 369		else if ((node = rb_prev(prev_node)) != NULL) {
 370			*prev = rb_entry(node, struct btrfs_delayed_item,
 371					 rb_node);
 372		} else
 373			*prev = NULL;
 374	}
 375
 376	if (next) {
 377		if (!prev_node)
 378			*next = NULL;
 379		else if (ret > 0)
 380			*next = delayed_item;
 381		else if ((node = rb_next(prev_node)) != NULL) {
 382			*next = rb_entry(node, struct btrfs_delayed_item,
 383					 rb_node);
 384		} else
 385			*next = NULL;
 386	}
 387	return NULL;
 388}
 389
 390static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 391					struct btrfs_delayed_node *delayed_node,
 392					struct btrfs_key *key)
 393{
 394	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
 395					   NULL, NULL);
 396}
 397
 398static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 399				    struct btrfs_delayed_item *ins,
 400				    int action)
 401{
 402	struct rb_node **p, *node;
 403	struct rb_node *parent_node = NULL;
 404	struct rb_root_cached *root;
 405	struct btrfs_delayed_item *item;
 406	int cmp;
 407	bool leftmost = true;
 408
 409	if (action == BTRFS_DELAYED_INSERTION_ITEM)
 410		root = &delayed_node->ins_root;
 411	else if (action == BTRFS_DELAYED_DELETION_ITEM)
 412		root = &delayed_node->del_root;
 413	else
 414		BUG();
 415	p = &root->rb_root.rb_node;
 416	node = &ins->rb_node;
 417
 418	while (*p) {
 419		parent_node = *p;
 420		item = rb_entry(parent_node, struct btrfs_delayed_item,
 421				 rb_node);
 422
 423		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 424		if (cmp < 0) {
 425			p = &(*p)->rb_right;
 426			leftmost = false;
 427		} else if (cmp > 0) {
 428			p = &(*p)->rb_left;
 429		} else {
 430			return -EEXIST;
 431		}
 432	}
 433
 434	rb_link_node(node, parent_node, p);
 435	rb_insert_color_cached(node, root, leftmost);
 436	ins->delayed_node = delayed_node;
 437	ins->ins_or_del = action;
 438
 439	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 440	    action == BTRFS_DELAYED_INSERTION_ITEM &&
 441	    ins->key.offset >= delayed_node->index_cnt)
 442			delayed_node->index_cnt = ins->key.offset + 1;
 443
 444	delayed_node->count++;
 445	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 446	return 0;
 447}
 448
 449static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 450					      struct btrfs_delayed_item *item)
 451{
 452	return __btrfs_add_delayed_item(node, item,
 453					BTRFS_DELAYED_INSERTION_ITEM);
 454}
 455
 456static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 457					     struct btrfs_delayed_item *item)
 458{
 459	return __btrfs_add_delayed_item(node, item,
 460					BTRFS_DELAYED_DELETION_ITEM);
 461}
 462
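/*
 * Account one finished delayed item and wake up anyone throttling in
 * btrfs_balance_delayed_items() once the item count falls below the
 * background threshold or a whole batch has completed.
 */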
 463static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 464{
 465	int seq = atomic_inc_return(&delayed_root->items_seq);
 466
 467	/* atomic_dec_return implies a barrier */
 468	if ((atomic_dec_return(&delayed_root->items) <
 469	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
 470		cond_wake_up_nomb(&delayed_root->wait);
 471}
 472
 473static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 474{
 475	struct rb_root_cached *root;
 476	struct btrfs_delayed_root *delayed_root;
 477
 478	/* Not associated with any delayed_node */
 479	if (!delayed_item->delayed_node)
 480		return;
 481	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 482
 483	BUG_ON(!delayed_root);
 484	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 485	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 486
 487	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 488		root = &delayed_item->delayed_node->ins_root;
 489	else
 490		root = &delayed_item->delayed_node->del_root;
 491
 492	rb_erase_cached(&delayed_item->rb_node, root);
 493	delayed_item->delayed_node->count--;
 494
 495	finish_one_item(delayed_root);
 496}
 497
 498static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 499{
 500	if (item) {
 501		__btrfs_remove_delayed_item(item);
 502		if (refcount_dec_and_test(&item->refs))
 503			kfree(item);
 504	}
 505}
 506
 507static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 508					struct btrfs_delayed_node *delayed_node)
 509{
 510	struct rb_node *p;
 511	struct btrfs_delayed_item *item = NULL;
 512
 513	p = rb_first_cached(&delayed_node->ins_root);
 514	if (p)
 515		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 516
 517	return item;
 518}
 519
 520static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 521					struct btrfs_delayed_node *delayed_node)
 522{
 523	struct rb_node *p;
 524	struct btrfs_delayed_item *item = NULL;
 525
 526	p = rb_first_cached(&delayed_node->del_root);
 527	if (p)
 528		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 529
 530	return item;
 531}
 532
 533static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 534						struct btrfs_delayed_item *item)
 535{
 536	struct rb_node *p;
 537	struct btrfs_delayed_item *next = NULL;
 538
 539	p = rb_next(&item->rb_node);
 540	if (p)
 541		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 542
 543	return next;
 544}
 545
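/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reserve to the global delayed_block_rsv, where it
 * stays until the item is written out or dropped.
 */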
 546static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 547					       struct btrfs_root *root,
 548					       struct btrfs_delayed_item *item)
 549{
 550	struct btrfs_block_rsv *src_rsv;
 551	struct btrfs_block_rsv *dst_rsv;
 552	struct btrfs_fs_info *fs_info = root->fs_info;
 553	u64 num_bytes;
 554	int ret;
 555
 556	if (!trans->bytes_reserved)
 557		return 0;
 558
 559	src_rsv = trans->block_rsv;
 560	dst_rsv = &fs_info->delayed_block_rsv;
 561
 562	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 563
 564	/*
  565	 * Here we migrate the space rsv from the transaction rsv, since we
  566	 * have already reserved space when starting a transaction.  So there
  567	 * is no need to reserve qgroup space here.
 568	 */
 569	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 570	if (!ret) {
 571		trace_btrfs_space_reservation(fs_info, "delayed_item",
 572					      item->key.objectid,
 573					      num_bytes, 1);
 574		item->bytes_reserved = num_bytes;
 575	}
 576
 577	return ret;
 578}
 579
 580static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 581						struct btrfs_delayed_item *item)
 582{
 583	struct btrfs_block_rsv *rsv;
 584	struct btrfs_fs_info *fs_info = root->fs_info;
 585
 586	if (!item->bytes_reserved)
 587		return;
 588
 589	rsv = &fs_info->delayed_block_rsv;
 590	/*
 591	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
 592	 * to release/reserve qgroup space.
 593	 */
 594	trace_btrfs_space_reservation(fs_info, "delayed_item",
 595				      item->key.objectid, item->bytes_reserved,
 596				      0);
 597	btrfs_block_rsv_release(fs_info, rsv,
 598				item->bytes_reserved);
 599}
 600
 601static int btrfs_delayed_inode_reserve_metadata(
 602					struct btrfs_trans_handle *trans,
 603					struct btrfs_root *root,
 604					struct btrfs_inode *inode,
 605					struct btrfs_delayed_node *node)
 606{
 607	struct btrfs_fs_info *fs_info = root->fs_info;
 608	struct btrfs_block_rsv *src_rsv;
 609	struct btrfs_block_rsv *dst_rsv;
 610	u64 num_bytes;
 611	int ret;
 612
 613	src_rsv = trans->block_rsv;
 614	dst_rsv = &fs_info->delayed_block_rsv;
 615
 616	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 617
 618	/*
 619	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
 620	 * which, for speed, doesn't reserve space.  This is a problem since we
 621	 * still need to reserve space for this update, so try to reserve the
 622	 * space.
 623	 *
 624	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 625	 * we always reserve enough to update the inode item.
 626	 */
 627	if (!src_rsv || (!trans->bytes_reserved &&
 628			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 629		ret = btrfs_qgroup_reserve_meta_prealloc(root,
 630				fs_info->nodesize, true);
 631		if (ret < 0)
 632			return ret;
 633		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 634					  BTRFS_RESERVE_NO_FLUSH);
 635		/*
 637		 * Since we're under a transaction, reserve_metadata_bytes could
 638		 * try to commit the transaction, which would make it return
 639		 * EAGAIN to get us to stop the transaction we have.  Return
 640		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 640		 */
 641		if (ret == -EAGAIN) {
 642			ret = -ENOSPC;
 643			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 644		}
 645		if (!ret) {
 646			node->bytes_reserved = num_bytes;
 647			trace_btrfs_space_reservation(fs_info,
 648						      "delayed_inode",
 649						      btrfs_ino(inode),
 650						      num_bytes, 1);
 651		} else {
 652			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 653		}
 654		return ret;
 655	}
 656
 657	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 658	if (!ret) {
 659		trace_btrfs_space_reservation(fs_info, "delayed_inode",
 660					      btrfs_ino(inode), num_bytes, 1);
 661		node->bytes_reserved = num_bytes;
 662	}
 663
 664	return ret;
 665}
 666
 667static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 668						struct btrfs_delayed_node *node,
 669						bool qgroup_free)
 670{
 671	struct btrfs_block_rsv *rsv;
 672
 673	if (!node->bytes_reserved)
 674		return;
 675
 676	rsv = &fs_info->delayed_block_rsv;
 677	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 678				      node->inode_id, node->bytes_reserved, 0);
 679	btrfs_block_rsv_release(fs_info, rsv,
 680				node->bytes_reserved);
 681	if (qgroup_free)
 682		btrfs_qgroup_free_meta_prealloc(node->root,
 683				node->bytes_reserved);
 684	else
 685		btrfs_qgroup_convert_reserved_meta(node->root,
 686				node->bytes_reserved);
 687	node->bytes_reserved = 0;
 688}
 689
 690/*
  691 * This helper inserts a run of continuous items into the same leaf,
  692 * bounded by the free space of the leaf.
 693 */
 694static int btrfs_batch_insert_items(struct btrfs_root *root,
 695				    struct btrfs_path *path,
 696				    struct btrfs_delayed_item *item)
 697{
 698	struct btrfs_delayed_item *curr, *next;
 699	int free_space;
 700	int total_data_size = 0, total_size = 0;
 701	struct extent_buffer *leaf;
 702	char *data_ptr;
 703	struct btrfs_key *keys;
 704	u32 *data_size;
 705	struct list_head head;
 706	int slot;
 707	int nitems;
 708	int i;
 709	int ret = 0;
 710
 711	BUG_ON(!path->nodes[0]);
 712
 713	leaf = path->nodes[0];
 714	free_space = btrfs_leaf_free_space(leaf);
 715	INIT_LIST_HEAD(&head);
 716
 717	next = item;
 718	nitems = 0;
 719
 720	/*
  721	 * Count the number of continuous items that we can insert in a batch.
 722	 */
 723	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 724	       free_space) {
 725		total_data_size += next->data_len;
 726		total_size += next->data_len + sizeof(struct btrfs_item);
 727		list_add_tail(&next->tree_list, &head);
 728		nitems++;
 729
 730		curr = next;
 731		next = __btrfs_next_delayed_item(curr);
 732		if (!next)
 733			break;
 734
 735		if (!btrfs_is_continuous_delayed_item(curr, next))
 736			break;
 737	}
 738
 739	if (!nitems) {
 740		ret = 0;
 741		goto out;
 742	}
 743
 744	/*
  745	 * We need to allocate some memory, but that might cause the task
  746	 * to sleep, so we set all locked nodes in the path to blocking locks
  747	 * first.
 748	 */
 749	btrfs_set_path_blocking(path);
 750
 751	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 752	if (!keys) {
 753		ret = -ENOMEM;
 754		goto out;
 755	}
 756
 757	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 758	if (!data_size) {
 759		ret = -ENOMEM;
 760		goto error;
 761	}
 762
 763	/* get keys of all the delayed items */
 764	i = 0;
 765	list_for_each_entry(next, &head, tree_list) {
 766		keys[i] = next->key;
 767		data_size[i] = next->data_len;
 768		i++;
 769	}
 770
 771	/* insert the keys of the items */
 772	setup_items_for_insert(root, path, keys, data_size,
 773			       total_data_size, total_size, nitems);
 774
 775	/* insert the dir index items */
 776	slot = path->slots[0];
 777	list_for_each_entry_safe(curr, next, &head, tree_list) {
 778		data_ptr = btrfs_item_ptr(leaf, slot, char);
 779		write_extent_buffer(leaf, &curr->data,
 780				    (unsigned long)data_ptr,
 781				    curr->data_len);
 782		slot++;
 783
 784		btrfs_delayed_item_release_metadata(root, curr);
 785
 786		list_del(&curr->tree_list);
 787		btrfs_release_delayed_item(curr);
 788	}
 789
 790error:
 791	kfree(data_size);
 792	kfree(keys);
 793out:
 794	return ret;
 795}
 796
 797/*
  798 * This helper only does simple insertions that needn't extend the item
  799 * for new data, such as directory name index insertion and inode insertion.
 800 */
 801static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 802				     struct btrfs_root *root,
 803				     struct btrfs_path *path,
 804				     struct btrfs_delayed_item *delayed_item)
 805{
 806	struct extent_buffer *leaf;
 807	char *ptr;
 808	int ret;
 809
 810	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 811				      delayed_item->data_len);
 812	if (ret < 0 && ret != -EEXIST)
 813		return ret;
 814
 815	leaf = path->nodes[0];
 816
 817	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 818
 819	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 820			    delayed_item->data_len);
 821	btrfs_mark_buffer_dirty(leaf);
 822
 823	btrfs_delayed_item_release_metadata(root, delayed_item);
 824	return 0;
 825}
 826
 827/*
  828 * We insert an item first, and then, if there are continuous items, we
  829 * try to insert those items into the same leaf.
 830 */
 831static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 832				      struct btrfs_path *path,
 833				      struct btrfs_root *root,
 834				      struct btrfs_delayed_node *node)
 835{
 836	struct btrfs_delayed_item *curr, *prev;
 837	int ret = 0;
 838
 839do_again:
 840	mutex_lock(&node->mutex);
 841	curr = __btrfs_first_delayed_insertion_item(node);
 842	if (!curr)
 843		goto insert_end;
 844
 845	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 846	if (ret < 0) {
 847		btrfs_release_path(path);
 848		goto insert_end;
 849	}
 850
 851	prev = curr;
 852	curr = __btrfs_next_delayed_item(prev);
 853	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 854		/* insert the continuous items into the same leaf */
 855		path->slots[0]++;
 856		btrfs_batch_insert_items(root, path, curr);
 857	}
 858	btrfs_release_delayed_item(prev);
 859	btrfs_mark_buffer_dirty(path->nodes[0]);
 860
 861	btrfs_release_path(path);
 862	mutex_unlock(&node->mutex);
 863	goto do_again;
 864
 865insert_end:
 866	mutex_unlock(&node->mutex);
 867	return ret;
 868}
 869
 870static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 871				    struct btrfs_root *root,
 872				    struct btrfs_path *path,
 873				    struct btrfs_delayed_item *item)
 874{
 875	struct btrfs_delayed_item *curr, *next;
 876	struct extent_buffer *leaf;
 877	struct btrfs_key key;
 878	struct list_head head;
 879	int nitems, i, last_item;
 880	int ret = 0;
 881
 882	BUG_ON(!path->nodes[0]);
 883
 884	leaf = path->nodes[0];
 885
 886	i = path->slots[0];
 887	last_item = btrfs_header_nritems(leaf) - 1;
 888	if (i > last_item)
 889		return -ENOENT;	/* FIXME: Is errno suitable? */
 890
 891	next = item;
 892	INIT_LIST_HEAD(&head);
 893	btrfs_item_key_to_cpu(leaf, &key, i);
 894	nitems = 0;
 895	/*
  896	 * Count the number of dir index items that we can delete in a batch.
 897	 */
 898	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 899		list_add_tail(&next->tree_list, &head);
 900		nitems++;
 901
 902		curr = next;
 903		next = __btrfs_next_delayed_item(curr);
 904		if (!next)
 905			break;
 906
 907		if (!btrfs_is_continuous_delayed_item(curr, next))
 908			break;
 909
 910		i++;
 911		if (i > last_item)
 912			break;
 913		btrfs_item_key_to_cpu(leaf, &key, i);
 914	}
 915
 916	if (!nitems)
 917		return 0;
 918
 919	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 920	if (ret)
 921		goto out;
 922
 923	list_for_each_entry_safe(curr, next, &head, tree_list) {
 924		btrfs_delayed_item_release_metadata(root, curr);
 925		list_del(&curr->tree_list);
 926		btrfs_release_delayed_item(curr);
 927	}
 928
 929out:
 930	return ret;
 931}
 932
 933static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 934				      struct btrfs_path *path,
 935				      struct btrfs_root *root,
 936				      struct btrfs_delayed_node *node)
 937{
 938	struct btrfs_delayed_item *curr, *prev;
 939	int ret = 0;
 940
 941do_again:
 942	mutex_lock(&node->mutex);
 943	curr = __btrfs_first_delayed_deletion_item(node);
 944	if (!curr)
 945		goto delete_fail;
 946
 947	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 948	if (ret < 0)
 949		goto delete_fail;
 950	else if (ret > 0) {
 951		/*
  952		 * We can't find the item which this node points to, so this
  953		 * node is invalid; just drop it.
 954		 */
 955		prev = curr;
 956		curr = __btrfs_next_delayed_item(prev);
 957		btrfs_release_delayed_item(prev);
 958		ret = 0;
 959		btrfs_release_path(path);
 960		if (curr) {
 961			mutex_unlock(&node->mutex);
 962			goto do_again;
 963		} else
 964			goto delete_fail;
 965	}
 966
 967	btrfs_batch_delete_items(trans, root, path, curr);
 968	btrfs_release_path(path);
 969	mutex_unlock(&node->mutex);
 970	goto do_again;
 971
 972delete_fail:
 973	btrfs_release_path(path);
 974	mutex_unlock(&node->mutex);
 975	return ret;
 976}
 977
 978static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 979{
 980	struct btrfs_delayed_root *delayed_root;
 981
 982	if (delayed_node &&
 983	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 984		BUG_ON(!delayed_node->root);
 985		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 986		delayed_node->count--;
 987
 988		delayed_root = delayed_node->root->fs_info->delayed_root;
 989		finish_one_item(delayed_root);
 990	}
 991}
 992
 993static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
 994{
 995	struct btrfs_delayed_root *delayed_root;
 996
 997	ASSERT(delayed_node->root);
 998	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
 999	delayed_node->count--;
1000
1001	delayed_root = delayed_node->root->fs_info->delayed_root;
1002	finish_one_item(delayed_root);
1003}
1004
1005static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1006					struct btrfs_root *root,
1007					struct btrfs_path *path,
1008					struct btrfs_delayed_node *node)
1009{
1010	struct btrfs_fs_info *fs_info = root->fs_info;
1011	struct btrfs_key key;
1012	struct btrfs_inode_item *inode_item;
1013	struct extent_buffer *leaf;
1014	int mod;
1015	int ret;
1016
1017	key.objectid = node->inode_id;
1018	key.type = BTRFS_INODE_ITEM_KEY;
1019	key.offset = 0;
1020
1021	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1022		mod = -1;
1023	else
1024		mod = 1;
1025
1026	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1027	if (ret > 0) {
1028		btrfs_release_path(path);
1029		return -ENOENT;
1030	} else if (ret < 0) {
1031		return ret;
1032	}
1033
1034	leaf = path->nodes[0];
1035	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1036				    struct btrfs_inode_item);
1037	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1038			    sizeof(struct btrfs_inode_item));
1039	btrfs_mark_buffer_dirty(leaf);
1040
1041	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1042		goto no_iref;
1043
1044	path->slots[0]++;
1045	if (path->slots[0] >= btrfs_header_nritems(leaf))
1046		goto search;
1047again:
1048	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1049	if (key.objectid != node->inode_id)
1050		goto out;
1051
1052	if (key.type != BTRFS_INODE_REF_KEY &&
1053	    key.type != BTRFS_INODE_EXTREF_KEY)
1054		goto out;
1055
1056	/*
 1057	 * Delayed iref deletion is for an inode that has only one link, so
 1058	 * there is only one iref.  The case of several irefs in the same
 1059	 * item doesn't exist.
1060	 */
1061	btrfs_del_item(trans, root, path);
1062out:
1063	btrfs_release_delayed_iref(node);
1064no_iref:
1065	btrfs_release_path(path);
1066err_out:
1067	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1068	btrfs_release_delayed_inode(node);
1069
1070	return ret;
1071
1072search:
1073	btrfs_release_path(path);
1074
1075	key.type = BTRFS_INODE_EXTREF_KEY;
1076	key.offset = -1;
1077	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1078	if (ret < 0)
1079		goto err_out;
1080	ASSERT(ret);
1081
1082	ret = 0;
1083	leaf = path->nodes[0];
1084	path->slots[0]--;
1085	goto again;
1086}
1087
1088static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1089					     struct btrfs_root *root,
1090					     struct btrfs_path *path,
1091					     struct btrfs_delayed_node *node)
1092{
1093	int ret;
1094
1095	mutex_lock(&node->mutex);
1096	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1097		mutex_unlock(&node->mutex);
1098		return 0;
1099	}
1100
1101	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1102	mutex_unlock(&node->mutex);
1103	return ret;
1104}
1105
1106static inline int
1107__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1108				   struct btrfs_path *path,
1109				   struct btrfs_delayed_node *node)
1110{
1111	int ret;
1112
1113	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1114	if (ret)
1115		return ret;
1116
1117	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1118	if (ret)
1119		return ret;
1120
1121	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1122	return ret;
1123}
1124
1125/*
1126 * Called when committing the transaction.
1127 * Returns 0 on success.
1128 * Returns < 0 on error and returns with an aborted transaction with any
1129 * outstanding delayed items cleaned up.
1130 */
1131static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1132{
1133	struct btrfs_fs_info *fs_info = trans->fs_info;
1134	struct btrfs_delayed_root *delayed_root;
1135	struct btrfs_delayed_node *curr_node, *prev_node;
1136	struct btrfs_path *path;
1137	struct btrfs_block_rsv *block_rsv;
1138	int ret = 0;
1139	bool count = (nr > 0);
1140
1141	if (trans->aborted)
1142		return -EIO;
1143
1144	path = btrfs_alloc_path();
1145	if (!path)
1146		return -ENOMEM;
1147	path->leave_spinning = 1;
1148
1149	block_rsv = trans->block_rsv;
1150	trans->block_rsv = &fs_info->delayed_block_rsv;
1151
1152	delayed_root = fs_info->delayed_root;
1153
1154	curr_node = btrfs_first_delayed_node(delayed_root);
1155	while (curr_node && (!count || (count && nr--))) {
1156		ret = __btrfs_commit_inode_delayed_items(trans, path,
1157							 curr_node);
1158		if (ret) {
1159			btrfs_release_delayed_node(curr_node);
1160			curr_node = NULL;
1161			btrfs_abort_transaction(trans, ret);
1162			break;
1163		}
1164
1165		prev_node = curr_node;
1166		curr_node = btrfs_next_delayed_node(curr_node);
1167		btrfs_release_delayed_node(prev_node);
1168	}
1169
1170	if (curr_node)
1171		btrfs_release_delayed_node(curr_node);
1172	btrfs_free_path(path);
1173	trans->block_rsv = block_rsv;
1174
1175	return ret;
1176}
1177
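/*
 * Usage sketch (hedged; the exact call sites are assumptions, see
 * transaction.c): a transaction commit flushes everything with
 *
 *	ret = btrfs_run_delayed_items(trans);
 *
 * while flushers that only need to free a bounded amount of metadata
 * space call btrfs_run_delayed_items_nr(trans, nr) instead.  Errors come
 * back with the transaction already aborted.
 */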
1178int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1179{
1180	return __btrfs_run_delayed_items(trans, -1);
1181}
1182
1183int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1184{
1185	return __btrfs_run_delayed_items(trans, nr);
1186}
1187
1188int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1189				     struct btrfs_inode *inode)
1190{
1191	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1192	struct btrfs_path *path;
1193	struct btrfs_block_rsv *block_rsv;
1194	int ret;
1195
1196	if (!delayed_node)
1197		return 0;
1198
1199	mutex_lock(&delayed_node->mutex);
1200	if (!delayed_node->count) {
1201		mutex_unlock(&delayed_node->mutex);
1202		btrfs_release_delayed_node(delayed_node);
1203		return 0;
1204	}
1205	mutex_unlock(&delayed_node->mutex);
1206
1207	path = btrfs_alloc_path();
1208	if (!path) {
1209		btrfs_release_delayed_node(delayed_node);
1210		return -ENOMEM;
1211	}
1212	path->leave_spinning = 1;
1213
1214	block_rsv = trans->block_rsv;
1215	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1216
1217	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1218
1219	btrfs_release_delayed_node(delayed_node);
1220	btrfs_free_path(path);
1221	trans->block_rsv = block_rsv;
1222
1223	return ret;
1224}
1225
1226int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1227{
1228	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1229	struct btrfs_trans_handle *trans;
1230	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1231	struct btrfs_path *path;
1232	struct btrfs_block_rsv *block_rsv;
1233	int ret;
1234
1235	if (!delayed_node)
1236		return 0;
1237
1238	mutex_lock(&delayed_node->mutex);
1239	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1240		mutex_unlock(&delayed_node->mutex);
1241		btrfs_release_delayed_node(delayed_node);
1242		return 0;
1243	}
1244	mutex_unlock(&delayed_node->mutex);
1245
1246	trans = btrfs_join_transaction(delayed_node->root);
1247	if (IS_ERR(trans)) {
1248		ret = PTR_ERR(trans);
1249		goto out;
1250	}
1251
1252	path = btrfs_alloc_path();
1253	if (!path) {
1254		ret = -ENOMEM;
1255		goto trans_out;
1256	}
1257	path->leave_spinning = 1;
1258
1259	block_rsv = trans->block_rsv;
1260	trans->block_rsv = &fs_info->delayed_block_rsv;
1261
1262	mutex_lock(&delayed_node->mutex);
1263	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1264		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1265						   path, delayed_node);
1266	else
1267		ret = 0;
1268	mutex_unlock(&delayed_node->mutex);
1269
1270	btrfs_free_path(path);
1271	trans->block_rsv = block_rsv;
1272trans_out:
1273	btrfs_end_transaction(trans);
1274	btrfs_btree_balance_dirty(fs_info);
1275out:
1276	btrfs_release_delayed_node(delayed_node);
1277
1278	return ret;
1279}
1280
1281void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1282{
1283	struct btrfs_delayed_node *delayed_node;
1284
1285	delayed_node = READ_ONCE(inode->delayed_node);
1286	if (!delayed_node)
1287		return;
1288
1289	inode->delayed_node = NULL;
1290	btrfs_release_delayed_node(delayed_node);
1291}
1292
1293struct btrfs_async_delayed_work {
1294	struct btrfs_delayed_root *delayed_root;
1295	int nr;
1296	struct btrfs_work work;
1297};
1298
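/*
 * Work function for the delayed_workers workqueue: commit prepared
 * delayed nodes until the pending item count drops below
 * BTRFS_DELAYED_BACKGROUND / 2 or enough nodes have been done
 * (async_work->nr of them, or up to BTRFS_DELAYED_WRITEBACK when
 * nr == 0).
 */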
1299static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1300{
1301	struct btrfs_async_delayed_work *async_work;
1302	struct btrfs_delayed_root *delayed_root;
1303	struct btrfs_trans_handle *trans;
1304	struct btrfs_path *path;
1305	struct btrfs_delayed_node *delayed_node = NULL;
1306	struct btrfs_root *root;
1307	struct btrfs_block_rsv *block_rsv;
1308	int total_done = 0;
1309
1310	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1311	delayed_root = async_work->delayed_root;
1312
1313	path = btrfs_alloc_path();
1314	if (!path)
1315		goto out;
1316
1317	do {
1318		if (atomic_read(&delayed_root->items) <
1319		    BTRFS_DELAYED_BACKGROUND / 2)
1320			break;
1321
1322		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1323		if (!delayed_node)
1324			break;
1325
1326		path->leave_spinning = 1;
1327		root = delayed_node->root;
1328
1329		trans = btrfs_join_transaction(root);
1330		if (IS_ERR(trans)) {
1331			btrfs_release_path(path);
1332			btrfs_release_prepared_delayed_node(delayed_node);
1333			total_done++;
1334			continue;
1335		}
1336
1337		block_rsv = trans->block_rsv;
1338		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1339
1340		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1341
1342		trans->block_rsv = block_rsv;
1343		btrfs_end_transaction(trans);
1344		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1345
1346		btrfs_release_path(path);
1347		btrfs_release_prepared_delayed_node(delayed_node);
1348		total_done++;
1349
1350	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1351		 || total_done < async_work->nr);
1352
1353	btrfs_free_path(path);
1354out:
1355	wake_up(&delayed_root->wait);
1356	kfree(async_work);
1357}
1358
1359
1360static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1361				     struct btrfs_fs_info *fs_info, int nr)
1362{
1363	struct btrfs_async_delayed_work *async_work;
1364
1365	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1366	if (!async_work)
1367		return -ENOMEM;
1368
1369	async_work->delayed_root = delayed_root;
1370	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1371			btrfs_async_run_delayed_root, NULL, NULL);
1372	async_work->nr = nr;
1373
1374	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1375	return 0;
1376}
1377
1378void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1379{
1380	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1381}
1382
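/*
 * A throttled caller may stop waiting once a full batch of items has
 * completed since it sampled @seq (items_seq counts completions), or
 * once the backlog has dropped below the background threshold.
 */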
1383static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1384{
1385	int val = atomic_read(&delayed_root->items_seq);
1386
1387	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1388		return 1;
1389
1390	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1391		return 1;
1392
1393	return 0;
1394}
1395
1396void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1397{
1398	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1399
1400	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1401		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1402		return;
1403
1404	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1405		int seq;
1406		int ret;
1407
1408		seq = atomic_read(&delayed_root->items_seq);
1409
1410		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1411		if (ret)
1412			return;
1413
1414		wait_event_interruptible(delayed_root->wait,
1415					 could_end_wait(delayed_root, seq));
1416		return;
1417	}
1418
1419	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1420}
1421
1422/* Will return 0 or -ENOMEM */
1423int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1424				   const char *name, int name_len,
1425				   struct btrfs_inode *dir,
1426				   struct btrfs_disk_key *disk_key, u8 type,
1427				   u64 index)
1428{
1429	struct btrfs_delayed_node *delayed_node;
1430	struct btrfs_delayed_item *delayed_item;
1431	struct btrfs_dir_item *dir_item;
1432	int ret;
1433
1434	delayed_node = btrfs_get_or_create_delayed_node(dir);
1435	if (IS_ERR(delayed_node))
1436		return PTR_ERR(delayed_node);
1437
1438	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1439	if (!delayed_item) {
1440		ret = -ENOMEM;
1441		goto release_node;
1442	}
1443
1444	delayed_item->key.objectid = btrfs_ino(dir);
1445	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1446	delayed_item->key.offset = index;
1447
1448	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1449	dir_item->location = *disk_key;
1450	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1451	btrfs_set_stack_dir_data_len(dir_item, 0);
1452	btrfs_set_stack_dir_name_len(dir_item, name_len);
1453	btrfs_set_stack_dir_type(dir_item, type);
1454	memcpy((char *)(dir_item + 1), name, name_len);
1455
1456	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1457	/*
 1458	 * We reserved enough space when we started a new transaction, so
 1459	 * a metadata reservation failure here is impossible.
1460	 */
1461	BUG_ON(ret);
1462
1463	mutex_lock(&delayed_node->mutex);
1464	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1465	if (unlikely(ret)) {
1466		btrfs_err(trans->fs_info,
1467			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1468			  name_len, name, delayed_node->root->root_key.objectid,
1469			  delayed_node->inode_id, ret);
1470		BUG();
1471	}
1472	mutex_unlock(&delayed_node->mutex);
1473
1474release_node:
1475	btrfs_release_delayed_node(delayed_node);
1476	return ret;
1477}
1478
1479static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1480					       struct btrfs_delayed_node *node,
1481					       struct btrfs_key *key)
1482{
1483	struct btrfs_delayed_item *item;
1484
1485	mutex_lock(&node->mutex);
1486	item = __btrfs_lookup_delayed_insertion_item(node, key);
1487	if (!item) {
1488		mutex_unlock(&node->mutex);
1489		return 1;
1490	}
1491
1492	btrfs_delayed_item_release_metadata(node->root, item);
1493	btrfs_release_delayed_item(item);
1494	mutex_unlock(&node->mutex);
1495	return 0;
1496}
1497
1498int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1499				   struct btrfs_inode *dir, u64 index)
1500{
1501	struct btrfs_delayed_node *node;
1502	struct btrfs_delayed_item *item;
1503	struct btrfs_key item_key;
1504	int ret;
1505
1506	node = btrfs_get_or_create_delayed_node(dir);
1507	if (IS_ERR(node))
1508		return PTR_ERR(node);
1509
1510	item_key.objectid = btrfs_ino(dir);
1511	item_key.type = BTRFS_DIR_INDEX_KEY;
1512	item_key.offset = index;
1513
1514	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1515						  &item_key);
1516	if (!ret)
1517		goto end;
1518
1519	item = btrfs_alloc_delayed_item(0);
1520	if (!item) {
1521		ret = -ENOMEM;
1522		goto end;
1523	}
1524
1525	item->key = item_key;
1526
1527	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1528	/*
 1529	 * We reserved enough space when we started a new transaction, so
 1530	 * a metadata reservation failure here is impossible.
1531	 */
1532	if (ret < 0) {
1533		btrfs_err(trans->fs_info,
 1534"metadata reservation failed for delayed dir item deletion, should have been reserved");
1535		btrfs_release_delayed_item(item);
1536		goto end;
1537	}
1538
1539	mutex_lock(&node->mutex);
1540	ret = __btrfs_add_delayed_deletion_item(node, item);
1541	if (unlikely(ret)) {
1542		btrfs_err(trans->fs_info,
1543			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1544			  index, node->root->root_key.objectid,
1545			  node->inode_id, ret);
1546		btrfs_delayed_item_release_metadata(dir->root, item);
1547		btrfs_release_delayed_item(item);
1548	}
1549	mutex_unlock(&node->mutex);
1550end:
1551	btrfs_release_delayed_node(node);
1552	return ret;
1553}
1554
1555int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1556{
1557	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1558
1559	if (!delayed_node)
1560		return -ENOENT;
1561
1562	/*
 1563	 * Since we hold the i_mutex of this directory, it is impossible that
 1564	 * a new directory index could be added into the delayed node and
 1565	 * index_cnt updated now.  So we needn't lock the delayed node.
1566	 */
1567	if (!delayed_node->index_cnt) {
1568		btrfs_release_delayed_node(delayed_node);
1569		return -EINVAL;
1570	}
1571
1572	inode->index_cnt = delayed_node->index_cnt;
1573	btrfs_release_delayed_node(delayed_node);
1574	return 0;
1575}
1576
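/*
 * Grab references to all pending insertion/deletion dir index items so
 * that readdir can merge them with the items on disk.  The lists must be
 * handed back to btrfs_readdir_put_delayed_items() afterwards.
 */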
1577bool btrfs_readdir_get_delayed_items(struct inode *inode,
1578				     struct list_head *ins_list,
1579				     struct list_head *del_list)
1580{
1581	struct btrfs_delayed_node *delayed_node;
1582	struct btrfs_delayed_item *item;
1583
1584	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1585	if (!delayed_node)
1586		return false;
1587
1588	/*
1589	 * We can only do one readdir with delayed items at a time because of
1590	 * item->readdir_list.
1591	 */
1592	inode_unlock_shared(inode);
1593	inode_lock(inode);
1594
1595	mutex_lock(&delayed_node->mutex);
1596	item = __btrfs_first_delayed_insertion_item(delayed_node);
1597	while (item) {
1598		refcount_inc(&item->refs);
1599		list_add_tail(&item->readdir_list, ins_list);
1600		item = __btrfs_next_delayed_item(item);
1601	}
1602
1603	item = __btrfs_first_delayed_deletion_item(delayed_node);
1604	while (item) {
1605		refcount_inc(&item->refs);
1606		list_add_tail(&item->readdir_list, del_list);
1607		item = __btrfs_next_delayed_item(item);
1608	}
1609	mutex_unlock(&delayed_node->mutex);
1610	/*
 1611	 * This delayed node is still cached in the btrfs inode, so refs
 1612	 * must be > 1 now, and we needn't check whether it is going to be
 1613	 * freed or not.
 1614	 *
 1615	 * Besides that, this function is used to read the directory, and we
 1616	 * do not insert/delete delayed items in this period.  So we also
 1617	 * needn't requeue or dequeue this delayed node.
1618	 */
1619	refcount_dec(&delayed_node->refs);
1620
1621	return true;
1622}
1623
1624void btrfs_readdir_put_delayed_items(struct inode *inode,
1625				     struct list_head *ins_list,
1626				     struct list_head *del_list)
1627{
1628	struct btrfs_delayed_item *curr, *next;
1629
1630	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1631		list_del(&curr->readdir_list);
1632		if (refcount_dec_and_test(&curr->refs))
1633			kfree(curr);
1634	}
1635
1636	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1637		list_del(&curr->readdir_list);
1638		if (refcount_dec_and_test(&curr->refs))
1639			kfree(curr);
1640	}
1641
1642	/*
1643	 * The VFS is going to do up_read(), so we need to downgrade back to a
1644	 * read lock.
1645	 */
1646	downgrade_write(&inode->i_rwsem);
1647}
1648
1649int btrfs_should_delete_dir_index(struct list_head *del_list,
1650				  u64 index)
1651{
1652	struct btrfs_delayed_item *curr;
1653	int ret = 0;
1654
1655	list_for_each_entry(curr, del_list, readdir_list) {
1656		if (curr->key.offset > index)
1657			break;
1658		if (curr->key.offset == index) {
1659			ret = 1;
1660			break;
1661		}
1662	}
1663	return ret;
1664}
1665
1666/*
 1667 * btrfs_readdir_delayed_dir_index - read the dir info stored in the
 1668 * delayed tree.
 1669 */
1670int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1671				    struct list_head *ins_list)
1672{
1673	struct btrfs_dir_item *di;
1674	struct btrfs_delayed_item *curr, *next;
1675	struct btrfs_key location;
1676	char *name;
1677	int name_len;
1678	int over = 0;
1679	unsigned char d_type;
1680
1681	if (list_empty(ins_list))
1682		return 0;
1683
1684	/*
 1685	 * Changing the data of a delayed item is impossible, so we needn't
 1686	 * lock them.  And since we hold the i_mutex of the directory, nobody
 1687	 * can delete any directory indexes now.
1688	 */
1689	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1690		list_del(&curr->readdir_list);
1691
1692		if (curr->key.offset < ctx->pos) {
1693			if (refcount_dec_and_test(&curr->refs))
1694				kfree(curr);
1695			continue;
1696		}
1697
1698		ctx->pos = curr->key.offset;
1699
1700		di = (struct btrfs_dir_item *)curr->data;
1701		name = (char *)(di + 1);
1702		name_len = btrfs_stack_dir_name_len(di);
1703
1704		d_type = fs_ftype_to_dtype(di->type);
1705		btrfs_disk_key_to_cpu(&location, &di->location);
1706
1707		over = !dir_emit(ctx, name, name_len,
1708			       location.objectid, d_type);
1709
1710		if (refcount_dec_and_test(&curr->refs))
1711			kfree(curr);
1712
1713		if (over)
1714			return 1;
1715		ctx->pos++;
1716	}
1717	return 0;
1718}
1719
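/*
 * Copy the VFS inode fields into the btrfs_inode_item stored in the
 * delayed node; __btrfs_update_delayed_inode() later writes that copy
 * into the inode item on disk.
 */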
1720static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1721				  struct btrfs_inode_item *inode_item,
1722				  struct inode *inode)
1723{
1724	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1725	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1726	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1727	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1728	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1729	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1730	btrfs_set_stack_inode_generation(inode_item,
1731					 BTRFS_I(inode)->generation);
1732	btrfs_set_stack_inode_sequence(inode_item,
1733				       inode_peek_iversion(inode));
1734	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1735	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1736	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1737	btrfs_set_stack_inode_block_group(inode_item, 0);
1738
1739	btrfs_set_stack_timespec_sec(&inode_item->atime,
1740				     inode->i_atime.tv_sec);
1741	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1742				      inode->i_atime.tv_nsec);
1743
1744	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1745				     inode->i_mtime.tv_sec);
1746	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1747				      inode->i_mtime.tv_nsec);
1748
1749	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1750				     inode->i_ctime.tv_sec);
1751	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1752				      inode->i_ctime.tv_nsec);
1753
1754	btrfs_set_stack_timespec_sec(&inode_item->otime,
1755				     BTRFS_I(inode)->i_otime.tv_sec);
1756	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1757				     BTRFS_I(inode)->i_otime.tv_nsec);
1758}
1759
1760int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1761{
1762	struct btrfs_delayed_node *delayed_node;
1763	struct btrfs_inode_item *inode_item;
1764
1765	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1766	if (!delayed_node)
1767		return -ENOENT;
1768
1769	mutex_lock(&delayed_node->mutex);
1770	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1771		mutex_unlock(&delayed_node->mutex);
1772		btrfs_release_delayed_node(delayed_node);
1773		return -ENOENT;
1774	}
1775
1776	inode_item = &delayed_node->inode_item;
1777
1778	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1779	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1780	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1781	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1782	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1783	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1784	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
 1785	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1786
1787	inode_set_iversion_queried(inode,
1788				   btrfs_stack_inode_sequence(inode_item));
1789	inode->i_rdev = 0;
1790	*rdev = btrfs_stack_inode_rdev(inode_item);
1791	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1792
1793	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1794	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1795
1796	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1797	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1798
1799	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1800	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1801
1802	BTRFS_I(inode)->i_otime.tv_sec =
1803		btrfs_stack_timespec_sec(&inode_item->otime);
1804	BTRFS_I(inode)->i_otime.tv_nsec =
1805		btrfs_stack_timespec_nsec(&inode_item->otime);
1806
1807	inode->i_generation = BTRFS_I(inode)->generation;
1808	BTRFS_I(inode)->index_cnt = (u64)-1;
1809
1810	mutex_unlock(&delayed_node->mutex);
1811	btrfs_release_delayed_node(delayed_node);
1812	return 0;
1813}
1814
1815int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1816			       struct btrfs_root *root, struct inode *inode)
1817{
1818	struct btrfs_delayed_node *delayed_node;
1819	int ret = 0;
1820
1821	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1822	if (IS_ERR(delayed_node))
1823		return PTR_ERR(delayed_node);
1824
1825	mutex_lock(&delayed_node->mutex);
1826	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1827		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1828		goto release_node;
1829	}
1830
1831	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1832						   delayed_node);
1833	if (ret)
1834		goto release_node;
1835
1836	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1837	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1838	delayed_node->count++;
1839	atomic_inc(&root->fs_info->delayed_root->items);
1840release_node:
1841	mutex_unlock(&delayed_node->mutex);
1842	btrfs_release_delayed_node(delayed_node);
1843	return ret;
1844}
1845
1846int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1847{
1848	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1849	struct btrfs_delayed_node *delayed_node;
1850
1851	/*
 1852	 * We don't do delayed inode updates during log recovery because it
 1853	 * leads to enospc problems.  This means we also can't do delayed
 1854	 * inode refs.
1855	 */
1856	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1857		return -EAGAIN;
1858
1859	delayed_node = btrfs_get_or_create_delayed_node(inode);
1860	if (IS_ERR(delayed_node))
1861		return PTR_ERR(delayed_node);
1862
1863	/*
 1864	 * We don't reserve space for inode ref deletion because:
 1865	 * - We ONLY do async inode ref deletion for an inode that has only
 1866	 *   one link (i_nlink == 1), which means there is only one inode ref.
 1867	 *   And in most cases, the inode ref and the inode item are in the
 1868	 *   same leaf, and we will deal with them at the same time.
 1869	 *   Since we are sure we will reserve the space for the inode item,
 1870	 *   it is unnecessary to reserve space for inode ref deletion.
 1871	 * - If the inode ref and the inode item are not in the same leaf,
 1872	 *   we also needn't worry about the enospc problem, because we reserve
 1873	 *   much more space for the inode update than it needs.
1874	 * - At the worst, we can steal some space from the global reservation.
1875	 *   It is very rare.
1876	 */
1877	mutex_lock(&delayed_node->mutex);
1878	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1879		goto release_node;
1880
1881	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1882	delayed_node->count++;
1883	atomic_inc(&fs_info->delayed_root->items);
1884release_node:
1885	mutex_unlock(&delayed_node->mutex);
1886	btrfs_release_delayed_node(delayed_node);
1887	return 0;
1888}
1889
1890static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1891{
1892	struct btrfs_root *root = delayed_node->root;
1893	struct btrfs_fs_info *fs_info = root->fs_info;
1894	struct btrfs_delayed_item *curr_item, *prev_item;
1895
1896	mutex_lock(&delayed_node->mutex);
1897	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1898	while (curr_item) {
1899		btrfs_delayed_item_release_metadata(root, curr_item);
1900		prev_item = curr_item;
1901		curr_item = __btrfs_next_delayed_item(prev_item);
1902		btrfs_release_delayed_item(prev_item);
1903	}
1904
1905	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1906	while (curr_item) {
1907		btrfs_delayed_item_release_metadata(root, curr_item);
1908		prev_item = curr_item;
1909		curr_item = __btrfs_next_delayed_item(prev_item);
1910		btrfs_release_delayed_item(prev_item);
1911	}
1912
1913	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1914		btrfs_release_delayed_iref(delayed_node);
1915
1916	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1917		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1918		btrfs_release_delayed_inode(delayed_node);
1919	}
1920	mutex_unlock(&delayed_node->mutex);
1921}
1922
1923void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1924{
1925	struct btrfs_delayed_node *delayed_node;
1926
1927	delayed_node = btrfs_get_delayed_node(inode);
1928	if (!delayed_node)
1929		return;
1930
1931	__btrfs_kill_delayed_node(delayed_node);
1932	btrfs_release_delayed_node(delayed_node);
1933}
1934
1935void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1936{
1937	u64 inode_id = 0;
1938	struct btrfs_delayed_node *delayed_nodes[8];
1939	int i, n;
1940
1941	while (1) {
1942		spin_lock(&root->inode_lock);
1943		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1944					   (void **)delayed_nodes, inode_id,
1945					   ARRAY_SIZE(delayed_nodes));
1946		if (!n) {
1947			spin_unlock(&root->inode_lock);
1948			break;
1949		}
1950
1951		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1952
1953		for (i = 0; i < n; i++)
1954			refcount_inc(&delayed_nodes[i]->refs);
1955		spin_unlock(&root->inode_lock);
1956
1957		for (i = 0; i < n; i++) {
1958			__btrfs_kill_delayed_node(delayed_nodes[i]);
1959			btrfs_release_delayed_node(delayed_nodes[i]);
1960		}
1961	}
1962}
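/*
 * Editor's sketch (not part of the original file; the ex_* names are
 * hypothetical): the teardown loop above follows a common pattern --
 * grab a gang of entries and pin them with extra refs under the
 * spinlock, then drop the lock before doing the heavyweight per-entry
 * work.
 */
#include <linux/kernel.h>
#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ex_node {
	refcount_t refs;
	unsigned long id;
};

static void ex_kill_all(struct radix_tree_root *tree, spinlock_t *lock)
{
	struct ex_node *nodes[8];
	unsigned long index = 0;
	int i, n;

	while (1) {
		spin_lock(lock);
		n = radix_tree_gang_lookup(tree, (void **)nodes, index,
					   ARRAY_SIZE(nodes));
		if (!n) {
			spin_unlock(lock);
			break;
		}
		/* Resume the next gang lookup after the last entry seen. */
		index = nodes[n - 1]->id + 1;
		for (i = 0; i < n; i++)
			refcount_inc(&nodes[i]->refs);
		spin_unlock(lock);

		for (i = 0; i < n; i++) {
			/*
			 * Heavyweight per-entry teardown goes here; a real
			 * release would also unlink the entry from the tree
			 * before freeing it.
			 */
			if (refcount_dec_and_test(&nodes[i]->refs))
				kfree(nodes[i]);
		}
	}
}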
1963
1964void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1965{
1966	struct btrfs_delayed_node *curr_node, *prev_node;
1967
1968	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1969	while (curr_node) {
1970		__btrfs_kill_delayed_node(curr_node);
1971
1972		prev_node = curr_node;
1973		curr_node = btrfs_next_delayed_node(curr_node);
1974		btrfs_release_delayed_node(prev_node);
1975	}
1976}
1977
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   4 * Written by Miao Xie <miaox@cn.fujitsu.com>
   5 */
   6
   7#include <linux/slab.h>
   8#include <linux/iversion.h>
   9#include <linux/sched/mm.h>
  10#include "misc.h"
  11#include "delayed-inode.h"
  12#include "disk-io.h"
  13#include "transaction.h"
  14#include "ctree.h"
  15#include "qgroup.h"
  16#include "locking.h"
  17
  18#define BTRFS_DELAYED_WRITEBACK		512
  19#define BTRFS_DELAYED_BACKGROUND	128
  20#define BTRFS_DELAYED_BATCH		16
  21
  22static struct kmem_cache *delayed_node_cache;
  23
  24int __init btrfs_delayed_inode_init(void)
  25{
  26	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  27					sizeof(struct btrfs_delayed_node),
  28					0,
  29					SLAB_MEM_SPREAD,
  30					NULL);
  31	if (!delayed_node_cache)
  32		return -ENOMEM;
  33	return 0;
  34}
  35
  36void __cold btrfs_delayed_inode_exit(void)
  37{
  38	kmem_cache_destroy(delayed_node_cache);
  39}
  40
  41static inline void btrfs_init_delayed_node(
  42				struct btrfs_delayed_node *delayed_node,
  43				struct btrfs_root *root, u64 inode_id)
  44{
  45	delayed_node->root = root;
  46	delayed_node->inode_id = inode_id;
  47	refcount_set(&delayed_node->refs, 0);
  48	delayed_node->ins_root = RB_ROOT_CACHED;
  49	delayed_node->del_root = RB_ROOT_CACHED;
  50	mutex_init(&delayed_node->mutex);
  51	INIT_LIST_HEAD(&delayed_node->n_list);
  52	INIT_LIST_HEAD(&delayed_node->p_list);
  53}
  54
  55static inline int btrfs_is_continuous_delayed_item(
  56					struct btrfs_delayed_item *item1,
  57					struct btrfs_delayed_item *item2)
  58{
  59	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  60	    item1->key.objectid == item2->key.objectid &&
  61	    item1->key.type == item2->key.type &&
  62	    item1->key.offset + 1 == item2->key.offset)
  63		return 1;
  64	return 0;
  65}
  66
  67static struct btrfs_delayed_node *btrfs_get_delayed_node(
  68		struct btrfs_inode *btrfs_inode)
  69{
  70	struct btrfs_root *root = btrfs_inode->root;
  71	u64 ino = btrfs_ino(btrfs_inode);
  72	struct btrfs_delayed_node *node;
  73
  74	node = READ_ONCE(btrfs_inode->delayed_node);
  75	if (node) {
  76		refcount_inc(&node->refs);
  77		return node;
  78	}
  79
  80	spin_lock(&root->inode_lock);
  81	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  82
  83	if (node) {
  84		if (btrfs_inode->delayed_node) {
  85			refcount_inc(&node->refs);	/* can be accessed */
  86			BUG_ON(btrfs_inode->delayed_node != node);
  87			spin_unlock(&root->inode_lock);
  88			return node;
  89		}
  90
  91		/*
  92		 * It's possible that we're racing into the middle of removing
  93		 * this node from the radix tree.  In this case, the refcount
  94		 * was zero and it should never go back to one.  Just return
  95		 * NULL like it was never in the radix at all; our release
  96		 * function is in the process of removing it.
  97		 *
  98		 * Some implementations of refcount_inc refuse to bump the
  99		 * refcount once it has hit zero.  If we don't do this dance
 100		 * here, refcount_inc() may decide to just WARN_ONCE() instead
 101		 * of actually bumping the refcount.
 102		 *
 103		 * If this node is properly in the radix, we want to bump the
 104		 * refcount twice, once for the inode and once for this get
 105		 * operation.
 106		 */
 107		if (refcount_inc_not_zero(&node->refs)) {
 108			refcount_inc(&node->refs);
 109			btrfs_inode->delayed_node = node;
 110		} else {
 111			node = NULL;
 112		}
 113
 114		spin_unlock(&root->inode_lock);
 115		return node;
 116	}
 117	spin_unlock(&root->inode_lock);
 118
 119	return NULL;
 120}
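/*
 * Editor's sketch (not part of the original file; the ex_* names are
 * hypothetical): the lookup above relies on refcount_inc_not_zero() to
 * lose gracefully against a concurrent release.  Once the refcount hits
 * zero the entry is as good as gone, so the lookup pretends it was
 * never in the tree.
 */
#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ex_obj {
	refcount_t refs;
	unsigned long id;
};

static struct ex_obj *ex_lookup(struct radix_tree_root *tree,
				spinlock_t *lock, unsigned long id)
{
	struct ex_obj *obj;

	spin_lock(lock);
	obj = radix_tree_lookup(tree, id);
	/* A zero refcount means a racing release is tearing it down. */
	if (obj && !refcount_inc_not_zero(&obj->refs))
		obj = NULL;
	spin_unlock(lock);

	return obj;
}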
 121
 122/* Will return either the node or PTR_ERR(-ENOMEM) */
 123static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 124		struct btrfs_inode *btrfs_inode)
 125{
 126	struct btrfs_delayed_node *node;
 127	struct btrfs_root *root = btrfs_inode->root;
 128	u64 ino = btrfs_ino(btrfs_inode);
 129	int ret;
 130
 131again:
 132	node = btrfs_get_delayed_node(btrfs_inode);
 133	if (node)
 134		return node;
 135
 136	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 137	if (!node)
 138		return ERR_PTR(-ENOMEM);
 139	btrfs_init_delayed_node(node, root, ino);
 140
 141	/* cached in the btrfs inode and can be accessed */
 142	refcount_set(&node->refs, 2);
 143
 144	ret = radix_tree_preload(GFP_NOFS);
 145	if (ret) {
 146		kmem_cache_free(delayed_node_cache, node);
 147		return ERR_PTR(ret);
 148	}
 149
 150	spin_lock(&root->inode_lock);
 151	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 152	if (ret == -EEXIST) {
 153		spin_unlock(&root->inode_lock);
 154		kmem_cache_free(delayed_node_cache, node);
 155		radix_tree_preload_end();
 156		goto again;
 157	}
 158	btrfs_inode->delayed_node = node;
 159	spin_unlock(&root->inode_lock);
 160	radix_tree_preload_end();
 161
 162	return node;
 163}
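/*
 * Editor's sketch, continuing the hypothetical ex_obj example above:
 * the function above uses the classic radix-tree preload idiom --
 * allocate tree nodes outside the spinlock, insert under the lock, and
 * treat -EEXIST as "somebody beat us to it", after which the caller
 * simply re-looks-up the winner.
 */
static int ex_insert(struct radix_tree_root *tree, spinlock_t *lock,
		     struct ex_obj *obj)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(lock);
	ret = radix_tree_insert(tree, obj->id, obj);
	spin_unlock(lock);
	radix_tree_preload_end();

	return ret;	/* -EEXIST: lost the race; re-lookup the winner */
}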
 164
 165/*
 166 * Call it when holding delayed_node->mutex
 167 *
 168 * If mod = 1, add this node into the prepared list.
 169 */
 170static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 171				     struct btrfs_delayed_node *node,
 172				     int mod)
 173{
 174	spin_lock(&root->lock);
 175	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 176		if (!list_empty(&node->p_list))
 177			list_move_tail(&node->p_list, &root->prepare_list);
 178		else if (mod)
 179			list_add_tail(&node->p_list, &root->prepare_list);
 180	} else {
 181		list_add_tail(&node->n_list, &root->node_list);
 182		list_add_tail(&node->p_list, &root->prepare_list);
 183		refcount_inc(&node->refs);	/* inserted into list */
 184		root->nodes++;
 185		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 186	}
 187	spin_unlock(&root->lock);
 188}
 189
 190/* Call it when holding delayed_node->mutex */
 191static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 192				       struct btrfs_delayed_node *node)
 193{
 194	spin_lock(&root->lock);
 195	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 196		root->nodes--;
 197		refcount_dec(&node->refs);	/* not in the list */
 198		list_del_init(&node->n_list);
 199		if (!list_empty(&node->p_list))
 200			list_del_init(&node->p_list);
 201		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 202	}
 203	spin_unlock(&root->lock);
 204}
 205
 206static struct btrfs_delayed_node *btrfs_first_delayed_node(
 207			struct btrfs_delayed_root *delayed_root)
 208{
 209	struct list_head *p;
 210	struct btrfs_delayed_node *node = NULL;
 211
 212	spin_lock(&delayed_root->lock);
 213	if (list_empty(&delayed_root->node_list))
 214		goto out;
 215
 216	p = delayed_root->node_list.next;
 217	node = list_entry(p, struct btrfs_delayed_node, n_list);
 218	refcount_inc(&node->refs);
 219out:
 220	spin_unlock(&delayed_root->lock);
 221
 222	return node;
 223}
 224
 225static struct btrfs_delayed_node *btrfs_next_delayed_node(
 226						struct btrfs_delayed_node *node)
 227{
 228	struct btrfs_delayed_root *delayed_root;
 229	struct list_head *p;
 230	struct btrfs_delayed_node *next = NULL;
 231
 232	delayed_root = node->root->fs_info->delayed_root;
 233	spin_lock(&delayed_root->lock);
 234	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 235		/* not in the list */
 236		if (list_empty(&delayed_root->node_list))
 237			goto out;
 238		p = delayed_root->node_list.next;
 239	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
 240		goto out;
 241	else
 242		p = node->n_list.next;
 243
 244	next = list_entry(p, struct btrfs_delayed_node, n_list);
 245	refcount_inc(&next->refs);
 246out:
 247	spin_unlock(&delayed_root->lock);
 248
 249	return next;
 250}
 251
 252static void __btrfs_release_delayed_node(
 253				struct btrfs_delayed_node *delayed_node,
 254				int mod)
 255{
 256	struct btrfs_delayed_root *delayed_root;
 257
 258	if (!delayed_node)
 259		return;
 260
 261	delayed_root = delayed_node->root->fs_info->delayed_root;
 262
 263	mutex_lock(&delayed_node->mutex);
 264	if (delayed_node->count)
 265		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 266	else
 267		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 268	mutex_unlock(&delayed_node->mutex);
 269
 270	if (refcount_dec_and_test(&delayed_node->refs)) {
 271		struct btrfs_root *root = delayed_node->root;
 272
 273		spin_lock(&root->inode_lock);
 274		/*
 275		 * Once our refcount goes to zero, nobody is allowed to bump it
 276		 * back up.  We can delete it now.
 277		 */
 278		ASSERT(refcount_read(&delayed_node->refs) == 0);
 279		radix_tree_delete(&root->delayed_nodes_tree,
 280				  delayed_node->inode_id);
 281		spin_unlock(&root->inode_lock);
 282		kmem_cache_free(delayed_node_cache, delayed_node);
 283	}
 284}
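/*
 * Editor's sketch, completing the hypothetical ex_obj example: the
 * release side of the race that ex_lookup() handles.  The final put
 * unlinks the entry under the same spinlock the lookup takes, so a
 * lookup either sees a live entry (refcount > 0) or none at all.
 */
static void ex_release(struct radix_tree_root *tree, spinlock_t *lock,
		       struct ex_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs)) {
		spin_lock(lock);
		radix_tree_delete(tree, obj->id);
		spin_unlock(lock);
		kfree(obj);
	}
}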
 285
 286static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 287{
 288	__btrfs_release_delayed_node(node, 0);
 289}
 290
 291static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 292					struct btrfs_delayed_root *delayed_root)
 293{
 294	struct list_head *p;
 295	struct btrfs_delayed_node *node = NULL;
 296
 297	spin_lock(&delayed_root->lock);
 298	if (list_empty(&delayed_root->prepare_list))
 299		goto out;
 300
 301	p = delayed_root->prepare_list.next;
 302	list_del_init(p);
 303	node = list_entry(p, struct btrfs_delayed_node, p_list);
 304	refcount_inc(&node->refs);
 305out:
 306	spin_unlock(&delayed_root->lock);
 307
 308	return node;
 309}
 310
 311static inline void btrfs_release_prepared_delayed_node(
 312					struct btrfs_delayed_node *node)
 313{
 314	__btrfs_release_delayed_node(node, 1);
 315}
 316
 317static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 318{
 319	struct btrfs_delayed_item *item;
 320	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 321	if (item) {
 322		item->data_len = data_len;
 323		item->ins_or_del = 0;
 324		item->bytes_reserved = 0;
 325		item->delayed_node = NULL;
 326		refcount_set(&item->refs, 1);
 327	}
 328	return item;
 329}
 330
 331/*
 332 * __btrfs_lookup_delayed_item - look up the delayed item by key
 333 * @delayed_node: pointer to the delayed node
 334 * @key:	  the key to look up
 335 * @prev:	  used to store the prev item if the right item isn't found
 336 * @next:	  used to store the next item if the right item isn't found
 337 *
 338 * Note: if we don't find the right item, we will return the prev item and
 339 * the next item.
 340 */
 341static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 342				struct rb_root *root,
 343				struct btrfs_key *key,
 344				struct btrfs_delayed_item **prev,
 345				struct btrfs_delayed_item **next)
 346{
 347	struct rb_node *node, *prev_node = NULL;
 348	struct btrfs_delayed_item *delayed_item = NULL;
 349	int ret = 0;
 350
 351	node = root->rb_node;
 352
 353	while (node) {
 354		delayed_item = rb_entry(node, struct btrfs_delayed_item,
 355					rb_node);
 356		prev_node = node;
 357		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 358		if (ret < 0)
 359			node = node->rb_right;
 360		else if (ret > 0)
 361			node = node->rb_left;
 362		else
 363			return delayed_item;
 364	}
 365
 366	if (prev) {
 367		if (!prev_node)
 368			*prev = NULL;
 369		else if (ret < 0)
 370			*prev = delayed_item;
 371		else if ((node = rb_prev(prev_node)) != NULL) {
 372			*prev = rb_entry(node, struct btrfs_delayed_item,
 373					 rb_node);
 374		} else
 375			*prev = NULL;
 376	}
 377
 378	if (next) {
 379		if (!prev_node)
 380			*next = NULL;
 381		else if (ret > 0)
 382			*next = delayed_item;
 383		else if ((node = rb_next(prev_node)) != NULL) {
 384			*next = rb_entry(node, struct btrfs_delayed_item,
 385					 rb_node);
 386		} else
 387			*next = NULL;
 388	}
 389	return NULL;
 390}
 391
 392static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 393					struct btrfs_delayed_node *delayed_node,
 394					struct btrfs_key *key)
 395{
 396	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
 397					   NULL, NULL);
 398}
 399
 400static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 401				    struct btrfs_delayed_item *ins,
 402				    int action)
 403{
 404	struct rb_node **p, *node;
 405	struct rb_node *parent_node = NULL;
 406	struct rb_root_cached *root;
 407	struct btrfs_delayed_item *item;
 408	int cmp;
 409	bool leftmost = true;
 410
 411	if (action == BTRFS_DELAYED_INSERTION_ITEM)
 412		root = &delayed_node->ins_root;
 413	else if (action == BTRFS_DELAYED_DELETION_ITEM)
 414		root = &delayed_node->del_root;
 415	else
 416		BUG();
 417	p = &root->rb_root.rb_node;
 418	node = &ins->rb_node;
 419
 420	while (*p) {
 421		parent_node = *p;
 422		item = rb_entry(parent_node, struct btrfs_delayed_item,
 423				 rb_node);
 424
 425		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 426		if (cmp < 0) {
 427			p = &(*p)->rb_right;
 428			leftmost = false;
 429		} else if (cmp > 0) {
 430			p = &(*p)->rb_left;
 431		} else {
 432			return -EEXIST;
 433		}
 434	}
 435
 436	rb_link_node(node, parent_node, p);
 437	rb_insert_color_cached(node, root, leftmost);
 438	ins->delayed_node = delayed_node;
 439	ins->ins_or_del = action;
 440
 441	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 442	    action == BTRFS_DELAYED_INSERTION_ITEM &&
 443	    ins->key.offset >= delayed_node->index_cnt)
 444			delayed_node->index_cnt = ins->key.offset + 1;
 445
 446	delayed_node->count++;
 447	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 448	return 0;
 449}
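/*
 * Editor's sketch (hypothetical ex_item type, not part of the original
 * file): inserting into an rb_root_cached while tracking whether the
 * new node becomes the leftmost, as __btrfs_add_delayed_item() does
 * above.  Descending to the right even once means the new node cannot
 * be the leftmost, so the cached pointer needn't be updated.
 */
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct ex_item {
	struct rb_node rb;
	u64 key;
};

static int ex_rb_insert(struct rb_root_cached *root, struct ex_item *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct ex_item *item = rb_entry(*p, struct ex_item, rb);

		parent = *p;
		if (item->key < ins->key) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (item->key > ins->key) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;	/* duplicate key */
		}
	}

	rb_link_node(&ins->rb, parent, p);
	rb_insert_color_cached(&ins->rb, root, leftmost);
	return 0;
}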
 450
 451static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 452					      struct btrfs_delayed_item *item)
 453{
 454	return __btrfs_add_delayed_item(node, item,
 455					BTRFS_DELAYED_INSERTION_ITEM);
 456}
 457
 458static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 459					     struct btrfs_delayed_item *item)
 460{
 461	return __btrfs_add_delayed_item(node, item,
 462					BTRFS_DELAYED_DELETION_ITEM);
 463}
 464
 465static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 466{
 467	int seq = atomic_inc_return(&delayed_root->items_seq);
 468
 469	/* atomic_dec_return implies a barrier */
 470	if ((atomic_dec_return(&delayed_root->items) <
 471	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
 472		cond_wake_up_nomb(&delayed_root->wait);
 473}
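/*
 * Editor's note (worked example, not part of the original file): with
 * BTRFS_DELAYED_BACKGROUND == 128 and BTRFS_DELAYED_BATCH == 16, the
 * wakeup above fires either when the item count drops below 128 or on
 * every 16th completion (seq % 16 == 0), so throttled callers waiting
 * in btrfs_balance_delayed_items() make steady progress instead of
 * waiting for the whole backlog to drain.
 */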
 474
 475static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 476{
 477	struct rb_root_cached *root;
 478	struct btrfs_delayed_root *delayed_root;
 479
 480	/* Not associated with any delayed_node */
 481	if (!delayed_item->delayed_node)
 482		return;
 483	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 484
 485	BUG_ON(!delayed_root);
 486	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 487	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 488
 489	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 490		root = &delayed_item->delayed_node->ins_root;
 491	else
 492		root = &delayed_item->delayed_node->del_root;
 493
 494	rb_erase_cached(&delayed_item->rb_node, root);
 495	delayed_item->delayed_node->count--;
 496
 497	finish_one_item(delayed_root);
 498}
 499
 500static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 501{
 502	if (item) {
 503		__btrfs_remove_delayed_item(item);
 504		if (refcount_dec_and_test(&item->refs))
 505			kfree(item);
 506	}
 507}
 508
 509static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 510					struct btrfs_delayed_node *delayed_node)
 511{
 512	struct rb_node *p;
 513	struct btrfs_delayed_item *item = NULL;
 514
 515	p = rb_first_cached(&delayed_node->ins_root);
 516	if (p)
 517		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 518
 519	return item;
 520}
 521
 522static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 523					struct btrfs_delayed_node *delayed_node)
 524{
 525	struct rb_node *p;
 526	struct btrfs_delayed_item *item = NULL;
 527
 528	p = rb_first_cached(&delayed_node->del_root);
 529	if (p)
 530		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 531
 532	return item;
 533}
 534
 535static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 536						struct btrfs_delayed_item *item)
 537{
 538	struct rb_node *p;
 539	struct btrfs_delayed_item *next = NULL;
 540
 541	p = rb_next(&item->rb_node);
 542	if (p)
 543		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 544
 545	return next;
 546}
 547
 548static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 549					       struct btrfs_root *root,
 550					       struct btrfs_delayed_item *item)
 551{
 552	struct btrfs_block_rsv *src_rsv;
 553	struct btrfs_block_rsv *dst_rsv;
 554	struct btrfs_fs_info *fs_info = root->fs_info;
 555	u64 num_bytes;
 556	int ret;
 557
 558	if (!trans->bytes_reserved)
 559		return 0;
 560
 561	src_rsv = trans->block_rsv;
 562	dst_rsv = &fs_info->delayed_block_rsv;
 563
 564	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 565
 566	/*
 567	 * Here we migrate the space rsv from the transaction rsv, since we
 568	 * have already reserved space when starting the transaction.  So there
 569	 * is no need to reserve qgroup space here.
 570	 */
 571	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 572	if (!ret) {
 573		trace_btrfs_space_reservation(fs_info, "delayed_item",
 574					      item->key.objectid,
 575					      num_bytes, 1);
 576		item->bytes_reserved = num_bytes;
 577	}
 578
 579	return ret;
 580}
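/*
 * Editor's note (worked arithmetic, assuming the helpers of this era):
 * btrfs_calc_insert_metadata_size(fs_info, 1) comes to
 * nodesize * BTRFS_MAX_LEVEL * 2, e.g. 16 KiB * 8 * 2 = 256 KiB for one
 * item, covering a worst-case COW of a full tree path.  The bytes are
 * migrated from the transaction rsv rather than newly reserved, because
 * the transaction start already paid for them.
 */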
 581
 582static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 583						struct btrfs_delayed_item *item)
 584{
 585	struct btrfs_block_rsv *rsv;
 586	struct btrfs_fs_info *fs_info = root->fs_info;
 587
 588	if (!item->bytes_reserved)
 589		return;
 590
 591	rsv = &fs_info->delayed_block_rsv;
 592	/*
 593	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
 594	 * to release/reserve qgroup space.
 595	 */
 596	trace_btrfs_space_reservation(fs_info, "delayed_item",
 597				      item->key.objectid, item->bytes_reserved,
 598				      0);
 599	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
 600}
 601
 602static int btrfs_delayed_inode_reserve_metadata(
 603					struct btrfs_trans_handle *trans,
 604					struct btrfs_root *root,
 605					struct btrfs_inode *inode,
 606					struct btrfs_delayed_node *node)
 607{
 608	struct btrfs_fs_info *fs_info = root->fs_info;
 609	struct btrfs_block_rsv *src_rsv;
 610	struct btrfs_block_rsv *dst_rsv;
 611	u64 num_bytes;
 612	int ret;
 613
 614	src_rsv = trans->block_rsv;
 615	dst_rsv = &fs_info->delayed_block_rsv;
 616
 617	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 618
 619	/*
 620	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
 621	 * which, for speed, doesn't reserve space.  This is a problem since we
 622	 * still need to reserve space for this update, so try to reserve the
 623	 * space.
 624	 *
 625	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 626	 * we always reserve enough to update the inode item.
 627	 */
 628	if (!src_rsv || (!trans->bytes_reserved &&
 629			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 630		ret = btrfs_qgroup_reserve_meta_prealloc(root,
 631				fs_info->nodesize, true);
 632		if (ret < 0)
 633			return ret;
 634		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 635					  BTRFS_RESERVE_NO_FLUSH);
 636		/*
 637		 * Since we're under a transaction, reserve_metadata_bytes could
 638		 * try to commit the transaction, which would make it return
 639		 * EAGAIN to make us stop the transaction we have.  So return
 640		 * ENOSPC instead, so that btrfs_dirty_inode knows what to do.
 641		 */
 642		if (ret == -EAGAIN) {
 643			ret = -ENOSPC;
 644			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 645		}
 646		if (!ret) {
 647			node->bytes_reserved = num_bytes;
 648			trace_btrfs_space_reservation(fs_info,
 649						      "delayed_inode",
 650						      btrfs_ino(inode),
 651						      num_bytes, 1);
 652		} else {
 653			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 654		}
 655		return ret;
 656	}
 657
 658	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 659	if (!ret) {
 660		trace_btrfs_space_reservation(fs_info, "delayed_inode",
 661					      btrfs_ino(inode), num_bytes, 1);
 662		node->bytes_reserved = num_bytes;
 663	}
 664
 665	return ret;
 666}
 667
 668static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 669						struct btrfs_delayed_node *node,
 670						bool qgroup_free)
 671{
 672	struct btrfs_block_rsv *rsv;
 673
 674	if (!node->bytes_reserved)
 675		return;
 676
 677	rsv = &fs_info->delayed_block_rsv;
 678	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 679				      node->inode_id, node->bytes_reserved, 0);
 680	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
 681	if (qgroup_free)
 682		btrfs_qgroup_free_meta_prealloc(node->root,
 683				node->bytes_reserved);
 684	else
 685		btrfs_qgroup_convert_reserved_meta(node->root,
 686				node->bytes_reserved);
 687	node->bytes_reserved = 0;
 688}
 689
 690/*
 691 * This helper will insert some continuous items into the same leaf according
 692 * to the free space of the leaf.
 693 */
 694static int btrfs_batch_insert_items(struct btrfs_root *root,
 695				    struct btrfs_path *path,
 696				    struct btrfs_delayed_item *item)
 697{
 698	struct btrfs_delayed_item *curr, *next;
 699	int free_space;
 700	int total_data_size = 0, total_size = 0;
 701	struct extent_buffer *leaf;
 702	char *data_ptr;
 703	struct btrfs_key *keys;
 704	u32 *data_size;
 705	struct list_head head;
 706	int slot;
 707	int nitems;
 708	int i;
 709	int ret = 0;
 710
 711	BUG_ON(!path->nodes[0]);
 712
 713	leaf = path->nodes[0];
 714	free_space = btrfs_leaf_free_space(leaf);
 715	INIT_LIST_HEAD(&head);
 716
 717	next = item;
 718	nitems = 0;
 719
 720	/*
 721	 * Count the number of continuous items that we can insert in a batch.
 722	 */
 723	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 724	       free_space) {
 725		total_data_size += next->data_len;
 726		total_size += next->data_len + sizeof(struct btrfs_item);
 727		list_add_tail(&next->tree_list, &head);
 728		nitems++;
 729
 730		curr = next;
 731		next = __btrfs_next_delayed_item(curr);
 732		if (!next)
 733			break;
 734
 735		if (!btrfs_is_continuous_delayed_item(curr, next))
 736			break;
 737	}
 738
 739	if (!nitems) {
 740		ret = 0;
 741		goto out;
 742	}
 743
 744	/*
 745	 * We need to allocate some memory, but that might cause the task
 746	 * to sleep, so we set all locked nodes in the path to blocking locks
 747	 * first.
 748	 */
 749	btrfs_set_path_blocking(path);
 750
 751	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 752	if (!keys) {
 753		ret = -ENOMEM;
 754		goto out;
 755	}
 756
 757	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 758	if (!data_size) {
 759		ret = -ENOMEM;
 760		goto error;
 761	}
 762
 763	/* get keys of all the delayed items */
 764	i = 0;
 765	list_for_each_entry(next, &head, tree_list) {
 766		keys[i] = next->key;
 767		data_size[i] = next->data_len;
 768		i++;
 769	}
 770
 771	/* insert the keys of the items */
 772	setup_items_for_insert(root, path, keys, data_size,
 773			       total_data_size, total_size, nitems);
 774
 775	/* insert the dir index items */
 776	slot = path->slots[0];
 777	list_for_each_entry_safe(curr, next, &head, tree_list) {
 778		data_ptr = btrfs_item_ptr(leaf, slot, char);
 779		write_extent_buffer(leaf, &curr->data,
 780				    (unsigned long)data_ptr,
 781				    curr->data_len);
 782		slot++;
 783
 784		btrfs_delayed_item_release_metadata(root, curr);
 785
 786		list_del(&curr->tree_list);
 787		btrfs_release_delayed_item(curr);
 788	}
 789
 790error:
 791	kfree(data_size);
 792	kfree(keys);
 793out:
 794	return ret;
 795}
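/*
 * Editor's note (worked example; on-disk sizes assumed for this era:
 * 25 bytes per struct btrfs_item header, 30 bytes per struct
 * btrfs_dir_item before the name): a dir index entry with a
 * 20-character name costs 30 + 20 = 50 bytes of data plus the 25-byte
 * item header, i.e. 75 bytes of leaf free space per batched entry, so
 * one tree search can often insert dozens of continuous entries.
 */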
 796
 797/*
 798 * This helper handles simple insertions that needn't extend an item for
 799 * new data, such as directory name index insertion and inode insertion.
 800 */
 801static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 802				     struct btrfs_root *root,
 803				     struct btrfs_path *path,
 804				     struct btrfs_delayed_item *delayed_item)
 805{
 806	struct extent_buffer *leaf;
 807	unsigned int nofs_flag;
 808	char *ptr;
 809	int ret;
 810
 811	nofs_flag = memalloc_nofs_save();
 812	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 813				      delayed_item->data_len);
 814	memalloc_nofs_restore(nofs_flag);
 815	if (ret < 0 && ret != -EEXIST)
 816		return ret;
 817
 818	leaf = path->nodes[0];
 819
 820	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 821
 822	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 823			    delayed_item->data_len);
 824	btrfs_mark_buffer_dirty(leaf);
 825
 826	btrfs_delayed_item_release_metadata(root, delayed_item);
 827	return 0;
 828}
 829
 830/*
 831 * We insert an item first and then, if there are continuous items, we try
 832 * to insert those items into the same leaf.
 833 */
 834static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 835				      struct btrfs_path *path,
 836				      struct btrfs_root *root,
 837				      struct btrfs_delayed_node *node)
 838{
 839	struct btrfs_delayed_item *curr, *prev;
 840	int ret = 0;
 841
 842do_again:
 843	mutex_lock(&node->mutex);
 844	curr = __btrfs_first_delayed_insertion_item(node);
 845	if (!curr)
 846		goto insert_end;
 847
 848	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 849	if (ret < 0) {
 850		btrfs_release_path(path);
 851		goto insert_end;
 852	}
 853
 854	prev = curr;
 855	curr = __btrfs_next_delayed_item(prev);
 856	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 857		/* insert the continuous items into the same leaf */
 858		path->slots[0]++;
 859		btrfs_batch_insert_items(root, path, curr);
 860	}
 861	btrfs_release_delayed_item(prev);
 862	btrfs_mark_buffer_dirty(path->nodes[0]);
 863
 864	btrfs_release_path(path);
 865	mutex_unlock(&node->mutex);
 866	goto do_again;
 867
 868insert_end:
 869	mutex_unlock(&node->mutex);
 870	return ret;
 871}
 872
 873static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 874				    struct btrfs_root *root,
 875				    struct btrfs_path *path,
 876				    struct btrfs_delayed_item *item)
 877{
 878	struct btrfs_delayed_item *curr, *next;
 879	struct extent_buffer *leaf;
 880	struct btrfs_key key;
 881	struct list_head head;
 882	int nitems, i, last_item;
 883	int ret = 0;
 884
 885	BUG_ON(!path->nodes[0]);
 886
 887	leaf = path->nodes[0];
 888
 889	i = path->slots[0];
 890	last_item = btrfs_header_nritems(leaf) - 1;
 891	if (i > last_item)
 892		return -ENOENT;	/* FIXME: Is errno suitable? */
 893
 894	next = item;
 895	INIT_LIST_HEAD(&head);
 896	btrfs_item_key_to_cpu(leaf, &key, i);
 897	nitems = 0;
 898	/*
 899	 * Count the number of dir index items that we can delete in a batch.
 900	 */
 901	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 902		list_add_tail(&next->tree_list, &head);
 903		nitems++;
 904
 905		curr = next;
 906		next = __btrfs_next_delayed_item(curr);
 907		if (!next)
 908			break;
 909
 910		if (!btrfs_is_continuous_delayed_item(curr, next))
 911			break;
 912
 913		i++;
 914		if (i > last_item)
 915			break;
 916		btrfs_item_key_to_cpu(leaf, &key, i);
 917	}
 918
 919	if (!nitems)
 920		return 0;
 921
 922	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 923	if (ret)
 924		goto out;
 925
 926	list_for_each_entry_safe(curr, next, &head, tree_list) {
 927		btrfs_delayed_item_release_metadata(root, curr);
 928		list_del(&curr->tree_list);
 929		btrfs_release_delayed_item(curr);
 930	}
 931
 932out:
 933	return ret;
 934}
 935
 936static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 937				      struct btrfs_path *path,
 938				      struct btrfs_root *root,
 939				      struct btrfs_delayed_node *node)
 940{
 941	struct btrfs_delayed_item *curr, *prev;
 942	unsigned int nofs_flag;
 943	int ret = 0;
 944
 945do_again:
 946	mutex_lock(&node->mutex);
 947	curr = __btrfs_first_delayed_deletion_item(node);
 948	if (!curr)
 949		goto delete_fail;
 950
 951	nofs_flag = memalloc_nofs_save();
 952	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 953	memalloc_nofs_restore(nofs_flag);
 954	if (ret < 0)
 955		goto delete_fail;
 956	else if (ret > 0) {
 957		/*
 958		 * We can't find the item which the node points to, so this
 959		 * node is invalid; just drop it.
 960		 */
 961		prev = curr;
 962		curr = __btrfs_next_delayed_item(prev);
 963		btrfs_release_delayed_item(prev);
 964		ret = 0;
 965		btrfs_release_path(path);
 966		if (curr) {
 967			mutex_unlock(&node->mutex);
 968			goto do_again;
 969		} else
 970			goto delete_fail;
 971	}
 972
 973	btrfs_batch_delete_items(trans, root, path, curr);
 974	btrfs_release_path(path);
 975	mutex_unlock(&node->mutex);
 976	goto do_again;
 977
 978delete_fail:
 979	btrfs_release_path(path);
 980	mutex_unlock(&node->mutex);
 981	return ret;
 982}
 983
 984static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 985{
 986	struct btrfs_delayed_root *delayed_root;
 987
 988	if (delayed_node &&
 989	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 990		BUG_ON(!delayed_node->root);
 991		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 992		delayed_node->count--;
 993
 994		delayed_root = delayed_node->root->fs_info->delayed_root;
 995		finish_one_item(delayed_root);
 996	}
 997}
 998
 999static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1000{
1001	struct btrfs_delayed_root *delayed_root;
1002
1003	ASSERT(delayed_node->root);
1004	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1005	delayed_node->count--;
1006
1007	delayed_root = delayed_node->root->fs_info->delayed_root;
1008	finish_one_item(delayed_root);
1009}
1010
1011static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1012					struct btrfs_root *root,
1013					struct btrfs_path *path,
1014					struct btrfs_delayed_node *node)
1015{
1016	struct btrfs_fs_info *fs_info = root->fs_info;
1017	struct btrfs_key key;
1018	struct btrfs_inode_item *inode_item;
1019	struct extent_buffer *leaf;
1020	unsigned int nofs_flag;
1021	int mod;
1022	int ret;
1023
1024	key.objectid = node->inode_id;
1025	key.type = BTRFS_INODE_ITEM_KEY;
1026	key.offset = 0;
1027
1028	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1029		mod = -1;
1030	else
1031		mod = 1;
1032
1033	nofs_flag = memalloc_nofs_save();
1034	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1035	memalloc_nofs_restore(nofs_flag);
1036	if (ret > 0) {
1037		btrfs_release_path(path);
1038		return -ENOENT;
1039	} else if (ret < 0) {
1040		return ret;
1041	}
1042
1043	leaf = path->nodes[0];
1044	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1045				    struct btrfs_inode_item);
1046	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1047			    sizeof(struct btrfs_inode_item));
1048	btrfs_mark_buffer_dirty(leaf);
1049
1050	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1051		goto no_iref;
1052
1053	path->slots[0]++;
1054	if (path->slots[0] >= btrfs_header_nritems(leaf))
1055		goto search;
1056again:
1057	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1058	if (key.objectid != node->inode_id)
1059		goto out;
1060
1061	if (key.type != BTRFS_INODE_REF_KEY &&
1062	    key.type != BTRFS_INODE_EXTREF_KEY)
1063		goto out;
1064
1065	/*
1066	 * Delayed iref deletion is only done for an inode that has a single
1067	 * link, so there is only one iref.  The case where several irefs are
1068	 * in the same item doesn't exist.
1069	 */
1070	btrfs_del_item(trans, root, path);
1071out:
1072	btrfs_release_delayed_iref(node);
1073no_iref:
1074	btrfs_release_path(path);
1075err_out:
1076	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1077	btrfs_release_delayed_inode(node);
1078
1079	return ret;
1080
1081search:
1082	btrfs_release_path(path);
1083
1084	key.type = BTRFS_INODE_EXTREF_KEY;
1085	key.offset = -1;
1086
1087	nofs_flag = memalloc_nofs_save();
1088	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1089	memalloc_nofs_restore(nofs_flag);
1090	if (ret < 0)
1091		goto err_out;
1092	ASSERT(ret);
1093
1094	ret = 0;
1095	leaf = path->nodes[0];
1096	path->slots[0]--;
1097	goto again;
1098}
1099
1100static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1101					     struct btrfs_root *root,
1102					     struct btrfs_path *path,
1103					     struct btrfs_delayed_node *node)
1104{
1105	int ret;
1106
1107	mutex_lock(&node->mutex);
1108	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1109		mutex_unlock(&node->mutex);
1110		return 0;
1111	}
1112
1113	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1114	mutex_unlock(&node->mutex);
1115	return ret;
1116}
1117
1118static inline int
1119__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1120				   struct btrfs_path *path,
1121				   struct btrfs_delayed_node *node)
1122{
1123	int ret;
1124
1125	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1126	if (ret)
1127		return ret;
1128
1129	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1130	if (ret)
1131		return ret;
1132
1133	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1134	return ret;
1135}
1136
1137/*
1138 * Called when committing the transaction.
1139 * Returns 0 on success.
1140 * Returns < 0 on error and returns with an aborted transaction with any
1141 * outstanding delayed items cleaned up.
1142 */
1143static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1144{
1145	struct btrfs_fs_info *fs_info = trans->fs_info;
1146	struct btrfs_delayed_root *delayed_root;
1147	struct btrfs_delayed_node *curr_node, *prev_node;
1148	struct btrfs_path *path;
1149	struct btrfs_block_rsv *block_rsv;
1150	int ret = 0;
1151	bool count = (nr > 0);
1152
1153	if (TRANS_ABORTED(trans))
1154		return -EIO;
1155
1156	path = btrfs_alloc_path();
1157	if (!path)
1158		return -ENOMEM;
1159	path->leave_spinning = 1;
1160
1161	block_rsv = trans->block_rsv;
1162	trans->block_rsv = &fs_info->delayed_block_rsv;
1163
1164	delayed_root = fs_info->delayed_root;
1165
1166	curr_node = btrfs_first_delayed_node(delayed_root);
1167	while (curr_node && (!count || (count && nr--))) {
1168		ret = __btrfs_commit_inode_delayed_items(trans, path,
1169							 curr_node);
1170		if (ret) {
1171			btrfs_release_delayed_node(curr_node);
1172			curr_node = NULL;
1173			btrfs_abort_transaction(trans, ret);
1174			break;
1175		}
1176
1177		prev_node = curr_node;
1178		curr_node = btrfs_next_delayed_node(curr_node);
1179		btrfs_release_delayed_node(prev_node);
1180	}
1181
1182	if (curr_node)
1183		btrfs_release_delayed_node(curr_node);
1184	btrfs_free_path(path);
1185	trans->block_rsv = block_rsv;
1186
1187	return ret;
1188}
1189
1190int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1191{
1192	return __btrfs_run_delayed_items(trans, -1);
1193}
1194
1195int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1196{
1197	return __btrfs_run_delayed_items(trans, nr);
1198}
1199
1200int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1201				     struct btrfs_inode *inode)
1202{
1203	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1204	struct btrfs_path *path;
1205	struct btrfs_block_rsv *block_rsv;
1206	int ret;
1207
1208	if (!delayed_node)
1209		return 0;
1210
1211	mutex_lock(&delayed_node->mutex);
1212	if (!delayed_node->count) {
1213		mutex_unlock(&delayed_node->mutex);
1214		btrfs_release_delayed_node(delayed_node);
1215		return 0;
1216	}
1217	mutex_unlock(&delayed_node->mutex);
1218
1219	path = btrfs_alloc_path();
1220	if (!path) {
1221		btrfs_release_delayed_node(delayed_node);
1222		return -ENOMEM;
1223	}
1224	path->leave_spinning = 1;
1225
1226	block_rsv = trans->block_rsv;
1227	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1228
1229	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1230
1231	btrfs_release_delayed_node(delayed_node);
1232	btrfs_free_path(path);
1233	trans->block_rsv = block_rsv;
1234
1235	return ret;
1236}
1237
1238int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1239{
1240	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1241	struct btrfs_trans_handle *trans;
1242	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1243	struct btrfs_path *path;
1244	struct btrfs_block_rsv *block_rsv;
1245	int ret;
1246
1247	if (!delayed_node)
1248		return 0;
1249
1250	mutex_lock(&delayed_node->mutex);
1251	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1252		mutex_unlock(&delayed_node->mutex);
1253		btrfs_release_delayed_node(delayed_node);
1254		return 0;
1255	}
1256	mutex_unlock(&delayed_node->mutex);
1257
1258	trans = btrfs_join_transaction(delayed_node->root);
1259	if (IS_ERR(trans)) {
1260		ret = PTR_ERR(trans);
1261		goto out;
1262	}
1263
1264	path = btrfs_alloc_path();
1265	if (!path) {
1266		ret = -ENOMEM;
1267		goto trans_out;
1268	}
1269	path->leave_spinning = 1;
1270
1271	block_rsv = trans->block_rsv;
1272	trans->block_rsv = &fs_info->delayed_block_rsv;
1273
1274	mutex_lock(&delayed_node->mutex);
1275	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1276		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1277						   path, delayed_node);
1278	else
1279		ret = 0;
1280	mutex_unlock(&delayed_node->mutex);
1281
1282	btrfs_free_path(path);
1283	trans->block_rsv = block_rsv;
1284trans_out:
1285	btrfs_end_transaction(trans);
1286	btrfs_btree_balance_dirty(fs_info);
1287out:
1288	btrfs_release_delayed_node(delayed_node);
1289
1290	return ret;
1291}
1292
1293void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1294{
1295	struct btrfs_delayed_node *delayed_node;
1296
1297	delayed_node = READ_ONCE(inode->delayed_node);
1298	if (!delayed_node)
1299		return;
1300
1301	inode->delayed_node = NULL;
1302	btrfs_release_delayed_node(delayed_node);
1303}
1304
1305struct btrfs_async_delayed_work {
1306	struct btrfs_delayed_root *delayed_root;
1307	int nr;
1308	struct btrfs_work work;
1309};
1310
1311static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1312{
1313	struct btrfs_async_delayed_work *async_work;
1314	struct btrfs_delayed_root *delayed_root;
1315	struct btrfs_trans_handle *trans;
1316	struct btrfs_path *path;
1317	struct btrfs_delayed_node *delayed_node = NULL;
1318	struct btrfs_root *root;
1319	struct btrfs_block_rsv *block_rsv;
1320	int total_done = 0;
1321
1322	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1323	delayed_root = async_work->delayed_root;
1324
1325	path = btrfs_alloc_path();
1326	if (!path)
1327		goto out;
1328
1329	do {
1330		if (atomic_read(&delayed_root->items) <
1331		    BTRFS_DELAYED_BACKGROUND / 2)
1332			break;
1333
1334		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1335		if (!delayed_node)
1336			break;
1337
1338		path->leave_spinning = 1;
1339		root = delayed_node->root;
1340
1341		trans = btrfs_join_transaction(root);
1342		if (IS_ERR(trans)) {
1343			btrfs_release_path(path);
1344			btrfs_release_prepared_delayed_node(delayed_node);
1345			total_done++;
1346			continue;
1347		}
1348
1349		block_rsv = trans->block_rsv;
1350		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1351
1352		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1353
1354		trans->block_rsv = block_rsv;
1355		btrfs_end_transaction(trans);
1356		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1357
1358		btrfs_release_path(path);
1359		btrfs_release_prepared_delayed_node(delayed_node);
1360		total_done++;
1361
1362	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1363		 || total_done < async_work->nr);
1364
1365	btrfs_free_path(path);
1366out:
1367	wake_up(&delayed_root->wait);
1368	kfree(async_work);
1369}
1370
1371
1372static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1373				     struct btrfs_fs_info *fs_info, int nr)
1374{
1375	struct btrfs_async_delayed_work *async_work;
1376
1377	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1378	if (!async_work)
1379		return -ENOMEM;
1380
1381	async_work->delayed_root = delayed_root;
1382	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1383			NULL);
1384	async_work->nr = nr;
1385
1386	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1387	return 0;
1388}
1389
1390void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1391{
1392	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1393}
1394
1395static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1396{
1397	int val = atomic_read(&delayed_root->items_seq);
1398
1399	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1400		return 1;
1401
1402	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1403		return 1;
1404
1405	return 0;
1406}
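/*
 * Editor's note (worked example): a throttled caller samples items_seq
 * as @seq before kicking the async worker.  The test above lets it stop
 * waiting once at least BTRFS_DELAYED_BATCH (16) further items have
 * completed (val >= seq + 16, with val < seq catching a counter wrap),
 * or once the backlog has dropped below BTRFS_DELAYED_BACKGROUND (128).
 */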
1407
1408void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1409{
1410	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1411
1412	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1413		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1414		return;
1415
1416	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1417		int seq;
1418		int ret;
1419
1420		seq = atomic_read(&delayed_root->items_seq);
1421
1422		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1423		if (ret)
1424			return;
1425
1426		wait_event_interruptible(delayed_root->wait,
1427					 could_end_wait(delayed_root, seq));
1428		return;
1429	}
1430
1431	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1432}
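/*
 * Editor's note summarising the thresholds above: below 128 queued
 * items (BTRFS_DELAYED_BACKGROUND) nothing is done; at 512 or more
 * (BTRFS_DELAYED_WRITEBACK) an async flush of everything (nr == 0) is
 * kicked and the caller throttles until could_end_wait() is satisfied;
 * in between, a fire-and-forget worker handles a batch of 16
 * (BTRFS_DELAYED_BATCH).
 */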
1433
1434/* Will return 0 or -ENOMEM */
1435int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1436				   const char *name, int name_len,
1437				   struct btrfs_inode *dir,
1438				   struct btrfs_disk_key *disk_key, u8 type,
1439				   u64 index)
1440{
1441	struct btrfs_delayed_node *delayed_node;
1442	struct btrfs_delayed_item *delayed_item;
1443	struct btrfs_dir_item *dir_item;
1444	int ret;
1445
1446	delayed_node = btrfs_get_or_create_delayed_node(dir);
1447	if (IS_ERR(delayed_node))
1448		return PTR_ERR(delayed_node);
1449
1450	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1451	if (!delayed_item) {
1452		ret = -ENOMEM;
1453		goto release_node;
1454	}
1455
1456	delayed_item->key.objectid = btrfs_ino(dir);
1457	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1458	delayed_item->key.offset = index;
1459
1460	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1461	dir_item->location = *disk_key;
1462	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1463	btrfs_set_stack_dir_data_len(dir_item, 0);
1464	btrfs_set_stack_dir_name_len(dir_item, name_len);
1465	btrfs_set_stack_dir_type(dir_item, type);
1466	memcpy((char *)(dir_item + 1), name, name_len);
1467
1468	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1469	/*
1470	 * We have reserved enough space when starting a new transaction,
1471	 * so a metadata reservation failure is impossible.
1472	 */
1473	BUG_ON(ret);
1474
1475	mutex_lock(&delayed_node->mutex);
1476	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1477	if (unlikely(ret)) {
1478		btrfs_err(trans->fs_info,
1479			  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1480			  name_len, name, delayed_node->root->root_key.objectid,
1481			  delayed_node->inode_id, ret);
1482		BUG();
1483	}
1484	mutex_unlock(&delayed_node->mutex);
1485
1486release_node:
1487	btrfs_release_delayed_node(delayed_node);
1488	return ret;
1489}
1490
1491static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1492					       struct btrfs_delayed_node *node,
1493					       struct btrfs_key *key)
1494{
1495	struct btrfs_delayed_item *item;
1496
1497	mutex_lock(&node->mutex);
1498	item = __btrfs_lookup_delayed_insertion_item(node, key);
1499	if (!item) {
1500		mutex_unlock(&node->mutex);
1501		return 1;
1502	}
1503
1504	btrfs_delayed_item_release_metadata(node->root, item);
1505	btrfs_release_delayed_item(item);
1506	mutex_unlock(&node->mutex);
1507	return 0;
1508}
1509
1510int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1511				   struct btrfs_inode *dir, u64 index)
1512{
1513	struct btrfs_delayed_node *node;
1514	struct btrfs_delayed_item *item;
1515	struct btrfs_key item_key;
1516	int ret;
1517
1518	node = btrfs_get_or_create_delayed_node(dir);
1519	if (IS_ERR(node))
1520		return PTR_ERR(node);
1521
1522	item_key.objectid = btrfs_ino(dir);
1523	item_key.type = BTRFS_DIR_INDEX_KEY;
1524	item_key.offset = index;
1525
1526	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1527						  &item_key);
1528	if (!ret)
1529		goto end;
1530
1531	item = btrfs_alloc_delayed_item(0);
1532	if (!item) {
1533		ret = -ENOMEM;
1534		goto end;
1535	}
1536
1537	item->key = item_key;
1538
1539	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1540	/*
1541	 * We have reserved enough space when starting a new transaction,
1542	 * so a metadata reservation failure is impossible.
1543	 */
1544	if (ret < 0) {
1545		btrfs_err(trans->fs_info,
1546"metadata reservation failed for delayed dir item deletion, should have been reserved");
1547		btrfs_release_delayed_item(item);
1548		goto end;
1549	}
1550
1551	mutex_lock(&node->mutex);
1552	ret = __btrfs_add_delayed_deletion_item(node, item);
1553	if (unlikely(ret)) {
1554		btrfs_err(trans->fs_info,
1555			  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1556			  index, node->root->root_key.objectid,
1557			  node->inode_id, ret);
1558		btrfs_delayed_item_release_metadata(dir->root, item);
1559		btrfs_release_delayed_item(item);
1560	}
1561	mutex_unlock(&node->mutex);
1562end:
1563	btrfs_release_delayed_node(node);
1564	return ret;
1565}
1566
1567int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1568{
1569	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1570
1571	if (!delayed_node)
1572		return -ENOENT;
1573
1574	/*
1575	 * Since we hold the i_mutex of this directory, it is impossible that
1576	 * a new directory index is added into the delayed node and index_cnt
1577	 * is updated now.  So we needn't lock the delayed node.
1578	 */
1579	if (!delayed_node->index_cnt) {
1580		btrfs_release_delayed_node(delayed_node);
1581		return -EINVAL;
1582	}
1583
1584	inode->index_cnt = delayed_node->index_cnt;
1585	btrfs_release_delayed_node(delayed_node);
1586	return 0;
1587}
1588
1589bool btrfs_readdir_get_delayed_items(struct inode *inode,
1590				     struct list_head *ins_list,
1591				     struct list_head *del_list)
1592{
1593	struct btrfs_delayed_node *delayed_node;
1594	struct btrfs_delayed_item *item;
1595
1596	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1597	if (!delayed_node)
1598		return false;
1599
1600	/*
1601	 * We can only do one readdir with delayed items at a time because of
1602	 * item->readdir_list.
1603	 */
1604	inode_unlock_shared(inode);
1605	inode_lock(inode);
1606
1607	mutex_lock(&delayed_node->mutex);
1608	item = __btrfs_first_delayed_insertion_item(delayed_node);
1609	while (item) {
1610		refcount_inc(&item->refs);
1611		list_add_tail(&item->readdir_list, ins_list);
1612		item = __btrfs_next_delayed_item(item);
1613	}
1614
1615	item = __btrfs_first_delayed_deletion_item(delayed_node);
1616	while (item) {
1617		refcount_inc(&item->refs);
1618		list_add_tail(&item->readdir_list, del_list);
1619		item = __btrfs_next_delayed_item(item);
1620	}
1621	mutex_unlock(&delayed_node->mutex);
1622	/*
1623	 * This delayed node is still cached in the btrfs inode, so refs
1624	 * must be > 1 now, and we needn't check whether it is going to be
1625	 * freed or not.
1626	 *
1627	 * Besides that, this function is used to read the directory; we do
1628	 * not insert/delete delayed items in this period.  So we also needn't
1629	 * requeue or dequeue this delayed node.
1630	 */
1631	refcount_dec(&delayed_node->refs);
1632
1633	return true;
1634}
1635
1636void btrfs_readdir_put_delayed_items(struct inode *inode,
1637				     struct list_head *ins_list,
1638				     struct list_head *del_list)
1639{
1640	struct btrfs_delayed_item *curr, *next;
1641
1642	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1643		list_del(&curr->readdir_list);
1644		if (refcount_dec_and_test(&curr->refs))
1645			kfree(curr);
1646	}
1647
1648	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1649		list_del(&curr->readdir_list);
1650		if (refcount_dec_and_test(&curr->refs))
1651			kfree(curr);
1652	}
1653
1654	/*
1655	 * The VFS is going to do up_read(), so we need to downgrade back to a
1656	 * read lock.
1657	 */
1658	downgrade_write(&inode->i_rwsem);
1659}
1660
1661int btrfs_should_delete_dir_index(struct list_head *del_list,
1662				  u64 index)
1663{
1664	struct btrfs_delayed_item *curr;
1665	int ret = 0;
1666
1667	list_for_each_entry(curr, del_list, readdir_list) {
1668		if (curr->key.offset > index)
1669			break;
1670		if (curr->key.offset == index) {
1671			ret = 1;
1672			break;
1673		}
1674	}
1675	return ret;
1676}
1677
1678/*
1679 * btrfs_readdir_delayed_dir_index - read dir info stored in the
1680 * delayed tree.
1681 */
1682int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1683				    struct list_head *ins_list)
1684{
1685	struct btrfs_dir_item *di;
1686	struct btrfs_delayed_item *curr, *next;
1687	struct btrfs_key location;
1688	char *name;
1689	int name_len;
1690	int over = 0;
1691	unsigned char d_type;
1692
1693	if (list_empty(ins_list))
1694		return 0;
1695
1696	/*
1697	 * Changing the data of the delayed item is impossible, so we
1698	 * needn't lock them.  And since we hold the i_mutex of the
1699	 * directory, nobody can delete any directory index now.
1700	 */
1701	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1702		list_del(&curr->readdir_list);
1703
1704		if (curr->key.offset < ctx->pos) {
1705			if (refcount_dec_and_test(&curr->refs))
1706				kfree(curr);
1707			continue;
1708		}
1709
1710		ctx->pos = curr->key.offset;
1711
1712		di = (struct btrfs_dir_item *)curr->data;
1713		name = (char *)(di + 1);
1714		name_len = btrfs_stack_dir_name_len(di);
1715
1716		d_type = fs_ftype_to_dtype(di->type);
1717		btrfs_disk_key_to_cpu(&location, &di->location);
1718
1719		over = !dir_emit(ctx, name, name_len,
1720			       location.objectid, d_type);
1721
1722		if (refcount_dec_and_test(&curr->refs))
1723			kfree(curr);
1724
1725		if (over)
1726			return 1;
1727		ctx->pos++;
1728	}
1729	return 0;
1730}
1731
1732static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1733				  struct btrfs_inode_item *inode_item,
1734				  struct inode *inode)
1735{
1736	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1737	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1738	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1739	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1740	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1741	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1742	btrfs_set_stack_inode_generation(inode_item,
1743					 BTRFS_I(inode)->generation);
1744	btrfs_set_stack_inode_sequence(inode_item,
1745				       inode_peek_iversion(inode));
1746	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1747	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1748	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1749	btrfs_set_stack_inode_block_group(inode_item, 0);
1750
1751	btrfs_set_stack_timespec_sec(&inode_item->atime,
1752				     inode->i_atime.tv_sec);
1753	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1754				      inode->i_atime.tv_nsec);
1755
1756	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1757				     inode->i_mtime.tv_sec);
1758	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1759				      inode->i_mtime.tv_nsec);
1760
1761	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1762				     inode->i_ctime.tv_sec);
1763	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1764				      inode->i_ctime.tv_nsec);
1765
1766	btrfs_set_stack_timespec_sec(&inode_item->otime,
1767				     BTRFS_I(inode)->i_otime.tv_sec);
1768	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1769				     BTRFS_I(inode)->i_otime.tv_nsec);
1770}
1771
1772int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1773{
1774	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1775	struct btrfs_delayed_node *delayed_node;
1776	struct btrfs_inode_item *inode_item;
1777
1778	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1779	if (!delayed_node)
1780		return -ENOENT;
1781
1782	mutex_lock(&delayed_node->mutex);
1783	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1784		mutex_unlock(&delayed_node->mutex);
1785		btrfs_release_delayed_node(delayed_node);
1786		return -ENOENT;
1787	}
1788
1789	inode_item = &delayed_node->inode_item;
1790
1791	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1792	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1793	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1794	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1795			round_up(i_size_read(inode), fs_info->sectorsize));
1796	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1797	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1798	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1799	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1800	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1801
1802	inode_set_iversion_queried(inode,
1803				   btrfs_stack_inode_sequence(inode_item));
1804	inode->i_rdev = 0;
1805	*rdev = btrfs_stack_inode_rdev(inode_item);
1806	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1807
1808	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1809	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1810
1811	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1812	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1813
1814	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1815	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1816
1817	BTRFS_I(inode)->i_otime.tv_sec =
1818		btrfs_stack_timespec_sec(&inode_item->otime);
1819	BTRFS_I(inode)->i_otime.tv_nsec =
1820		btrfs_stack_timespec_nsec(&inode_item->otime);
1821
1822	inode->i_generation = BTRFS_I(inode)->generation;
1823	BTRFS_I(inode)->index_cnt = (u64)-1;
1824
1825	mutex_unlock(&delayed_node->mutex);
1826	btrfs_release_delayed_node(delayed_node);
1827	return 0;
1828}
1829
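/*
 * Record an inode update in the inode's delayed node instead of
 * updating the fs tree right away.  If the delayed inode item is
 * already dirty it is simply refreshed; otherwise metadata space is
 * reserved for the future tree modification first.
 */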
1830int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1831			       struct btrfs_root *root, struct inode *inode)
1832{
1833	struct btrfs_delayed_node *delayed_node;
1834	int ret = 0;
1835
1836	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1837	if (IS_ERR(delayed_node))
1838		return PTR_ERR(delayed_node);
1839
1840	mutex_lock(&delayed_node->mutex);
1841	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1842		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1843		goto release_node;
1844	}
1845
1846	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1847						   delayed_node);
1848	if (ret)
1849		goto release_node;
1850
1851	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1852	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1853	delayed_node->count++;
1854	atomic_inc(&root->fs_info->delayed_root->items);
1855release_node:
1856	mutex_unlock(&delayed_node->mutex);
1857	btrfs_release_delayed_node(delayed_node);
1858	return ret;
1859}
1860
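/*
 * Queue the deletion of an inode ref in the inode's delayed node.
 * Returns -EAGAIN during log recovery, where delayed inode operations
 * are not allowed (see the comment below about enospc).
 */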
1861int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1862{
1863	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1864	struct btrfs_delayed_node *delayed_node;
1865
1866	/*
1867	 * We don't do delayed inode updates during log recovery because it
1868	 * leads to enospc problems.  This means we also can't do delayed
1869	 * inode ref deletions at that time.
1870	 */
1871	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1872		return -EAGAIN;
1873
1874	delayed_node = btrfs_get_or_create_delayed_node(inode);
1875	if (IS_ERR(delayed_node))
1876		return PTR_ERR(delayed_node);
1877
1878	/*
1879	 * We don't reserve space for the inode ref deletion because:
1880	 * - We ONLY do async inode ref deletion for an inode that has only
1881	 *   one link (i_nlink == 1), which means there is only one inode ref.
1882	 *   In most cases the inode ref and the inode item are in the same
1883	 *   leaf, and we will deal with them at the same time.  Since we are
1884	 *   sure we will reserve space for the inode item, it is unnecessary
1885	 *   to reserve space for the inode ref deletion as well.
1886	 * - If the inode ref and the inode item are not in the same leaf,
1887	 *   we needn't worry about enospc either, because we reserve much
1888	 *   more space for the inode update than it actually needs.
1889	 * - At worst, we can steal some space from the global reservation,
1890	 *   but that is very rare.
1891	 */
1892	mutex_lock(&delayed_node->mutex);
1893	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1894		goto release_node;
1895
1896	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1897	delayed_node->count++;
1898	atomic_inc(&fs_info->delayed_root->items);
1899release_node:
1900	mutex_unlock(&delayed_node->mutex);
1901	btrfs_release_delayed_node(delayed_node);
1902	return 0;
1903}
1904
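/*
 * Throw away everything a delayed node holds without applying it:
 * all pending insertion and deletion items together with their
 * reserved metadata, a queued inode ref deletion, and a dirty
 * delayed inode item, if present.
 */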
1905static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1906{
1907	struct btrfs_root *root = delayed_node->root;
1908	struct btrfs_fs_info *fs_info = root->fs_info;
1909	struct btrfs_delayed_item *curr_item, *prev_item;
1910
1911	mutex_lock(&delayed_node->mutex);
1912	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1913	while (curr_item) {
1914		btrfs_delayed_item_release_metadata(root, curr_item);
1915		prev_item = curr_item;
1916		curr_item = __btrfs_next_delayed_item(prev_item);
1917		btrfs_release_delayed_item(prev_item);
1918	}
1919
1920	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1921	while (curr_item) {
1922		btrfs_delayed_item_release_metadata(root, curr_item);
1923		prev_item = curr_item;
1924		curr_item = __btrfs_next_delayed_item(prev_item);
1925		btrfs_release_delayed_item(prev_item);
1926	}
1927
1928	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1929		btrfs_release_delayed_iref(delayed_node);
1930
1931	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1932		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1933		btrfs_release_delayed_inode(delayed_node);
1934	}
1935	mutex_unlock(&delayed_node->mutex);
1936}
1937
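/* Throw away all delayed items of a single inode. */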
1938void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1939{
1940	struct btrfs_delayed_node *delayed_node;
1941
1942	delayed_node = btrfs_get_delayed_node(inode);
1943	if (!delayed_node)
1944		return;
1945
1946	__btrfs_kill_delayed_node(delayed_node);
1947	btrfs_release_delayed_node(delayed_node);
1948}
1949
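/*
 * Throw away the delayed items of every inode of the given root.  The
 * radix tree is scanned in batches of up to ARRAY_SIZE(delayed_nodes)
 * nodes, resuming each pass just past the highest inode id seen.
 */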
1950void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1951{
1952	u64 inode_id = 0;
1953	struct btrfs_delayed_node *delayed_nodes[8];
1954	int i, n;
1955
1956	while (1) {
1957		spin_lock(&root->inode_lock);
1958		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1959					   (void **)delayed_nodes, inode_id,
1960					   ARRAY_SIZE(delayed_nodes));
1961		if (!n) {
1962			spin_unlock(&root->inode_lock);
1963			break;
1964		}
1965
1966		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1967		for (i = 0; i < n; i++) {
1968			/*
1969			 * Don't increase the refcount if the node is dead and
1970			 * about to be removed from the tree in the loop below.
1971			 */
1972			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1973				delayed_nodes[i] = NULL;
1974		}
1975		spin_unlock(&root->inode_lock);
1976
1977		for (i = 0; i < n; i++) {
1978			if (!delayed_nodes[i])
1979				continue;
1980			__btrfs_kill_delayed_node(delayed_nodes[i]);
1981			btrfs_release_delayed_node(delayed_nodes[i]);
1982		}
1983	}
1984}
1985
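/*
 * Throw away every delayed node still queued on the delayed root;
 * used for cleanup, e.g. after an aborted transaction.
 */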
1986void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1987{
1988	struct btrfs_delayed_node *curr_node, *prev_node;
1989
1990	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1991	while (curr_node) {
1992		__btrfs_kill_delayed_node(curr_node);
1993
1994		prev_node = curr_node;
1995		curr_node = btrfs_next_delayed_node(curr_node);
1996		btrfs_release_delayed_node(prev_node);
1997	}
1998}
1999