fs/btrfs/delayed-inode.c (Linux v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   4 * Written by Miao Xie <miaox@cn.fujitsu.com>
   5 */
   6
   7#include <linux/slab.h>
   8#include <linux/iversion.h>
   9#include <linux/sched/mm.h>
  10#include "misc.h"
  11#include "delayed-inode.h"
  12#include "disk-io.h"
  13#include "transaction.h"
  14#include "ctree.h"
  15#include "qgroup.h"
  16#include "locking.h"
  17
  18#define BTRFS_DELAYED_WRITEBACK		512
  19#define BTRFS_DELAYED_BACKGROUND	128
  20#define BTRFS_DELAYED_BATCH		16
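/*
 * Tuning thresholds (a sketch of how they interact, based on the code
 * below): background flushing kicks in once BTRFS_DELAYED_BACKGROUND
 * items are queued, callers start blocking at BTRFS_DELAYED_WRITEBACK,
 * and throttled waiters are woken per BTRFS_DELAYED_BATCH completions.
 */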
  21
  22static struct kmem_cache *delayed_node_cache;
  23
  24int __init btrfs_delayed_inode_init(void)
  25{
  26	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  27					sizeof(struct btrfs_delayed_node),
  28					0,
  29					SLAB_MEM_SPREAD,
  30					NULL);
  31	if (!delayed_node_cache)
  32		return -ENOMEM;
  33	return 0;
  34}
  35
  36void __cold btrfs_delayed_inode_exit(void)
  37{
  38	kmem_cache_destroy(delayed_node_cache);
  39}
  40
  41static inline void btrfs_init_delayed_node(
  42				struct btrfs_delayed_node *delayed_node,
  43				struct btrfs_root *root, u64 inode_id)
  44{
  45	delayed_node->root = root;
  46	delayed_node->inode_id = inode_id;
  47	refcount_set(&delayed_node->refs, 0);
  48	delayed_node->ins_root = RB_ROOT_CACHED;
  49	delayed_node->del_root = RB_ROOT_CACHED;
  50	mutex_init(&delayed_node->mutex);
  51	INIT_LIST_HEAD(&delayed_node->n_list);
  52	INIT_LIST_HEAD(&delayed_node->p_list);
  53}
  54
  55static inline int btrfs_is_continuous_delayed_item(
  56					struct btrfs_delayed_item *item1,
  57					struct btrfs_delayed_item *item2)
  58{
  59	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  60	    item1->key.objectid == item2->key.objectid &&
  61	    item1->key.type == item2->key.type &&
  62	    item1->key.offset + 1 == item2->key.offset)
  63		return 1;
  64	return 0;
  65}
  66
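/*
 * Look up and reference the delayed node of an inode. The fast path
 * reads the cached pointer locklessly with READ_ONCE(); the slow path
 * consults the per-root radix tree under inode_lock, using
 * refcount_inc_not_zero() to skip a node that is concurrently being
 * torn down by __btrfs_release_delayed_node().
 */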
  67static struct btrfs_delayed_node *btrfs_get_delayed_node(
  68		struct btrfs_inode *btrfs_inode)
  69{
  70	struct btrfs_root *root = btrfs_inode->root;
  71	u64 ino = btrfs_ino(btrfs_inode);
  72	struct btrfs_delayed_node *node;
  73
  74	node = READ_ONCE(btrfs_inode->delayed_node);
  75	if (node) {
  76		refcount_inc(&node->refs);
  77		return node;
  78	}
  79
  80	spin_lock(&root->inode_lock);
  81	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  82
  83	if (node) {
  84		if (btrfs_inode->delayed_node) {
  85			refcount_inc(&node->refs);	/* can be accessed */
  86			BUG_ON(btrfs_inode->delayed_node != node);
  87			spin_unlock(&root->inode_lock);
  88			return node;
  89		}
  90
  91		/*
  92		 * It's possible that we're racing into the middle of removing
  93		 * this node from the radix tree.  In this case, the refcount
  94		 * was zero and it should never go back to one.  Just return
  95		 * NULL like it was never in the radix at all; our release
  96		 * function is in the process of removing it.
  97		 *
  98		 * Some implementations of refcount_inc refuse to bump the
  99		 * refcount once it has hit zero.  If we don't do this dance
 100		 * here, refcount_inc() may decide to just WARN_ONCE() instead
 101		 * of actually bumping the refcount.
 102		 *
 103		 * If this node is properly in the radix, we want to bump the
 104		 * refcount twice, once for the inode and once for this get
 105		 * operation.
 106		 */
 107		if (refcount_inc_not_zero(&node->refs)) {
 108			refcount_inc(&node->refs);
 109			btrfs_inode->delayed_node = node;
 110		} else {
 111			node = NULL;
 112		}
 113
 114		spin_unlock(&root->inode_lock);
 115		return node;
 116	}
 117	spin_unlock(&root->inode_lock);
 118
 119	return NULL;
 120}
 121
 122/* Will return either the node or PTR_ERR(-ENOMEM) */
 123static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 124		struct btrfs_inode *btrfs_inode)
 125{
 126	struct btrfs_delayed_node *node;
 127	struct btrfs_root *root = btrfs_inode->root;
 128	u64 ino = btrfs_ino(btrfs_inode);
 129	int ret;
 130
 131again:
 132	node = btrfs_get_delayed_node(btrfs_inode);
 133	if (node)
 134		return node;
 135
 136	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 137	if (!node)
 138		return ERR_PTR(-ENOMEM);
 139	btrfs_init_delayed_node(node, root, ino);
 140
 141	/* cached in the btrfs inode and can be accessed */
 142	refcount_set(&node->refs, 2);
 143
 144	ret = radix_tree_preload(GFP_NOFS);
 145	if (ret) {
 146		kmem_cache_free(delayed_node_cache, node);
 147		return ERR_PTR(ret);
 148	}
 149
 150	spin_lock(&root->inode_lock);
 151	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 152	if (ret == -EEXIST) {
 153		spin_unlock(&root->inode_lock);
 154		kmem_cache_free(delayed_node_cache, node);
 155		radix_tree_preload_end();
 156		goto again;
 157	}
 158	btrfs_inode->delayed_node = node;
 159	spin_unlock(&root->inode_lock);
 160	radix_tree_preload_end();
 161
 162	return node;
 163}
 164
 165/*
 166 * Call it when holding delayed_node->mutex
 167 *
 168 * If mod = 1, add this node into the prepared list.
 169 */
 170static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 171				     struct btrfs_delayed_node *node,
 172				     int mod)
 173{
 174	spin_lock(&root->lock);
 175	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 176		if (!list_empty(&node->p_list))
 177			list_move_tail(&node->p_list, &root->prepare_list);
 178		else if (mod)
 179			list_add_tail(&node->p_list, &root->prepare_list);
 180	} else {
 181		list_add_tail(&node->n_list, &root->node_list);
 182		list_add_tail(&node->p_list, &root->prepare_list);
 183		refcount_inc(&node->refs);	/* inserted into list */
 184		root->nodes++;
 185		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 186	}
 187	spin_unlock(&root->lock);
 188}
 189
 190/* Call it when holding delayed_node->mutex */
 191static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 192				       struct btrfs_delayed_node *node)
 193{
 194	spin_lock(&root->lock);
 195	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 196		root->nodes--;
 197		refcount_dec(&node->refs);	/* not in the list */
 198		list_del_init(&node->n_list);
 199		if (!list_empty(&node->p_list))
 200			list_del_init(&node->p_list);
 201		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 202	}
 203	spin_unlock(&root->lock);
 204}
 205
 206static struct btrfs_delayed_node *btrfs_first_delayed_node(
 207			struct btrfs_delayed_root *delayed_root)
 208{
 209	struct list_head *p;
 210	struct btrfs_delayed_node *node = NULL;
 211
 212	spin_lock(&delayed_root->lock);
 213	if (list_empty(&delayed_root->node_list))
 214		goto out;
 215
 216	p = delayed_root->node_list.next;
 217	node = list_entry(p, struct btrfs_delayed_node, n_list);
 218	refcount_inc(&node->refs);
 219out:
 220	spin_unlock(&delayed_root->lock);
 221
 222	return node;
 223}
 224
 225static struct btrfs_delayed_node *btrfs_next_delayed_node(
 226						struct btrfs_delayed_node *node)
 227{
 228	struct btrfs_delayed_root *delayed_root;
 229	struct list_head *p;
 230	struct btrfs_delayed_node *next = NULL;
 231
 232	delayed_root = node->root->fs_info->delayed_root;
 233	spin_lock(&delayed_root->lock);
 234	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 235		/* not in the list */
 236		if (list_empty(&delayed_root->node_list))
 237			goto out;
 238		p = delayed_root->node_list.next;
 239	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
 240		goto out;
 241	else
 242		p = node->n_list.next;
 243
 244	next = list_entry(p, struct btrfs_delayed_node, n_list);
 245	refcount_inc(&next->refs);
 246out:
 247	spin_unlock(&delayed_root->lock);
 248
 249	return next;
 250}
 251
 252static void __btrfs_release_delayed_node(
 253				struct btrfs_delayed_node *delayed_node,
 254				int mod)
 255{
 256	struct btrfs_delayed_root *delayed_root;
 257
 258	if (!delayed_node)
 259		return;
 260
 261	delayed_root = delayed_node->root->fs_info->delayed_root;
 262
 263	mutex_lock(&delayed_node->mutex);
 264	if (delayed_node->count)
 265		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 266	else
 267		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 268	mutex_unlock(&delayed_node->mutex);
 269
 270	if (refcount_dec_and_test(&delayed_node->refs)) {
 271		struct btrfs_root *root = delayed_node->root;
 272
 273		spin_lock(&root->inode_lock);
 274		/*
 275		 * Once our refcount goes to zero, nobody is allowed to bump it
 276		 * back up.  We can delete it now.
 277		 */
 278		ASSERT(refcount_read(&delayed_node->refs) == 0);
 279		radix_tree_delete(&root->delayed_nodes_tree,
 280				  delayed_node->inode_id);
 281		spin_unlock(&root->inode_lock);
 282		kmem_cache_free(delayed_node_cache, delayed_node);
 283	}
 284}
 285
 286static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 287{
 288	__btrfs_release_delayed_node(node, 0);
 289}
 290
 291static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 292					struct btrfs_delayed_root *delayed_root)
 293{
 294	struct list_head *p;
 295	struct btrfs_delayed_node *node = NULL;
 296
 297	spin_lock(&delayed_root->lock);
 298	if (list_empty(&delayed_root->prepare_list))
 299		goto out;
 300
 301	p = delayed_root->prepare_list.next;
 302	list_del_init(p);
 303	node = list_entry(p, struct btrfs_delayed_node, p_list);
 304	refcount_inc(&node->refs);
 305out:
 306	spin_unlock(&delayed_root->lock);
 307
 308	return node;
 309}
 310
 311static inline void btrfs_release_prepared_delayed_node(
 312					struct btrfs_delayed_node *node)
 313{
 314	__btrfs_release_delayed_node(node, 1);
 315}
 316
 317static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 318{
 319	struct btrfs_delayed_item *item;
 320	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 321	if (item) {
 322		item->data_len = data_len;
 323		item->ins_or_del = 0;
 324		item->bytes_reserved = 0;
 325		item->delayed_node = NULL;
 326		refcount_set(&item->refs, 1);
 327	}
 328	return item;
 329}
 330
 331/*
 332 * __btrfs_lookup_delayed_item - look up the delayed item by key
  333 * @root:	  rb-root of the delayed node's insertion or deletion tree
 334 * @key:	  the key to look up
 335 * @prev:	  used to store the prev item if the right item isn't found
 336 * @next:	  used to store the next item if the right item isn't found
 337 *
 338 * Note: if we don't find the right item, we will return the prev item and
 339 * the next item.
 340 */
 341static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 342				struct rb_root *root,
 343				struct btrfs_key *key,
 344				struct btrfs_delayed_item **prev,
 345				struct btrfs_delayed_item **next)
 346{
 347	struct rb_node *node, *prev_node = NULL;
 348	struct btrfs_delayed_item *delayed_item = NULL;
 349	int ret = 0;
 350
 351	node = root->rb_node;
 352
 353	while (node) {
 354		delayed_item = rb_entry(node, struct btrfs_delayed_item,
 355					rb_node);
 356		prev_node = node;
 357		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 358		if (ret < 0)
 359			node = node->rb_right;
 360		else if (ret > 0)
 361			node = node->rb_left;
 362		else
 363			return delayed_item;
 364	}
 365
 366	if (prev) {
 367		if (!prev_node)
 368			*prev = NULL;
 369		else if (ret < 0)
 370			*prev = delayed_item;
 371		else if ((node = rb_prev(prev_node)) != NULL) {
 372			*prev = rb_entry(node, struct btrfs_delayed_item,
 373					 rb_node);
 374		} else
 375			*prev = NULL;
 376	}
 377
 378	if (next) {
 379		if (!prev_node)
 380			*next = NULL;
 381		else if (ret > 0)
 382			*next = delayed_item;
 383		else if ((node = rb_next(prev_node)) != NULL) {
 384			*next = rb_entry(node, struct btrfs_delayed_item,
 385					 rb_node);
 386		} else
 387			*next = NULL;
 388	}
 389	return NULL;
 390}
 391
 392static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 393					struct btrfs_delayed_node *delayed_node,
 394					struct btrfs_key *key)
 395{
 396	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
 397					   NULL, NULL);
 398}
 399
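/*
 * Link a delayed item into the node's insertion or deletion rb-tree,
 * selected by @action and kept sorted by key. Returns -EEXIST if an
 * item with the same key is already queued; for dir index insertions,
 * index_cnt is also advanced past the new offset.
 */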
 400static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 401				    struct btrfs_delayed_item *ins,
 402				    int action)
 403{
 404	struct rb_node **p, *node;
 405	struct rb_node *parent_node = NULL;
 406	struct rb_root_cached *root;
 407	struct btrfs_delayed_item *item;
 408	int cmp;
 409	bool leftmost = true;
 410
 411	if (action == BTRFS_DELAYED_INSERTION_ITEM)
 412		root = &delayed_node->ins_root;
 413	else if (action == BTRFS_DELAYED_DELETION_ITEM)
 414		root = &delayed_node->del_root;
 415	else
 416		BUG();
 417	p = &root->rb_root.rb_node;
 418	node = &ins->rb_node;
 419
 420	while (*p) {
 421		parent_node = *p;
 422		item = rb_entry(parent_node, struct btrfs_delayed_item,
 423				 rb_node);
 424
 425		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 426		if (cmp < 0) {
 427			p = &(*p)->rb_right;
 428			leftmost = false;
 429		} else if (cmp > 0) {
 430			p = &(*p)->rb_left;
 431		} else {
 432			return -EEXIST;
 433		}
 434	}
 435
 436	rb_link_node(node, parent_node, p);
 437	rb_insert_color_cached(node, root, leftmost);
 438	ins->delayed_node = delayed_node;
 439	ins->ins_or_del = action;
 440
 441	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 442	    action == BTRFS_DELAYED_INSERTION_ITEM &&
 443	    ins->key.offset >= delayed_node->index_cnt)
 444			delayed_node->index_cnt = ins->key.offset + 1;
 445
 446	delayed_node->count++;
 447	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 448	return 0;
 449}
 450
 451static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 452					      struct btrfs_delayed_item *item)
 453{
 454	return __btrfs_add_delayed_item(node, item,
 455					BTRFS_DELAYED_INSERTION_ITEM);
 456}
 457
 458static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 459					     struct btrfs_delayed_item *item)
 460{
 461	return __btrfs_add_delayed_item(node, item,
 462					BTRFS_DELAYED_DELETION_ITEM);
 463}
 464
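/*
 * Account one finished delayed item: advance items_seq, drop the global
 * item count, and wake throttled callers of btrfs_balance_delayed_items()
 * once the backlog falls below BTRFS_DELAYED_BACKGROUND or a whole
 * batch has completed.
 */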
 465static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 466{
 467	int seq = atomic_inc_return(&delayed_root->items_seq);
 468
 469	/* atomic_dec_return implies a barrier */
 470	if ((atomic_dec_return(&delayed_root->items) <
 471	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
 472		cond_wake_up_nomb(&delayed_root->wait);
 473}
 474
 475static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 476{
 477	struct rb_root_cached *root;
 478	struct btrfs_delayed_root *delayed_root;
 479
 480	/* Not associated with any delayed_node */
 481	if (!delayed_item->delayed_node)
 482		return;
 483	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 484
 485	BUG_ON(!delayed_root);
 486	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 487	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 488
 489	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 490		root = &delayed_item->delayed_node->ins_root;
 491	else
 492		root = &delayed_item->delayed_node->del_root;
 493
 494	rb_erase_cached(&delayed_item->rb_node, root);
 495	delayed_item->delayed_node->count--;
 496
 497	finish_one_item(delayed_root);
 498}
 499
 500static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 501{
 502	if (item) {
 503		__btrfs_remove_delayed_item(item);
 504		if (refcount_dec_and_test(&item->refs))
 505			kfree(item);
 506	}
 507}
 508
 509static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 510					struct btrfs_delayed_node *delayed_node)
 511{
 512	struct rb_node *p;
 513	struct btrfs_delayed_item *item = NULL;
 514
 515	p = rb_first_cached(&delayed_node->ins_root);
 516	if (p)
 517		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 518
 519	return item;
 520}
 521
 522static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 523					struct btrfs_delayed_node *delayed_node)
 524{
 525	struct rb_node *p;
 526	struct btrfs_delayed_item *item = NULL;
 527
 528	p = rb_first_cached(&delayed_node->del_root);
 529	if (p)
 530		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 531
 532	return item;
 533}
 534
 535static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 536						struct btrfs_delayed_item *item)
 537{
 538	struct rb_node *p;
 539	struct btrfs_delayed_item *next = NULL;
 540
 541	p = rb_next(&item->rb_node);
 542	if (p)
 543		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 544
 545	return next;
 546}
 547
 548static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 549					       struct btrfs_root *root,
 550					       struct btrfs_delayed_item *item)
 551{
 552	struct btrfs_block_rsv *src_rsv;
 553	struct btrfs_block_rsv *dst_rsv;
 554	struct btrfs_fs_info *fs_info = root->fs_info;
 555	u64 num_bytes;
 556	int ret;
 557
 558	if (!trans->bytes_reserved)
 559		return 0;
 560
 561	src_rsv = trans->block_rsv;
 562	dst_rsv = &fs_info->delayed_block_rsv;
 563
 564	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 565
 566	/*
  567	 * Here we migrate the space rsv from the transaction rsv, since we
  568	 * have already reserved space when starting the transaction, so there
  569	 * is no need to reserve qgroup space here.
 570	 */
 571	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 572	if (!ret) {
 573		trace_btrfs_space_reservation(fs_info, "delayed_item",
 574					      item->key.objectid,
 575					      num_bytes, 1);
 576		item->bytes_reserved = num_bytes;
 577	}
 578
 579	return ret;
 580}
 581
 582static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 583						struct btrfs_delayed_item *item)
 584{
 585	struct btrfs_block_rsv *rsv;
 586	struct btrfs_fs_info *fs_info = root->fs_info;
 587
 588	if (!item->bytes_reserved)
 589		return;
 590
 591	rsv = &fs_info->delayed_block_rsv;
 592	/*
 593	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
 594	 * to release/reserve qgroup space.
 595	 */
 596	trace_btrfs_space_reservation(fs_info, "delayed_item",
 597				      item->key.objectid, item->bytes_reserved,
 598				      0);
 599	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
 600}
 601
 602static int btrfs_delayed_inode_reserve_metadata(
 603					struct btrfs_trans_handle *trans,
 604					struct btrfs_root *root,
 605					struct btrfs_delayed_node *node)
 606{
 607	struct btrfs_fs_info *fs_info = root->fs_info;
 608	struct btrfs_block_rsv *src_rsv;
 609	struct btrfs_block_rsv *dst_rsv;
 610	u64 num_bytes;
 611	int ret;
 612
 613	src_rsv = trans->block_rsv;
 614	dst_rsv = &fs_info->delayed_block_rsv;
 615
 616	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 617
 618	/*
 619	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 620	 * which doesn't reserve space for speed.  This is a problem since we
 621	 * still need to reserve space for this update, so try to reserve the
 622	 * space.
 623	 *
 624	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 625	 * we always reserve enough to update the inode item.
 626	 */
 627	if (!src_rsv || (!trans->bytes_reserved &&
 628			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 629		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
 630					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
 631		if (ret < 0)
 632			return ret;
 633		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 634					  BTRFS_RESERVE_NO_FLUSH);
 635		/* NO_FLUSH could only fail with -ENOSPC */
 636		ASSERT(ret == 0 || ret == -ENOSPC);
 637		if (ret)
 638			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 639	} else {
 640		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 641	}
 642
 643	if (!ret) {
 644		trace_btrfs_space_reservation(fs_info, "delayed_inode",
 645					      node->inode_id, num_bytes, 1);
 646		node->bytes_reserved = num_bytes;
 647	}
 648
 649	return ret;
 650}
 651
 652static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 653						struct btrfs_delayed_node *node,
 654						bool qgroup_free)
 655{
 656	struct btrfs_block_rsv *rsv;
 657
 658	if (!node->bytes_reserved)
 659		return;
 660
 661	rsv = &fs_info->delayed_block_rsv;
 662	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 663				      node->inode_id, node->bytes_reserved, 0);
 664	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
 665	if (qgroup_free)
 666		btrfs_qgroup_free_meta_prealloc(node->root,
 667				node->bytes_reserved);
 668	else
 669		btrfs_qgroup_convert_reserved_meta(node->root,
 670				node->bytes_reserved);
 671	node->bytes_reserved = 0;
 672}
 673
 674/*
 675 * This helper will insert some continuous items into the same leaf according
 676 * to the free space of the leaf.
 677 */
 678static int btrfs_batch_insert_items(struct btrfs_root *root,
 679				    struct btrfs_path *path,
 680				    struct btrfs_delayed_item *item)
 681{
 682	struct btrfs_delayed_item *curr, *next;
 683	int free_space;
 684	int total_size = 0;
 685	struct extent_buffer *leaf;
 686	char *data_ptr;
 687	struct btrfs_key *keys;
 688	u32 *data_size;
 689	struct list_head head;
 690	int slot;
 691	int nitems;
 692	int i;
 693	int ret = 0;
 694
 695	BUG_ON(!path->nodes[0]);
 696
 697	leaf = path->nodes[0];
 698	free_space = btrfs_leaf_free_space(leaf);
 699	INIT_LIST_HEAD(&head);
 700
 701	next = item;
 702	nitems = 0;
 703
 704	/*
  705	 * Count the number of continuous items that we can insert in a batch.
 706	 */
 707	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 708	       free_space) {
 709		total_size += next->data_len + sizeof(struct btrfs_item);
 710		list_add_tail(&next->tree_list, &head);
 711		nitems++;
 712
 713		curr = next;
 714		next = __btrfs_next_delayed_item(curr);
 715		if (!next)
 716			break;
 717
 718		if (!btrfs_is_continuous_delayed_item(curr, next))
 719			break;
 720	}
 721
 722	if (!nitems) {
 723		ret = 0;
 724		goto out;
 725	}
 726
 727	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 728	if (!keys) {
 729		ret = -ENOMEM;
 730		goto out;
 731	}
 732
 733	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 734	if (!data_size) {
 735		ret = -ENOMEM;
 736		goto error;
 737	}
 738
 739	/* get keys of all the delayed items */
 740	i = 0;
 741	list_for_each_entry(next, &head, tree_list) {
 742		keys[i] = next->key;
 743		data_size[i] = next->data_len;
 744		i++;
 745	}
 746
 747	/* insert the keys of the items */
 748	setup_items_for_insert(root, path, keys, data_size, nitems);
 749
 750	/* insert the dir index items */
 751	slot = path->slots[0];
 752	list_for_each_entry_safe(curr, next, &head, tree_list) {
 753		data_ptr = btrfs_item_ptr(leaf, slot, char);
 754		write_extent_buffer(leaf, &curr->data,
 755				    (unsigned long)data_ptr,
 756				    curr->data_len);
 757		slot++;
 758
 759		btrfs_delayed_item_release_metadata(root, curr);
 760
 761		list_del(&curr->tree_list);
 762		btrfs_release_delayed_item(curr);
 763	}
 764
 765error:
 766	kfree(data_size);
 767	kfree(keys);
 768out:
 769	return ret;
 770}
 771
 772/*
  773 * This helper handles simple insertions that don't need to extend the
  774 * item for new data, such as directory name index and inode insertion.
 775 */
 776static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 777				     struct btrfs_root *root,
 778				     struct btrfs_path *path,
 779				     struct btrfs_delayed_item *delayed_item)
 780{
 781	struct extent_buffer *leaf;
 782	unsigned int nofs_flag;
 783	char *ptr;
 784	int ret;
 785
 786	nofs_flag = memalloc_nofs_save();
 787	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 788				      delayed_item->data_len);
 789	memalloc_nofs_restore(nofs_flag);
 790	if (ret < 0 && ret != -EEXIST)
 791		return ret;
 792
 793	leaf = path->nodes[0];
 794
 795	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 796
 797	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 798			    delayed_item->data_len);
 799	btrfs_mark_buffer_dirty(leaf);
 800
 801	btrfs_delayed_item_release_metadata(root, delayed_item);
 802	return 0;
 803}
 804
 805/*
  806 * We insert an item first; then, if there are continuous items, we try
  807 * to insert them into the same leaf.
 808 */
 809static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 810				      struct btrfs_path *path,
 811				      struct btrfs_root *root,
 812				      struct btrfs_delayed_node *node)
 813{
 814	struct btrfs_delayed_item *curr, *prev;
 815	int ret = 0;
 816
 817do_again:
 818	mutex_lock(&node->mutex);
 819	curr = __btrfs_first_delayed_insertion_item(node);
 820	if (!curr)
 821		goto insert_end;
 822
 823	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 824	if (ret < 0) {
 825		btrfs_release_path(path);
 826		goto insert_end;
 827	}
 828
 829	prev = curr;
 830	curr = __btrfs_next_delayed_item(prev);
 831	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 832		/* insert the continuous items into the same leaf */
 833		path->slots[0]++;
 834		btrfs_batch_insert_items(root, path, curr);
 835	}
 836	btrfs_release_delayed_item(prev);
 837	btrfs_mark_buffer_dirty(path->nodes[0]);
 838
 839	btrfs_release_path(path);
 840	mutex_unlock(&node->mutex);
 841	goto do_again;
 842
 843insert_end:
 844	mutex_unlock(&node->mutex);
 845	return ret;
 846}
 847
 848static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 849				    struct btrfs_root *root,
 850				    struct btrfs_path *path,
 851				    struct btrfs_delayed_item *item)
 852{
 853	struct btrfs_delayed_item *curr, *next;
 854	struct extent_buffer *leaf;
 855	struct btrfs_key key;
 856	struct list_head head;
 857	int nitems, i, last_item;
 858	int ret = 0;
 859
 860	BUG_ON(!path->nodes[0]);
 861
 862	leaf = path->nodes[0];
 863
 864	i = path->slots[0];
 865	last_item = btrfs_header_nritems(leaf) - 1;
 866	if (i > last_item)
 867		return -ENOENT;	/* FIXME: Is errno suitable? */
 868
 869	next = item;
 870	INIT_LIST_HEAD(&head);
 871	btrfs_item_key_to_cpu(leaf, &key, i);
 872	nitems = 0;
 873	/*
  874	 * Count the number of dir index items that we can delete in a batch.
 875	 */
 876	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 877		list_add_tail(&next->tree_list, &head);
 878		nitems++;
 879
 880		curr = next;
 881		next = __btrfs_next_delayed_item(curr);
 882		if (!next)
 883			break;
 884
 885		if (!btrfs_is_continuous_delayed_item(curr, next))
 886			break;
 887
 888		i++;
 889		if (i > last_item)
 890			break;
 891		btrfs_item_key_to_cpu(leaf, &key, i);
 892	}
 893
 894	if (!nitems)
 895		return 0;
 896
 897	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 898	if (ret)
 899		goto out;
 900
 901	list_for_each_entry_safe(curr, next, &head, tree_list) {
 902		btrfs_delayed_item_release_metadata(root, curr);
 903		list_del(&curr->tree_list);
 904		btrfs_release_delayed_item(curr);
 905	}
 906
 907out:
 908	return ret;
 909}
 910
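/*
 * Run the deletion tree of a delayed node: look up each queued key in
 * the fs tree and delete the matching leaf items, batching contiguous
 * dir index keys via btrfs_batch_delete_items(). Keys that no longer
 * exist on disk are simply dropped.
 */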
 911static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 912				      struct btrfs_path *path,
 913				      struct btrfs_root *root,
 914				      struct btrfs_delayed_node *node)
 915{
 916	struct btrfs_delayed_item *curr, *prev;
 917	unsigned int nofs_flag;
 918	int ret = 0;
 919
 920do_again:
 921	mutex_lock(&node->mutex);
 922	curr = __btrfs_first_delayed_deletion_item(node);
 923	if (!curr)
 924		goto delete_fail;
 925
 926	nofs_flag = memalloc_nofs_save();
 927	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 928	memalloc_nofs_restore(nofs_flag);
 929	if (ret < 0)
 930		goto delete_fail;
 931	else if (ret > 0) {
 932		/*
  933		 * Can't find the item this node points to, so the node
  934		 * is invalid; just drop it.
 935		 */
 936		prev = curr;
 937		curr = __btrfs_next_delayed_item(prev);
 938		btrfs_release_delayed_item(prev);
 939		ret = 0;
 940		btrfs_release_path(path);
 941		if (curr) {
 942			mutex_unlock(&node->mutex);
 943			goto do_again;
 944		} else
 945			goto delete_fail;
 946	}
 947
 948	btrfs_batch_delete_items(trans, root, path, curr);
 949	btrfs_release_path(path);
 950	mutex_unlock(&node->mutex);
 951	goto do_again;
 952
 953delete_fail:
 954	btrfs_release_path(path);
 955	mutex_unlock(&node->mutex);
 956	return ret;
 957}
 958
 959static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 960{
 961	struct btrfs_delayed_root *delayed_root;
 962
 963	if (delayed_node &&
 964	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 965		BUG_ON(!delayed_node->root);
 966		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 967		delayed_node->count--;
 968
 969		delayed_root = delayed_node->root->fs_info->delayed_root;
 970		finish_one_item(delayed_root);
 971	}
 972}
 973
 974static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
 975{
 976
 977	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
 978		struct btrfs_delayed_root *delayed_root;
 979
 980		ASSERT(delayed_node->root);
 981		delayed_node->count--;
 982
 983		delayed_root = delayed_node->root->fs_info->delayed_root;
 984		finish_one_item(delayed_root);
 985	}
 986}
 987
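/*
 * Write the cached inode_item back into the on-disk inode item and, if
 * BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single remaining
 * INODE_REF/EXTREF as well. Called with node->mutex held.
 */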
 988static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 989					struct btrfs_root *root,
 990					struct btrfs_path *path,
 991					struct btrfs_delayed_node *node)
 992{
 993	struct btrfs_fs_info *fs_info = root->fs_info;
 994	struct btrfs_key key;
 995	struct btrfs_inode_item *inode_item;
 996	struct extent_buffer *leaf;
 997	unsigned int nofs_flag;
 998	int mod;
 999	int ret;
1000
1001	key.objectid = node->inode_id;
1002	key.type = BTRFS_INODE_ITEM_KEY;
1003	key.offset = 0;
1004
1005	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1006		mod = -1;
1007	else
1008		mod = 1;
1009
1010	nofs_flag = memalloc_nofs_save();
1011	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1012	memalloc_nofs_restore(nofs_flag);
1013	if (ret > 0)
1014		ret = -ENOENT;
1015	if (ret < 0)
1016		goto out;
1017
1018	leaf = path->nodes[0];
1019	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1020				    struct btrfs_inode_item);
1021	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1022			    sizeof(struct btrfs_inode_item));
1023	btrfs_mark_buffer_dirty(leaf);
1024
1025	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1026		goto out;
1027
1028	path->slots[0]++;
1029	if (path->slots[0] >= btrfs_header_nritems(leaf))
1030		goto search;
1031again:
1032	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1033	if (key.objectid != node->inode_id)
1034		goto out;
1035
1036	if (key.type != BTRFS_INODE_REF_KEY &&
1037	    key.type != BTRFS_INODE_EXTREF_KEY)
1038		goto out;
1039
1040	/*
 1041	 * Delayed iref deletion is for an inode that has only one link,
 1042	 * so there is only one iref. The case where several irefs are
 1043	 * in the same item doesn't exist.
1044	 */
1045	btrfs_del_item(trans, root, path);
1046out:
1047	btrfs_release_delayed_iref(node);
1048	btrfs_release_path(path);
1049err_out:
1050	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1051	btrfs_release_delayed_inode(node);
1052
1053	/*
1054	 * If we fail to update the delayed inode we need to abort the
1055	 * transaction, because we could leave the inode with the improper
1056	 * counts behind.
1057	 */
1058	if (ret && ret != -ENOENT)
1059		btrfs_abort_transaction(trans, ret);
1060
1061	return ret;
1062
1063search:
1064	btrfs_release_path(path);
1065
1066	key.type = BTRFS_INODE_EXTREF_KEY;
1067	key.offset = -1;
1068
1069	nofs_flag = memalloc_nofs_save();
1070	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1071	memalloc_nofs_restore(nofs_flag);
1072	if (ret < 0)
1073		goto err_out;
1074	ASSERT(ret);
1075
1076	ret = 0;
1077	leaf = path->nodes[0];
1078	path->slots[0]--;
1079	goto again;
1080}
1081
1082static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1083					     struct btrfs_root *root,
1084					     struct btrfs_path *path,
1085					     struct btrfs_delayed_node *node)
1086{
1087	int ret;
1088
1089	mutex_lock(&node->mutex);
1090	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1091		mutex_unlock(&node->mutex);
1092		return 0;
1093	}
1094
1095	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1096	mutex_unlock(&node->mutex);
1097	return ret;
1098}
1099
1100static inline int
1101__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1102				   struct btrfs_path *path,
1103				   struct btrfs_delayed_node *node)
1104{
1105	int ret;
1106
1107	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1108	if (ret)
1109		return ret;
1110
1111	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1112	if (ret)
1113		return ret;
1114
1115	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1116	return ret;
1117}
1118
1119/*
1120 * Called when committing the transaction.
1121 * Returns 0 on success.
1122 * Returns < 0 on error and returns with an aborted transaction with any
1123 * outstanding delayed items cleaned up.
1124 */
1125static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1126{
1127	struct btrfs_fs_info *fs_info = trans->fs_info;
1128	struct btrfs_delayed_root *delayed_root;
1129	struct btrfs_delayed_node *curr_node, *prev_node;
1130	struct btrfs_path *path;
1131	struct btrfs_block_rsv *block_rsv;
1132	int ret = 0;
1133	bool count = (nr > 0);
1134
1135	if (TRANS_ABORTED(trans))
1136		return -EIO;
1137
1138	path = btrfs_alloc_path();
1139	if (!path)
1140		return -ENOMEM;
1141
1142	block_rsv = trans->block_rsv;
1143	trans->block_rsv = &fs_info->delayed_block_rsv;
1144
1145	delayed_root = fs_info->delayed_root;
1146
1147	curr_node = btrfs_first_delayed_node(delayed_root);
1148	while (curr_node && (!count || nr--)) {
1149		ret = __btrfs_commit_inode_delayed_items(trans, path,
1150							 curr_node);
1151		if (ret) {
1152			btrfs_release_delayed_node(curr_node);
1153			curr_node = NULL;
1154			btrfs_abort_transaction(trans, ret);
1155			break;
1156		}
1157
1158		prev_node = curr_node;
1159		curr_node = btrfs_next_delayed_node(curr_node);
1160		btrfs_release_delayed_node(prev_node);
1161	}
1162
1163	if (curr_node)
1164		btrfs_release_delayed_node(curr_node);
1165	btrfs_free_path(path);
1166	trans->block_rsv = block_rsv;
1167
1168	return ret;
1169}
1170
1171int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1172{
1173	return __btrfs_run_delayed_items(trans, -1);
1174}
1175
1176int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1177{
1178	return __btrfs_run_delayed_items(trans, nr);
1179}
1180
1181int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1182				     struct btrfs_inode *inode)
1183{
1184	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1185	struct btrfs_path *path;
1186	struct btrfs_block_rsv *block_rsv;
1187	int ret;
1188
1189	if (!delayed_node)
1190		return 0;
1191
1192	mutex_lock(&delayed_node->mutex);
1193	if (!delayed_node->count) {
1194		mutex_unlock(&delayed_node->mutex);
1195		btrfs_release_delayed_node(delayed_node);
1196		return 0;
1197	}
1198	mutex_unlock(&delayed_node->mutex);
1199
1200	path = btrfs_alloc_path();
1201	if (!path) {
1202		btrfs_release_delayed_node(delayed_node);
1203		return -ENOMEM;
1204	}
1205
1206	block_rsv = trans->block_rsv;
1207	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1208
1209	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1210
1211	btrfs_release_delayed_node(delayed_node);
1212	btrfs_free_path(path);
1213	trans->block_rsv = block_rsv;
1214
1215	return ret;
1216}
1217
1218int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1219{
1220	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1221	struct btrfs_trans_handle *trans;
1222	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1223	struct btrfs_path *path;
1224	struct btrfs_block_rsv *block_rsv;
1225	int ret;
1226
1227	if (!delayed_node)
1228		return 0;
1229
1230	mutex_lock(&delayed_node->mutex);
1231	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1232		mutex_unlock(&delayed_node->mutex);
1233		btrfs_release_delayed_node(delayed_node);
1234		return 0;
1235	}
1236	mutex_unlock(&delayed_node->mutex);
1237
1238	trans = btrfs_join_transaction(delayed_node->root);
1239	if (IS_ERR(trans)) {
1240		ret = PTR_ERR(trans);
1241		goto out;
1242	}
1243
1244	path = btrfs_alloc_path();
1245	if (!path) {
1246		ret = -ENOMEM;
1247		goto trans_out;
1248	}
1249
1250	block_rsv = trans->block_rsv;
1251	trans->block_rsv = &fs_info->delayed_block_rsv;
1252
1253	mutex_lock(&delayed_node->mutex);
1254	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1255		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1256						   path, delayed_node);
1257	else
1258		ret = 0;
1259	mutex_unlock(&delayed_node->mutex);
1260
1261	btrfs_free_path(path);
1262	trans->block_rsv = block_rsv;
1263trans_out:
1264	btrfs_end_transaction(trans);
1265	btrfs_btree_balance_dirty(fs_info);
1266out:
1267	btrfs_release_delayed_node(delayed_node);
1268
1269	return ret;
1270}
1271
1272void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1273{
1274	struct btrfs_delayed_node *delayed_node;
1275
1276	delayed_node = READ_ONCE(inode->delayed_node);
1277	if (!delayed_node)
1278		return;
1279
1280	inode->delayed_node = NULL;
1281	btrfs_release_delayed_node(delayed_node);
1282}
1283
1284struct btrfs_async_delayed_work {
1285	struct btrfs_delayed_root *delayed_root;
1286	int nr;
1287	struct btrfs_work work;
1288};
1289
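/*
 * Background worker: repeatedly take a prepared delayed node, join a
 * transaction and commit the node's items, stopping once the backlog
 * drops below BTRFS_DELAYED_BACKGROUND / 2 or enough nodes were done
 * (async_work->nr, or up to BTRFS_DELAYED_WRITEBACK when nr == 0).
 */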
1290static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1291{
1292	struct btrfs_async_delayed_work *async_work;
1293	struct btrfs_delayed_root *delayed_root;
1294	struct btrfs_trans_handle *trans;
1295	struct btrfs_path *path;
1296	struct btrfs_delayed_node *delayed_node = NULL;
1297	struct btrfs_root *root;
1298	struct btrfs_block_rsv *block_rsv;
1299	int total_done = 0;
1300
1301	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1302	delayed_root = async_work->delayed_root;
1303
1304	path = btrfs_alloc_path();
1305	if (!path)
1306		goto out;
1307
1308	do {
1309		if (atomic_read(&delayed_root->items) <
1310		    BTRFS_DELAYED_BACKGROUND / 2)
1311			break;
1312
1313		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1314		if (!delayed_node)
1315			break;
1316
1317		root = delayed_node->root;
1318
1319		trans = btrfs_join_transaction(root);
1320		if (IS_ERR(trans)) {
1321			btrfs_release_path(path);
1322			btrfs_release_prepared_delayed_node(delayed_node);
1323			total_done++;
1324			continue;
1325		}
1326
1327		block_rsv = trans->block_rsv;
1328		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1329
1330		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1331
1332		trans->block_rsv = block_rsv;
1333		btrfs_end_transaction(trans);
1334		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1335
1336		btrfs_release_path(path);
1337		btrfs_release_prepared_delayed_node(delayed_node);
1338		total_done++;
1339
1340	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1341		 || total_done < async_work->nr);
1342
1343	btrfs_free_path(path);
1344out:
1345	wake_up(&delayed_root->wait);
1346	kfree(async_work);
1347}
1348
1349
1350static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1351				     struct btrfs_fs_info *fs_info, int nr)
1352{
1353	struct btrfs_async_delayed_work *async_work;
1354
1355	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1356	if (!async_work)
1357		return -ENOMEM;
1358
1359	async_work->delayed_root = delayed_root;
1360	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1361			NULL);
1362	async_work->nr = nr;
1363
1364	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1365	return 0;
1366}
1367
1368void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1369{
1370	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1371}
1372
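/*
 * A thread throttled in btrfs_balance_delayed_items() may stop waiting
 * once items_seq has advanced by a full batch since it began waiting
 * (the val < seq case presumably guards against counter wraparound) or
 * the backlog has dropped below BTRFS_DELAYED_BACKGROUND.
 */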
1373static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1374{
1375	int val = atomic_read(&delayed_root->items_seq);
1376
1377	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1378		return 1;
1379
1380	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1381		return 1;
1382
1383	return 0;
1384}
1385
1386void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1387{
1388	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1389
1390	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1391		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1392		return;
1393
1394	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1395		int seq;
1396		int ret;
1397
1398		seq = atomic_read(&delayed_root->items_seq);
1399
1400		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1401		if (ret)
1402			return;
1403
1404		wait_event_interruptible(delayed_root->wait,
1405					 could_end_wait(delayed_root, seq));
1406		return;
1407	}
1408
1409	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1410}
1411
1412/* Will return 0 or -ENOMEM */
1413int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1414				   const char *name, int name_len,
1415				   struct btrfs_inode *dir,
1416				   struct btrfs_disk_key *disk_key, u8 type,
1417				   u64 index)
1418{
1419	struct btrfs_delayed_node *delayed_node;
1420	struct btrfs_delayed_item *delayed_item;
1421	struct btrfs_dir_item *dir_item;
1422	int ret;
1423
1424	delayed_node = btrfs_get_or_create_delayed_node(dir);
1425	if (IS_ERR(delayed_node))
1426		return PTR_ERR(delayed_node);
1427
1428	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1429	if (!delayed_item) {
1430		ret = -ENOMEM;
1431		goto release_node;
1432	}
1433
1434	delayed_item->key.objectid = btrfs_ino(dir);
1435	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1436	delayed_item->key.offset = index;
1437
1438	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1439	dir_item->location = *disk_key;
1440	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1441	btrfs_set_stack_dir_data_len(dir_item, 0);
1442	btrfs_set_stack_dir_name_len(dir_item, name_len);
1443	btrfs_set_stack_dir_type(dir_item, type);
1444	memcpy((char *)(dir_item + 1), name, name_len);
1445
1446	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1447	/*
 1448	 * We have reserved enough space when starting a new transaction,
 1449	 * so a metadata reservation failure is impossible.
1450	 */
1451	BUG_ON(ret);
1452
1453	mutex_lock(&delayed_node->mutex);
1454	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1455	if (unlikely(ret)) {
1456		btrfs_err(trans->fs_info,
1457			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1458			  name_len, name, delayed_node->root->root_key.objectid,
1459			  delayed_node->inode_id, ret);
1460		BUG();
1461	}
1462	mutex_unlock(&delayed_node->mutex);
1463
1464release_node:
1465	btrfs_release_delayed_node(delayed_node);
1466	return ret;
1467}
1468
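/*
 * If the dir index being deleted is still queued as a delayed
 * insertion, the two operations cancel out: drop the queued insertion
 * and its reservation instead of queueing a deletion. Returns 0 when
 * cancelled, 1 when no matching insertion was found.
 */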
1469static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1470					       struct btrfs_delayed_node *node,
1471					       struct btrfs_key *key)
1472{
1473	struct btrfs_delayed_item *item;
1474
1475	mutex_lock(&node->mutex);
1476	item = __btrfs_lookup_delayed_insertion_item(node, key);
1477	if (!item) {
1478		mutex_unlock(&node->mutex);
1479		return 1;
1480	}
1481
1482	btrfs_delayed_item_release_metadata(node->root, item);
1483	btrfs_release_delayed_item(item);
1484	mutex_unlock(&node->mutex);
1485	return 0;
1486}
1487
1488int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1489				   struct btrfs_inode *dir, u64 index)
1490{
1491	struct btrfs_delayed_node *node;
1492	struct btrfs_delayed_item *item;
1493	struct btrfs_key item_key;
1494	int ret;
1495
1496	node = btrfs_get_or_create_delayed_node(dir);
1497	if (IS_ERR(node))
1498		return PTR_ERR(node);
1499
1500	item_key.objectid = btrfs_ino(dir);
1501	item_key.type = BTRFS_DIR_INDEX_KEY;
1502	item_key.offset = index;
1503
1504	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1505						  &item_key);
1506	if (!ret)
1507		goto end;
1508
1509	item = btrfs_alloc_delayed_item(0);
1510	if (!item) {
1511		ret = -ENOMEM;
1512		goto end;
1513	}
1514
1515	item->key = item_key;
1516
1517	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1518	/*
 1519	 * We have reserved enough space when starting a new transaction,
 1520	 * so a metadata reservation failure is impossible.
1521	 */
1522	if (ret < 0) {
1523		btrfs_err(trans->fs_info,
 1524"metadata reservation failed for delayed dir item deletion, should have been reserved");
1525		btrfs_release_delayed_item(item);
1526		goto end;
1527	}
1528
1529	mutex_lock(&node->mutex);
1530	ret = __btrfs_add_delayed_deletion_item(node, item);
1531	if (unlikely(ret)) {
1532		btrfs_err(trans->fs_info,
1533			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1534			  index, node->root->root_key.objectid,
1535			  node->inode_id, ret);
1536		btrfs_delayed_item_release_metadata(dir->root, item);
1537		btrfs_release_delayed_item(item);
1538	}
1539	mutex_unlock(&node->mutex);
1540end:
1541	btrfs_release_delayed_node(node);
1542	return ret;
1543}
1544
1545int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1546{
1547	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1548
1549	if (!delayed_node)
1550		return -ENOENT;
1551
1552	/*
 1553	 * Since we hold the i_mutex of this directory, it is impossible for
 1554	 * a new directory index to be added to the delayed node or for
 1555	 * index_cnt to be updated now. So we needn't lock the delayed node.
1556	 */
1557	if (!delayed_node->index_cnt) {
1558		btrfs_release_delayed_node(delayed_node);
1559		return -EINVAL;
1560	}
1561
1562	inode->index_cnt = delayed_node->index_cnt;
1563	btrfs_release_delayed_node(delayed_node);
1564	return 0;
1565}
1566
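/*
 * Snapshot the queued insertion and deletion dir index items onto
 * @ins_list and @del_list so readdir can merge them with the on-disk
 * index. Only one readdir may use the lists at a time, so the shared
 * inode lock is traded for an exclusive one here and downgraded again
 * in btrfs_readdir_put_delayed_items().
 */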
1567bool btrfs_readdir_get_delayed_items(struct inode *inode,
1568				     struct list_head *ins_list,
1569				     struct list_head *del_list)
1570{
1571	struct btrfs_delayed_node *delayed_node;
1572	struct btrfs_delayed_item *item;
1573
1574	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1575	if (!delayed_node)
1576		return false;
1577
1578	/*
1579	 * We can only do one readdir with delayed items at a time because of
1580	 * item->readdir_list.
1581	 */
1582	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1583	btrfs_inode_lock(inode, 0);
1584
1585	mutex_lock(&delayed_node->mutex);
1586	item = __btrfs_first_delayed_insertion_item(delayed_node);
1587	while (item) {
1588		refcount_inc(&item->refs);
1589		list_add_tail(&item->readdir_list, ins_list);
1590		item = __btrfs_next_delayed_item(item);
1591	}
1592
1593	item = __btrfs_first_delayed_deletion_item(delayed_node);
1594	while (item) {
1595		refcount_inc(&item->refs);
1596		list_add_tail(&item->readdir_list, del_list);
1597		item = __btrfs_next_delayed_item(item);
1598	}
1599	mutex_unlock(&delayed_node->mutex);
1600	/*
 1601	 * This delayed node is still cached in the btrfs inode, so refs
 1602	 * must be > 1 now, and we needn't check whether it is going to
 1603	 * be freed or not.
 1604	 *
 1605	 * Besides that, this function is used for readdir; we do not
 1606	 * insert/delete delayed items during this period, so we also
 1607	 * needn't requeue or dequeue this delayed node.
1608	 */
1609	refcount_dec(&delayed_node->refs);
1610
1611	return true;
1612}
1613
1614void btrfs_readdir_put_delayed_items(struct inode *inode,
1615				     struct list_head *ins_list,
1616				     struct list_head *del_list)
1617{
1618	struct btrfs_delayed_item *curr, *next;
1619
1620	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1621		list_del(&curr->readdir_list);
1622		if (refcount_dec_and_test(&curr->refs))
1623			kfree(curr);
1624	}
1625
1626	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1627		list_del(&curr->readdir_list);
1628		if (refcount_dec_and_test(&curr->refs))
1629			kfree(curr);
1630	}
1631
1632	/*
1633	 * The VFS is going to do up_read(), so we need to downgrade back to a
1634	 * read lock.
1635	 */
1636	downgrade_write(&inode->i_rwsem);
1637}
1638
1639int btrfs_should_delete_dir_index(struct list_head *del_list,
1640				  u64 index)
1641{
1642	struct btrfs_delayed_item *curr;
1643	int ret = 0;
1644
1645	list_for_each_entry(curr, del_list, readdir_list) {
1646		if (curr->key.offset > index)
1647			break;
1648		if (curr->key.offset == index) {
1649			ret = 1;
1650			break;
1651		}
1652	}
1653	return ret;
1654}
1655
1656/*
1657 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1658 *
1659 */
1660int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1661				    struct list_head *ins_list)
1662{
1663	struct btrfs_dir_item *di;
1664	struct btrfs_delayed_item *curr, *next;
1665	struct btrfs_key location;
1666	char *name;
1667	int name_len;
1668	int over = 0;
1669	unsigned char d_type;
1670
1671	if (list_empty(ins_list))
1672		return 0;
1673
1674	/*
 1675	 * Changing the data of the delayed items is impossible, so
 1676	 * we needn't lock them. And since we hold the i_mutex of the
 1677	 * directory, nobody can delete any directory index now.
1678	 */
1679	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1680		list_del(&curr->readdir_list);
1681
1682		if (curr->key.offset < ctx->pos) {
1683			if (refcount_dec_and_test(&curr->refs))
1684				kfree(curr);
1685			continue;
1686		}
1687
1688		ctx->pos = curr->key.offset;
1689
1690		di = (struct btrfs_dir_item *)curr->data;
1691		name = (char *)(di + 1);
1692		name_len = btrfs_stack_dir_name_len(di);
1693
1694		d_type = fs_ftype_to_dtype(di->type);
1695		btrfs_disk_key_to_cpu(&location, &di->location);
1696
1697		over = !dir_emit(ctx, name, name_len,
1698			       location.objectid, d_type);
1699
1700		if (refcount_dec_and_test(&curr->refs))
1701			kfree(curr);
1702
1703		if (over)
1704			return 1;
1705		ctx->pos++;
1706	}
1707	return 0;
1708}
1709
1710static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1711				  struct btrfs_inode_item *inode_item,
1712				  struct inode *inode)
1713{
1714	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1715	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1716	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1717	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1718	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1719	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1720	btrfs_set_stack_inode_generation(inode_item,
1721					 BTRFS_I(inode)->generation);
1722	btrfs_set_stack_inode_sequence(inode_item,
1723				       inode_peek_iversion(inode));
1724	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1725	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1726	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1727	btrfs_set_stack_inode_block_group(inode_item, 0);
1728
1729	btrfs_set_stack_timespec_sec(&inode_item->atime,
1730				     inode->i_atime.tv_sec);
1731	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1732				      inode->i_atime.tv_nsec);
1733
1734	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1735				     inode->i_mtime.tv_sec);
1736	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1737				      inode->i_mtime.tv_nsec);
1738
1739	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1740				     inode->i_ctime.tv_sec);
1741	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1742				      inode->i_ctime.tv_nsec);
1743
1744	btrfs_set_stack_timespec_sec(&inode_item->otime,
1745				     BTRFS_I(inode)->i_otime.tv_sec);
1746	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1747				     BTRFS_I(inode)->i_otime.tv_nsec);
1748}
1749
1750int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1751{
1752	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1753	struct btrfs_delayed_node *delayed_node;
1754	struct btrfs_inode_item *inode_item;
1755
1756	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1757	if (!delayed_node)
1758		return -ENOENT;
1759
1760	mutex_lock(&delayed_node->mutex);
1761	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1762		mutex_unlock(&delayed_node->mutex);
1763		btrfs_release_delayed_node(delayed_node);
1764		return -ENOENT;
1765	}
1766
1767	inode_item = &delayed_node->inode_item;
1768
1769	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1770	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1771	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1772	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1773			round_up(i_size_read(inode), fs_info->sectorsize));
1774	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1775	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1776	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1777	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
 1778	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1779
1780	inode_set_iversion_queried(inode,
1781				   btrfs_stack_inode_sequence(inode_item));
1782	inode->i_rdev = 0;
1783	*rdev = btrfs_stack_inode_rdev(inode_item);
1784	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1785
1786	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1787	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1788
1789	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1790	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1791
1792	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1793	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1794
1795	BTRFS_I(inode)->i_otime.tv_sec =
1796		btrfs_stack_timespec_sec(&inode_item->otime);
1797	BTRFS_I(inode)->i_otime.tv_nsec =
1798		btrfs_stack_timespec_nsec(&inode_item->otime);
1799
1800	inode->i_generation = BTRFS_I(inode)->generation;
1801	BTRFS_I(inode)->index_cnt = (u64)-1;
1802
1803	mutex_unlock(&delayed_node->mutex);
1804	btrfs_release_delayed_node(delayed_node);
1805	return 0;
1806}
1807
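/*
 * Record an inode update in the delayed node: copy the VFS inode into
 * node->inode_item, mark the node INODE_DIRTY, and reserve metadata
 * space the first time. The copy reaches the fs tree only when the
 * delayed items are run.
 */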
1808int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1809			       struct btrfs_root *root,
1810			       struct btrfs_inode *inode)
1811{
1812	struct btrfs_delayed_node *delayed_node;
1813	int ret = 0;
1814
1815	delayed_node = btrfs_get_or_create_delayed_node(inode);
1816	if (IS_ERR(delayed_node))
1817		return PTR_ERR(delayed_node);
1818
1819	mutex_lock(&delayed_node->mutex);
1820	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1821		fill_stack_inode_item(trans, &delayed_node->inode_item,
1822				      &inode->vfs_inode);
1823		goto release_node;
1824	}
1825
1826	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1827	if (ret)
1828		goto release_node;
1829
1830	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1831	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1832	delayed_node->count++;
1833	atomic_inc(&root->fs_info->delayed_root->items);
1834release_node:
1835	mutex_unlock(&delayed_node->mutex);
1836	btrfs_release_delayed_node(delayed_node);
1837	return ret;
1838}
1839
1840int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1841{
1842	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1843	struct btrfs_delayed_node *delayed_node;
1844
1845	/*
 1846	 * We don't do delayed inode updates during log recovery because it
 1847	 * leads to enospc problems.  This means we also can't do
 1848	 * delayed inode refs.
1849	 */
1850	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1851		return -EAGAIN;
1852
1853	delayed_node = btrfs_get_or_create_delayed_node(inode);
1854	if (IS_ERR(delayed_node))
1855		return PTR_ERR(delayed_node);
1856
1857	/*
 1858	 * We don't reserve space for inode ref deletion because:
 1859	 * - We ONLY do async inode ref deletion for an inode that has only
 1860	 *   one link (i_nlink == 1), which means there is only one inode ref.
 1861	 *   And in most cases, the inode ref and the inode item are in the
 1862	 *   same leaf, and we will deal with them at the same time.
 1863	 *   Since we are sure we will reserve space for the inode item,
 1864	 *   it is unnecessary to reserve space for the inode ref deletion.
 1865	 * - If the inode ref and the inode item are not in the same leaf,
 1866	 *   we also needn't worry about the enospc problem, because we reserve
 1867	 *   much more space for the inode update than it needs.
 1868	 * - At the worst, we can steal some space from the global reservation;
 1869	 *   that is very rare.
1870	 */
1871	mutex_lock(&delayed_node->mutex);
1872	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1873		goto release_node;
1874
1875	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1876	delayed_node->count++;
1877	atomic_inc(&fs_info->delayed_root->items);
1878release_node:
1879	mutex_unlock(&delayed_node->mutex);
1880	btrfs_release_delayed_node(delayed_node);
1881	return 0;
1882}
1883
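/*
 * Drop everything a delayed node still holds: every pending insertion
 * and deletion item (releasing their metadata reservations), the
 * delayed iref, and, if the inode item is dirty, the inode item and
 * its reservation as well.
 */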
1884static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1885{
1886	struct btrfs_root *root = delayed_node->root;
1887	struct btrfs_fs_info *fs_info = root->fs_info;
1888	struct btrfs_delayed_item *curr_item, *prev_item;
1889
1890	mutex_lock(&delayed_node->mutex);
1891	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1892	while (curr_item) {
1893		btrfs_delayed_item_release_metadata(root, curr_item);
1894		prev_item = curr_item;
1895		curr_item = __btrfs_next_delayed_item(prev_item);
1896		btrfs_release_delayed_item(prev_item);
1897	}
1898
1899	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1900	while (curr_item) {
1901		btrfs_delayed_item_release_metadata(root, curr_item);
1902		prev_item = curr_item;
1903		curr_item = __btrfs_next_delayed_item(prev_item);
1904		btrfs_release_delayed_item(prev_item);
1905	}
1906
1907	btrfs_release_delayed_iref(delayed_node);
1908
1909	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1910		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1911		btrfs_release_delayed_inode(delayed_node);
1912	}
1913	mutex_unlock(&delayed_node->mutex);
1914}
1915
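/* Drop all pending delayed items of a single inode. */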
1916void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1917{
1918	struct btrfs_delayed_node *delayed_node;
1919
1920	delayed_node = btrfs_get_delayed_node(inode);
1921	if (!delayed_node)
1922		return;
1923
1924	__btrfs_kill_delayed_node(delayed_node);
1925	btrfs_release_delayed_node(delayed_node);
1926}
1927
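/*
 * Drop the delayed nodes of an entire root.  The radix tree is scanned
 * in batches of up to 8 nodes; each batch is referenced under
 * inode_lock and killed after the lock is dropped.
 */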
1928void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1929{
1930	u64 inode_id = 0;
1931	struct btrfs_delayed_node *delayed_nodes[8];
1932	int i, n;
1933
1934	while (1) {
1935		spin_lock(&root->inode_lock);
1936		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1937					   (void **)delayed_nodes, inode_id,
1938					   ARRAY_SIZE(delayed_nodes));
1939		if (!n) {
1940			spin_unlock(&root->inode_lock);
1941			break;
1942		}
1943
1944		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1945		for (i = 0; i < n; i++) {
1946			/*
1947			 * Don't increase refs in case the node is dead and
1948			 * about to be removed from the tree in the loop below
1949			 */
1950			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1951				delayed_nodes[i] = NULL;
1952		}
1953		spin_unlock(&root->inode_lock);
1954
1955		for (i = 0; i < n; i++) {
1956			if (!delayed_nodes[i])
1957				continue;
1958			__btrfs_kill_delayed_node(delayed_nodes[i]);
1959			btrfs_release_delayed_node(delayed_nodes[i]);
1960		}
1961	}
1962}
1963
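/*
 * Walk the fs-wide list of delayed nodes and drop every node on it,
 * e.g. when aborting a transaction.
 */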
1964void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1965{
1966	struct btrfs_delayed_node *curr_node, *prev_node;
1967
1968	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1969	while (curr_node) {
1970		__btrfs_kill_delayed_node(curr_node);
1971
1972		prev_node = curr_node;
1973		curr_node = btrfs_next_delayed_node(curr_node);
1974		btrfs_release_delayed_node(prev_node);
1975	}
1976}
1977
v4.10.11
 
   1/*
   2 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   3 * Written by Miao Xie <miaox@cn.fujitsu.com>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17 * Boston, MA 021110-1307, USA.
  18 */
  19
  20#include <linux/slab.h>
 
 
 
  21#include "delayed-inode.h"
  22#include "disk-io.h"
  23#include "transaction.h"
  24#include "ctree.h"
 
 
  25
  26#define BTRFS_DELAYED_WRITEBACK		512
  27#define BTRFS_DELAYED_BACKGROUND	128
  28#define BTRFS_DELAYED_BATCH		16
  29
  30static struct kmem_cache *delayed_node_cache;
  31
  32int __init btrfs_delayed_inode_init(void)
  33{
  34	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  35					sizeof(struct btrfs_delayed_node),
  36					0,
  37					SLAB_MEM_SPREAD,
  38					NULL);
  39	if (!delayed_node_cache)
  40		return -ENOMEM;
  41	return 0;
  42}
  43
  44void btrfs_delayed_inode_exit(void)
  45{
  46	kmem_cache_destroy(delayed_node_cache);
  47}
  48
  49static inline void btrfs_init_delayed_node(
  50				struct btrfs_delayed_node *delayed_node,
  51				struct btrfs_root *root, u64 inode_id)
  52{
  53	delayed_node->root = root;
  54	delayed_node->inode_id = inode_id;
  55	atomic_set(&delayed_node->refs, 0);
  56	delayed_node->ins_root = RB_ROOT;
  57	delayed_node->del_root = RB_ROOT;
  58	mutex_init(&delayed_node->mutex);
  59	INIT_LIST_HEAD(&delayed_node->n_list);
  60	INIT_LIST_HEAD(&delayed_node->p_list);
  61}
  62
  63static inline int btrfs_is_continuous_delayed_item(
  64					struct btrfs_delayed_item *item1,
  65					struct btrfs_delayed_item *item2)
  66{
  67	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  68	    item1->key.objectid == item2->key.objectid &&
  69	    item1->key.type == item2->key.type &&
  70	    item1->key.offset + 1 == item2->key.offset)
  71		return 1;
  72	return 0;
  73}
  74
  75static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
 
  76{
  77	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
  78	struct btrfs_root *root = btrfs_inode->root;
  79	u64 ino = btrfs_ino(inode);
  80	struct btrfs_delayed_node *node;
  81
  82	node = ACCESS_ONCE(btrfs_inode->delayed_node);
  83	if (node) {
  84		atomic_inc(&node->refs);
  85		return node;
  86	}
  87
  88	spin_lock(&root->inode_lock);
  89	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 
  90	if (node) {
  91		if (btrfs_inode->delayed_node) {
  92			atomic_inc(&node->refs);	/* can be accessed */
  93			BUG_ON(btrfs_inode->delayed_node != node);
  94			spin_unlock(&root->inode_lock);
  95			return node;
  96		}
  97		btrfs_inode->delayed_node = node;
  98		/* can be accessed and cached in the inode */
  99		atomic_add(2, &node->refs);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 100		spin_unlock(&root->inode_lock);
 101		return node;
 102	}
 103	spin_unlock(&root->inode_lock);
 104
 105	return NULL;
 106}
 107
 108/* Will return either the node or PTR_ERR(-ENOMEM) */
 109static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 110							struct inode *inode)
 111{
 112	struct btrfs_delayed_node *node;
 113	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 114	struct btrfs_root *root = btrfs_inode->root;
 115	u64 ino = btrfs_ino(inode);
 116	int ret;
 117
 118again:
 119	node = btrfs_get_delayed_node(inode);
 120	if (node)
 121		return node;
 122
 123	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 124	if (!node)
 125		return ERR_PTR(-ENOMEM);
 126	btrfs_init_delayed_node(node, root, ino);
 127
 128	/* cached in the btrfs inode and can be accessed */
 129	atomic_add(2, &node->refs);
 130
 131	ret = radix_tree_preload(GFP_NOFS);
 132	if (ret) {
 133		kmem_cache_free(delayed_node_cache, node);
 134		return ERR_PTR(ret);
 135	}
 136
 137	spin_lock(&root->inode_lock);
 138	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 139	if (ret == -EEXIST) {
 140		spin_unlock(&root->inode_lock);
 141		kmem_cache_free(delayed_node_cache, node);
 142		radix_tree_preload_end();
 143		goto again;
 144	}
 145	btrfs_inode->delayed_node = node;
 146	spin_unlock(&root->inode_lock);
 147	radix_tree_preload_end();
 148
 149	return node;
 150}
 151
 152/*
 153 * Call it when holding delayed_node->mutex
 154 *
 155 * If mod = 1, add this node into the prepared list.
 156 */
 157static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 158				     struct btrfs_delayed_node *node,
 159				     int mod)
 160{
 161	spin_lock(&root->lock);
 162	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 163		if (!list_empty(&node->p_list))
 164			list_move_tail(&node->p_list, &root->prepare_list);
 165		else if (mod)
 166			list_add_tail(&node->p_list, &root->prepare_list);
 167	} else {
 168		list_add_tail(&node->n_list, &root->node_list);
 169		list_add_tail(&node->p_list, &root->prepare_list);
 170		atomic_inc(&node->refs);	/* inserted into list */
 171		root->nodes++;
 172		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 173	}
 174	spin_unlock(&root->lock);
 175}
 176
 177/* Call it when holding delayed_node->mutex */
 178static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 179				       struct btrfs_delayed_node *node)
 180{
 181	spin_lock(&root->lock);
 182	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 183		root->nodes--;
 184		atomic_dec(&node->refs);	/* not in the list */
 185		list_del_init(&node->n_list);
 186		if (!list_empty(&node->p_list))
 187			list_del_init(&node->p_list);
 188		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 189	}
 190	spin_unlock(&root->lock);
 191}
 192
 193static struct btrfs_delayed_node *btrfs_first_delayed_node(
 194			struct btrfs_delayed_root *delayed_root)
 195{
 196	struct list_head *p;
 197	struct btrfs_delayed_node *node = NULL;
 198
 199	spin_lock(&delayed_root->lock);
 200	if (list_empty(&delayed_root->node_list))
 201		goto out;
 202
 203	p = delayed_root->node_list.next;
 204	node = list_entry(p, struct btrfs_delayed_node, n_list);
 205	atomic_inc(&node->refs);
 206out:
 207	spin_unlock(&delayed_root->lock);
 208
 209	return node;
 210}
 211
 212static struct btrfs_delayed_node *btrfs_next_delayed_node(
 213						struct btrfs_delayed_node *node)
 214{
 215	struct btrfs_delayed_root *delayed_root;
 216	struct list_head *p;
 217	struct btrfs_delayed_node *next = NULL;
 218
 219	delayed_root = node->root->fs_info->delayed_root;
 220	spin_lock(&delayed_root->lock);
 221	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 222		/* not in the list */
 223		if (list_empty(&delayed_root->node_list))
 224			goto out;
 225		p = delayed_root->node_list.next;
 226	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
 227		goto out;
 228	else
 229		p = node->n_list.next;
 230
 231	next = list_entry(p, struct btrfs_delayed_node, n_list);
 232	atomic_inc(&next->refs);
 233out:
 234	spin_unlock(&delayed_root->lock);
 235
 236	return next;
 237}
 238
 239static void __btrfs_release_delayed_node(
 240				struct btrfs_delayed_node *delayed_node,
 241				int mod)
 242{
 243	struct btrfs_delayed_root *delayed_root;
 244
 245	if (!delayed_node)
 246		return;
 247
 248	delayed_root = delayed_node->root->fs_info->delayed_root;
 249
 250	mutex_lock(&delayed_node->mutex);
 251	if (delayed_node->count)
 252		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 253	else
 254		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 255	mutex_unlock(&delayed_node->mutex);
 256
 257	if (atomic_dec_and_test(&delayed_node->refs)) {
 258		bool free = false;
 259		struct btrfs_root *root = delayed_node->root;
 
 260		spin_lock(&root->inode_lock);
 261		if (atomic_read(&delayed_node->refs) == 0) {
 262			radix_tree_delete(&root->delayed_nodes_tree,
 263					  delayed_node->inode_id);
 264			free = true;
 265		}
 
 
 266		spin_unlock(&root->inode_lock);
 267		if (free)
 268			kmem_cache_free(delayed_node_cache, delayed_node);
 269	}
 270}
 271
 272static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 273{
 274	__btrfs_release_delayed_node(node, 0);
 275}
 276
 277static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 278					struct btrfs_delayed_root *delayed_root)
 279{
 280	struct list_head *p;
 281	struct btrfs_delayed_node *node = NULL;
 282
 283	spin_lock(&delayed_root->lock);
 284	if (list_empty(&delayed_root->prepare_list))
 285		goto out;
 286
 287	p = delayed_root->prepare_list.next;
 288	list_del_init(p);
 289	node = list_entry(p, struct btrfs_delayed_node, p_list);
 290	atomic_inc(&node->refs);
 291out:
 292	spin_unlock(&delayed_root->lock);
 293
 294	return node;
 295}
 296
 297static inline void btrfs_release_prepared_delayed_node(
 298					struct btrfs_delayed_node *node)
 299{
 300	__btrfs_release_delayed_node(node, 1);
 301}
 302
 303static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 304{
 305	struct btrfs_delayed_item *item;
 306	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 307	if (item) {
 308		item->data_len = data_len;
 309		item->ins_or_del = 0;
 310		item->bytes_reserved = 0;
 311		item->delayed_node = NULL;
 312		atomic_set(&item->refs, 1);
 313	}
 314	return item;
 315}
 316
 317/*
 318 * __btrfs_lookup_delayed_item - look up the delayed item by key
 319 * @delayed_node: pointer to the delayed node
 320 * @key:	  the key to look up
 321 * @prev:	  used to store the prev item if the right item isn't found
 322 * @next:	  used to store the next item if the right item isn't found
 323 *
 324 * Note: if we don't find the right item, we will return the prev item and
 325 * the next item.
 326 */
 327static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 328				struct rb_root *root,
 329				struct btrfs_key *key,
 330				struct btrfs_delayed_item **prev,
 331				struct btrfs_delayed_item **next)
 332{
 333	struct rb_node *node, *prev_node = NULL;
 334	struct btrfs_delayed_item *delayed_item = NULL;
 335	int ret = 0;
 336
 337	node = root->rb_node;
 338
 339	while (node) {
 340		delayed_item = rb_entry(node, struct btrfs_delayed_item,
 341					rb_node);
 342		prev_node = node;
 343		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 344		if (ret < 0)
 345			node = node->rb_right;
 346		else if (ret > 0)
 347			node = node->rb_left;
 348		else
 349			return delayed_item;
 350	}
 351
 352	if (prev) {
 353		if (!prev_node)
 354			*prev = NULL;
 355		else if (ret < 0)
 356			*prev = delayed_item;
 357		else if ((node = rb_prev(prev_node)) != NULL) {
 358			*prev = rb_entry(node, struct btrfs_delayed_item,
 359					 rb_node);
 360		} else
 361			*prev = NULL;
 362	}
 363
 364	if (next) {
 365		if (!prev_node)
 366			*next = NULL;
 367		else if (ret > 0)
 368			*next = delayed_item;
 369		else if ((node = rb_next(prev_node)) != NULL) {
 370			*next = rb_entry(node, struct btrfs_delayed_item,
 371					 rb_node);
 372		} else
 373			*next = NULL;
 374	}
 375	return NULL;
 376}
 377
 378static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 379					struct btrfs_delayed_node *delayed_node,
 380					struct btrfs_key *key)
 381{
 382	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 383					   NULL, NULL);
 384}
 385
 386static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 387				    struct btrfs_delayed_item *ins,
 388				    int action)
 389{
 390	struct rb_node **p, *node;
 391	struct rb_node *parent_node = NULL;
 392	struct rb_root *root;
 393	struct btrfs_delayed_item *item;
 394	int cmp;
 
 395
 396	if (action == BTRFS_DELAYED_INSERTION_ITEM)
 397		root = &delayed_node->ins_root;
 398	else if (action == BTRFS_DELAYED_DELETION_ITEM)
 399		root = &delayed_node->del_root;
 400	else
 401		BUG();
 402	p = &root->rb_node;
 403	node = &ins->rb_node;
 404
 405	while (*p) {
 406		parent_node = *p;
 407		item = rb_entry(parent_node, struct btrfs_delayed_item,
 408				 rb_node);
 409
 410		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 411		if (cmp < 0)
 412			p = &(*p)->rb_right;
 413		else if (cmp > 0)
 
 414			p = &(*p)->rb_left;
 415		else
 416			return -EEXIST;
 
 417	}
 418
 419	rb_link_node(node, parent_node, p);
 420	rb_insert_color(node, root);
 421	ins->delayed_node = delayed_node;
 422	ins->ins_or_del = action;
 423
 424	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 425	    action == BTRFS_DELAYED_INSERTION_ITEM &&
 426	    ins->key.offset >= delayed_node->index_cnt)
 427			delayed_node->index_cnt = ins->key.offset + 1;
 428
 429	delayed_node->count++;
 430	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 431	return 0;
 432}
 433
 434static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 435					      struct btrfs_delayed_item *item)
 436{
 437	return __btrfs_add_delayed_item(node, item,
 438					BTRFS_DELAYED_INSERTION_ITEM);
 439}
 440
 441static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 442					     struct btrfs_delayed_item *item)
 443{
 444	return __btrfs_add_delayed_item(node, item,
 445					BTRFS_DELAYED_DELETION_ITEM);
 446}
 447
 448static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 449{
 450	int seq = atomic_inc_return(&delayed_root->items_seq);
 451
 452	/*
 453	 * atomic_dec_return implies a barrier for waitqueue_active
 454	 */
 455	if ((atomic_dec_return(&delayed_root->items) <
 456	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 457	    waitqueue_active(&delayed_root->wait))
 458		wake_up(&delayed_root->wait);
 459}
 460
 461static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 462{
 463	struct rb_root *root;
 464	struct btrfs_delayed_root *delayed_root;
 465
 
 
 
 466	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 467
 468	BUG_ON(!delayed_root);
 469	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 470	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 471
 472	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 473		root = &delayed_item->delayed_node->ins_root;
 474	else
 475		root = &delayed_item->delayed_node->del_root;
 476
 477	rb_erase(&delayed_item->rb_node, root);
 478	delayed_item->delayed_node->count--;
 479
 480	finish_one_item(delayed_root);
 481}
 482
 483static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 484{
 485	if (item) {
 486		__btrfs_remove_delayed_item(item);
 487		if (atomic_dec_and_test(&item->refs))
 488			kfree(item);
 489	}
 490}
 491
 492static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 493					struct btrfs_delayed_node *delayed_node)
 494{
 495	struct rb_node *p;
 496	struct btrfs_delayed_item *item = NULL;
 497
 498	p = rb_first(&delayed_node->ins_root);
 499	if (p)
 500		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 501
 502	return item;
 503}
 504
 505static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 506					struct btrfs_delayed_node *delayed_node)
 507{
 508	struct rb_node *p;
 509	struct btrfs_delayed_item *item = NULL;
 510
 511	p = rb_first(&delayed_node->del_root);
 512	if (p)
 513		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 514
 515	return item;
 516}
 517
 518static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 519						struct btrfs_delayed_item *item)
 520{
 521	struct rb_node *p;
 522	struct btrfs_delayed_item *next = NULL;
 523
 524	p = rb_next(&item->rb_node);
 525	if (p)
 526		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 527
 528	return next;
 529}
 530
 531static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 532					       struct btrfs_fs_info *fs_info,
 533					       struct btrfs_delayed_item *item)
 534{
 535	struct btrfs_block_rsv *src_rsv;
 536	struct btrfs_block_rsv *dst_rsv;
 
 537	u64 num_bytes;
 538	int ret;
 539
 540	if (!trans->bytes_reserved)
 541		return 0;
 542
 543	src_rsv = trans->block_rsv;
 544	dst_rsv = &fs_info->delayed_block_rsv;
 545
 546	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 547	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 
 
 
 
 
 
 548	if (!ret) {
 549		trace_btrfs_space_reservation(fs_info, "delayed_item",
 550					      item->key.objectid,
 551					      num_bytes, 1);
 552		item->bytes_reserved = num_bytes;
 553	}
 554
 555	return ret;
 556}
 557
 558static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
 559						struct btrfs_delayed_item *item)
 560{
 561	struct btrfs_block_rsv *rsv;
 
 562
 563	if (!item->bytes_reserved)
 564		return;
 565
 566	rsv = &fs_info->delayed_block_rsv;
 
 
 
 
 567	trace_btrfs_space_reservation(fs_info, "delayed_item",
 568				      item->key.objectid, item->bytes_reserved,
 569				      0);
 570	btrfs_block_rsv_release(fs_info, rsv,
 571				item->bytes_reserved);
 572}
 573
 574static int btrfs_delayed_inode_reserve_metadata(
 575					struct btrfs_trans_handle *trans,
 576					struct btrfs_root *root,
 577					struct inode *inode,
 578					struct btrfs_delayed_node *node)
 579{
 580	struct btrfs_fs_info *fs_info = root->fs_info;
 581	struct btrfs_block_rsv *src_rsv;
 582	struct btrfs_block_rsv *dst_rsv;
 583	u64 num_bytes;
 584	int ret;
 585	bool release = false;
 586
 587	src_rsv = trans->block_rsv;
 588	dst_rsv = &fs_info->delayed_block_rsv;
 589
 590	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 591
 592	/*
 593	 * If our block_rsv is the delalloc block reserve then check and see if
 594	 * we have our extra reservation for updating the inode.  If not fall
 595	 * through and try to reserve space quickly.
 596	 *
 597	 * We used to try and steal from the delalloc block rsv or the global
 598	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
 599	 * here through delalloc which means we've likely just cowed down close
 600	 * to the leaf that contains the inode, so we would steal less just
 601	 * doing the fallback inode update, so if we do end up having to steal
 602	 * from the global block rsv we hopefully only steal one or two blocks
 603	 * worth which is less likely to hurt us.
 604	 */
 605	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
 606		spin_lock(&BTRFS_I(inode)->lock);
 607		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 608				       &BTRFS_I(inode)->runtime_flags))
 609			release = true;
 610		else
 611			src_rsv = NULL;
 612		spin_unlock(&BTRFS_I(inode)->lock);
 613	}
 614
 615	/*
 616	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 617	 * which doesn't reserve space for speed.  This is a problem since we
 618	 * still need to reserve space for this update, so try to reserve the
 619	 * space.
 620	 *
 621	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 622	 * we're accounted for.
 623	 */
 624	if (!src_rsv || (!trans->bytes_reserved &&
 625			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 
 
 
 
 626		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 627					  BTRFS_RESERVE_NO_FLUSH);
 628		/*
 629		 * Since we're under a transaction reserve_metadata_bytes could
 630		 * try to commit the transaction which will make it return
 631		 * EAGAIN to make us stop the transaction we have, so return
 632		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 633		 */
 634		if (ret == -EAGAIN)
 635			ret = -ENOSPC;
 636		if (!ret) {
 637			node->bytes_reserved = num_bytes;
 638			trace_btrfs_space_reservation(fs_info,
 639						      "delayed_inode",
 640						      btrfs_ino(inode),
 641						      num_bytes, 1);
 642		}
 643		return ret;
 644	}
 645
 646	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 647
 648	/*
 649	 * Migrate only takes a reservation, it doesn't touch the size of the
 650	 * block_rsv.  This is to simplify people who don't normally have things
 651	 * migrated from their block rsv.  If they go to release their
 652	 * reservation, that will decrease the size as well, so if migrate
 653	 * reduced size we'd end up with a negative size.  But for the
 654	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 655	 * but we could in fact do this reserve/migrate dance several times
 656	 * between the time we did the original reservation and we'd clean it
 657	 * up.  So to take care of this, release the space for the meta
 658	 * reservation here.  I think it may be time for a documentation page on
 659	 * how block rsvs. work.
 660	 */
 661	if (!ret) {
 662		trace_btrfs_space_reservation(fs_info, "delayed_inode",
 663					      btrfs_ino(inode), num_bytes, 1);
 664		node->bytes_reserved = num_bytes;
 665	}
 666
 667	if (release) {
 668		trace_btrfs_space_reservation(fs_info, "delalloc",
 669					      btrfs_ino(inode), num_bytes, 0);
 670		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
 671	}
 672
 673	return ret;
 674}
 675
 676static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 677						struct btrfs_delayed_node *node)
 
 678{
 679	struct btrfs_block_rsv *rsv;
 680
 681	if (!node->bytes_reserved)
 682		return;
 683
 684	rsv = &fs_info->delayed_block_rsv;
 685	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 686				      node->inode_id, node->bytes_reserved, 0);
 687	btrfs_block_rsv_release(fs_info, rsv,
 
 
 
 
 
 688				node->bytes_reserved);
 689	node->bytes_reserved = 0;
 690}
 691
 692/*
 693 * This helper will insert some continuous items into the same leaf according
 694 * to the free space of the leaf.
 695 */
 696static int btrfs_batch_insert_items(struct btrfs_root *root,
 697				    struct btrfs_path *path,
 698				    struct btrfs_delayed_item *item)
 699{
 700	struct btrfs_fs_info *fs_info = root->fs_info;
 701	struct btrfs_delayed_item *curr, *next;
 702	int free_space;
 703	int total_data_size = 0, total_size = 0;
 704	struct extent_buffer *leaf;
 705	char *data_ptr;
 706	struct btrfs_key *keys;
 707	u32 *data_size;
 708	struct list_head head;
 709	int slot;
 710	int nitems;
 711	int i;
 712	int ret = 0;
 713
 714	BUG_ON(!path->nodes[0]);
 715
 716	leaf = path->nodes[0];
 717	free_space = btrfs_leaf_free_space(fs_info, leaf);
 718	INIT_LIST_HEAD(&head);
 719
 720	next = item;
 721	nitems = 0;
 722
 723	/*
 724	 * count the number of the continuous items that we can insert in batch
 725	 */
 726	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 727	       free_space) {
 728		total_data_size += next->data_len;
 729		total_size += next->data_len + sizeof(struct btrfs_item);
 730		list_add_tail(&next->tree_list, &head);
 731		nitems++;
 732
 733		curr = next;
 734		next = __btrfs_next_delayed_item(curr);
 735		if (!next)
 736			break;
 737
 738		if (!btrfs_is_continuous_delayed_item(curr, next))
 739			break;
 740	}
 741
 742	if (!nitems) {
 743		ret = 0;
 744		goto out;
 745	}
 746
 747	/*
 748	 * we need allocate some memory space, but it might cause the task
 749	 * to sleep, so we set all locked nodes in the path to blocking locks
 750	 * first.
 751	 */
 752	btrfs_set_path_blocking(path);
 753
 754	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 755	if (!keys) {
 756		ret = -ENOMEM;
 757		goto out;
 758	}
 759
 760	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 761	if (!data_size) {
 762		ret = -ENOMEM;
 763		goto error;
 764	}
 765
 766	/* get keys of all the delayed items */
 767	i = 0;
 768	list_for_each_entry(next, &head, tree_list) {
 769		keys[i] = next->key;
 770		data_size[i] = next->data_len;
 771		i++;
 772	}
 773
 774	/* reset all the locked nodes in the patch to spinning locks. */
 775	btrfs_clear_path_blocking(path, NULL, 0);
 776
 777	/* insert the keys of the items */
 778	setup_items_for_insert(root, path, keys, data_size,
 779			       total_data_size, total_size, nitems);
 780
 781	/* insert the dir index items */
 782	slot = path->slots[0];
 783	list_for_each_entry_safe(curr, next, &head, tree_list) {
 784		data_ptr = btrfs_item_ptr(leaf, slot, char);
 785		write_extent_buffer(leaf, &curr->data,
 786				    (unsigned long)data_ptr,
 787				    curr->data_len);
 788		slot++;
 789
 790		btrfs_delayed_item_release_metadata(fs_info, curr);
 791
 792		list_del(&curr->tree_list);
 793		btrfs_release_delayed_item(curr);
 794	}
 795
 796error:
 797	kfree(data_size);
 798	kfree(keys);
 799out:
 800	return ret;
 801}
 802
 803/*
 804 * This helper can just do simple insertion that needn't extend item for new
 805 * data, such as directory name index insertion, inode insertion.
 806 */
 807static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 808				     struct btrfs_root *root,
 809				     struct btrfs_path *path,
 810				     struct btrfs_delayed_item *delayed_item)
 811{
 812	struct btrfs_fs_info *fs_info = root->fs_info;
 813	struct extent_buffer *leaf;
 
 814	char *ptr;
 815	int ret;
 816
 
 817	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 818				      delayed_item->data_len);
 
 819	if (ret < 0 && ret != -EEXIST)
 820		return ret;
 821
 822	leaf = path->nodes[0];
 823
 824	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 825
 826	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 827			    delayed_item->data_len);
 828	btrfs_mark_buffer_dirty(leaf);
 829
 830	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
 831	return 0;
 832}
 833
 834/*
 835 * we insert an item first, then if there are some continuous items, we try
 836 * to insert those items into the same leaf.
 837 */
 838static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 839				      struct btrfs_path *path,
 840				      struct btrfs_root *root,
 841				      struct btrfs_delayed_node *node)
 842{
 843	struct btrfs_delayed_item *curr, *prev;
 844	int ret = 0;
 845
 846do_again:
 847	mutex_lock(&node->mutex);
 848	curr = __btrfs_first_delayed_insertion_item(node);
 849	if (!curr)
 850		goto insert_end;
 851
 852	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 853	if (ret < 0) {
 854		btrfs_release_path(path);
 855		goto insert_end;
 856	}
 857
 858	prev = curr;
 859	curr = __btrfs_next_delayed_item(prev);
 860	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 861		/* insert the continuous items into the same leaf */
 862		path->slots[0]++;
 863		btrfs_batch_insert_items(root, path, curr);
 864	}
 865	btrfs_release_delayed_item(prev);
 866	btrfs_mark_buffer_dirty(path->nodes[0]);
 867
 868	btrfs_release_path(path);
 869	mutex_unlock(&node->mutex);
 870	goto do_again;
 871
 872insert_end:
 873	mutex_unlock(&node->mutex);
 874	return ret;
 875}
 876
 877static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 878				    struct btrfs_root *root,
 879				    struct btrfs_path *path,
 880				    struct btrfs_delayed_item *item)
 881{
 882	struct btrfs_fs_info *fs_info = root->fs_info;
 883	struct btrfs_delayed_item *curr, *next;
 884	struct extent_buffer *leaf;
 885	struct btrfs_key key;
 886	struct list_head head;
 887	int nitems, i, last_item;
 888	int ret = 0;
 889
 890	BUG_ON(!path->nodes[0]);
 891
 892	leaf = path->nodes[0];
 893
 894	i = path->slots[0];
 895	last_item = btrfs_header_nritems(leaf) - 1;
 896	if (i > last_item)
 897		return -ENOENT;	/* FIXME: Is errno suitable? */
 898
 899	next = item;
 900	INIT_LIST_HEAD(&head);
 901	btrfs_item_key_to_cpu(leaf, &key, i);
 902	nitems = 0;
 903	/*
 904	 * count the number of the dir index items that we can delete in batch
 905	 */
 906	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 907		list_add_tail(&next->tree_list, &head);
 908		nitems++;
 909
 910		curr = next;
 911		next = __btrfs_next_delayed_item(curr);
 912		if (!next)
 913			break;
 914
 915		if (!btrfs_is_continuous_delayed_item(curr, next))
 916			break;
 917
 918		i++;
 919		if (i > last_item)
 920			break;
 921		btrfs_item_key_to_cpu(leaf, &key, i);
 922	}
 923
 924	if (!nitems)
 925		return 0;
 926
 927	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 928	if (ret)
 929		goto out;
 930
 931	list_for_each_entry_safe(curr, next, &head, tree_list) {
 932		btrfs_delayed_item_release_metadata(fs_info, curr);
 933		list_del(&curr->tree_list);
 934		btrfs_release_delayed_item(curr);
 935	}
 936
 937out:
 938	return ret;
 939}
 940
 941static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 942				      struct btrfs_path *path,
 943				      struct btrfs_root *root,
 944				      struct btrfs_delayed_node *node)
 945{
 946	struct btrfs_delayed_item *curr, *prev;
 
 947	int ret = 0;
 948
 949do_again:
 950	mutex_lock(&node->mutex);
 951	curr = __btrfs_first_delayed_deletion_item(node);
 952	if (!curr)
 953		goto delete_fail;
 954
 
 955	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 
 956	if (ret < 0)
 957		goto delete_fail;
 958	else if (ret > 0) {
 959		/*
 960		 * can't find the item which the node points to, so this node
 961		 * is invalid, just drop it.
 962		 */
 963		prev = curr;
 964		curr = __btrfs_next_delayed_item(prev);
 965		btrfs_release_delayed_item(prev);
 966		ret = 0;
 967		btrfs_release_path(path);
 968		if (curr) {
 969			mutex_unlock(&node->mutex);
 970			goto do_again;
 971		} else
 972			goto delete_fail;
 973	}
 974
 975	btrfs_batch_delete_items(trans, root, path, curr);
 976	btrfs_release_path(path);
 977	mutex_unlock(&node->mutex);
 978	goto do_again;
 979
 980delete_fail:
 981	btrfs_release_path(path);
 982	mutex_unlock(&node->mutex);
 983	return ret;
 984}
 985
 986static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 987{
 988	struct btrfs_delayed_root *delayed_root;
 989
 990	if (delayed_node &&
 991	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 992		BUG_ON(!delayed_node->root);
 993		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 994		delayed_node->count--;
 995
 996		delayed_root = delayed_node->root->fs_info->delayed_root;
 997		finish_one_item(delayed_root);
 998	}
 999}
1000
1001static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1002{
1003	struct btrfs_delayed_root *delayed_root;
1004
1005	ASSERT(delayed_node->root);
1006	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1007	delayed_node->count--;
 
 
1008
1009	delayed_root = delayed_node->root->fs_info->delayed_root;
1010	finish_one_item(delayed_root);
 
1011}
1012
1013static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1014					struct btrfs_root *root,
1015					struct btrfs_path *path,
1016					struct btrfs_delayed_node *node)
1017{
1018	struct btrfs_fs_info *fs_info = root->fs_info;
1019	struct btrfs_key key;
1020	struct btrfs_inode_item *inode_item;
1021	struct extent_buffer *leaf;
 
1022	int mod;
1023	int ret;
1024
1025	key.objectid = node->inode_id;
1026	key.type = BTRFS_INODE_ITEM_KEY;
1027	key.offset = 0;
1028
1029	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1030		mod = -1;
1031	else
1032		mod = 1;
1033
 
1034	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1035	if (ret > 0) {
1036		btrfs_release_path(path);
1037		return -ENOENT;
1038	} else if (ret < 0) {
1039		return ret;
1040	}
1041
1042	leaf = path->nodes[0];
1043	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1044				    struct btrfs_inode_item);
1045	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1046			    sizeof(struct btrfs_inode_item));
1047	btrfs_mark_buffer_dirty(leaf);
1048
1049	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1050		goto no_iref;
1051
1052	path->slots[0]++;
1053	if (path->slots[0] >= btrfs_header_nritems(leaf))
1054		goto search;
1055again:
1056	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1057	if (key.objectid != node->inode_id)
1058		goto out;
1059
1060	if (key.type != BTRFS_INODE_REF_KEY &&
1061	    key.type != BTRFS_INODE_EXTREF_KEY)
1062		goto out;
1063
1064	/*
1065	 * Delayed iref deletion is for the inode who has only one link,
1066	 * so there is only one iref. The case that several irefs are
1067	 * in the same item doesn't exist.
1068	 */
1069	btrfs_del_item(trans, root, path);
1070out:
1071	btrfs_release_delayed_iref(node);
1072no_iref:
1073	btrfs_release_path(path);
1074err_out:
1075	btrfs_delayed_inode_release_metadata(fs_info, node);
1076	btrfs_release_delayed_inode(node);
1077
 
 
 
 
 
 
 
 
1078	return ret;
1079
1080search:
1081	btrfs_release_path(path);
1082
1083	key.type = BTRFS_INODE_EXTREF_KEY;
1084	key.offset = -1;
 
 
1085	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 
1086	if (ret < 0)
1087		goto err_out;
1088	ASSERT(ret);
1089
1090	ret = 0;
1091	leaf = path->nodes[0];
1092	path->slots[0]--;
1093	goto again;
1094}
1095
1096static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1097					     struct btrfs_root *root,
1098					     struct btrfs_path *path,
1099					     struct btrfs_delayed_node *node)
1100{
1101	int ret;
1102
1103	mutex_lock(&node->mutex);
1104	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1105		mutex_unlock(&node->mutex);
1106		return 0;
1107	}
1108
1109	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1110	mutex_unlock(&node->mutex);
1111	return ret;
1112}
1113
1114static inline int
1115__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1116				   struct btrfs_path *path,
1117				   struct btrfs_delayed_node *node)
1118{
1119	int ret;
1120
1121	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1122	if (ret)
1123		return ret;
1124
1125	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1126	if (ret)
1127		return ret;
1128
1129	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1130	return ret;
1131}
1132
1133/*
1134 * Called when committing the transaction.
1135 * Returns 0 on success.
1136 * Returns < 0 on error and returns with an aborted transaction with any
1137 * outstanding delayed items cleaned up.
1138 */
1139static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1140				     struct btrfs_fs_info *fs_info, int nr)
1141{
 
1142	struct btrfs_delayed_root *delayed_root;
1143	struct btrfs_delayed_node *curr_node, *prev_node;
1144	struct btrfs_path *path;
1145	struct btrfs_block_rsv *block_rsv;
1146	int ret = 0;
1147	bool count = (nr > 0);
1148
1149	if (trans->aborted)
1150		return -EIO;
1151
1152	path = btrfs_alloc_path();
1153	if (!path)
1154		return -ENOMEM;
1155	path->leave_spinning = 1;
1156
1157	block_rsv = trans->block_rsv;
1158	trans->block_rsv = &fs_info->delayed_block_rsv;
1159
1160	delayed_root = fs_info->delayed_root;
1161
1162	curr_node = btrfs_first_delayed_node(delayed_root);
1163	while (curr_node && (!count || (count && nr--))) {
1164		ret = __btrfs_commit_inode_delayed_items(trans, path,
1165							 curr_node);
1166		if (ret) {
1167			btrfs_release_delayed_node(curr_node);
1168			curr_node = NULL;
1169			btrfs_abort_transaction(trans, ret);
1170			break;
1171		}
1172
1173		prev_node = curr_node;
1174		curr_node = btrfs_next_delayed_node(curr_node);
1175		btrfs_release_delayed_node(prev_node);
1176	}
1177
1178	if (curr_node)
1179		btrfs_release_delayed_node(curr_node);
1180	btrfs_free_path(path);
1181	trans->block_rsv = block_rsv;
1182
1183	return ret;
1184}
1185
1186int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1187			    struct btrfs_fs_info *fs_info)
1188{
1189	return __btrfs_run_delayed_items(trans, fs_info, -1);
1190}
1191
1192int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1193			       struct btrfs_fs_info *fs_info, int nr)
1194{
1195	return __btrfs_run_delayed_items(trans, fs_info, nr);
1196}
1197
1198int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1199				     struct inode *inode)
1200{
1201	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1202	struct btrfs_path *path;
1203	struct btrfs_block_rsv *block_rsv;
1204	int ret;
1205
1206	if (!delayed_node)
1207		return 0;
1208
1209	mutex_lock(&delayed_node->mutex);
1210	if (!delayed_node->count) {
1211		mutex_unlock(&delayed_node->mutex);
1212		btrfs_release_delayed_node(delayed_node);
1213		return 0;
1214	}
1215	mutex_unlock(&delayed_node->mutex);
1216
1217	path = btrfs_alloc_path();
1218	if (!path) {
1219		btrfs_release_delayed_node(delayed_node);
1220		return -ENOMEM;
1221	}
1222	path->leave_spinning = 1;
1223
1224	block_rsv = trans->block_rsv;
1225	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1226
1227	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1228
1229	btrfs_release_delayed_node(delayed_node);
1230	btrfs_free_path(path);
1231	trans->block_rsv = block_rsv;
1232
1233	return ret;
1234}
1235
1236int btrfs_commit_inode_delayed_inode(struct inode *inode)
1237{
1238	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1239	struct btrfs_trans_handle *trans;
1240	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1241	struct btrfs_path *path;
1242	struct btrfs_block_rsv *block_rsv;
1243	int ret;
1244
1245	if (!delayed_node)
1246		return 0;
1247
1248	mutex_lock(&delayed_node->mutex);
1249	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1250		mutex_unlock(&delayed_node->mutex);
1251		btrfs_release_delayed_node(delayed_node);
1252		return 0;
1253	}
1254	mutex_unlock(&delayed_node->mutex);
1255
1256	trans = btrfs_join_transaction(delayed_node->root);
1257	if (IS_ERR(trans)) {
1258		ret = PTR_ERR(trans);
1259		goto out;
1260	}
1261
1262	path = btrfs_alloc_path();
1263	if (!path) {
1264		ret = -ENOMEM;
1265		goto trans_out;
1266	}
1267	path->leave_spinning = 1;
1268
1269	block_rsv = trans->block_rsv;
1270	trans->block_rsv = &fs_info->delayed_block_rsv;
1271
1272	mutex_lock(&delayed_node->mutex);
1273	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1274		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1275						   path, delayed_node);
1276	else
1277		ret = 0;
1278	mutex_unlock(&delayed_node->mutex);
1279
1280	btrfs_free_path(path);
1281	trans->block_rsv = block_rsv;
1282trans_out:
1283	btrfs_end_transaction(trans);
1284	btrfs_btree_balance_dirty(fs_info);
1285out:
1286	btrfs_release_delayed_node(delayed_node);
1287
1288	return ret;
1289}
1290
1291void btrfs_remove_delayed_node(struct inode *inode)
1292{
1293	struct btrfs_delayed_node *delayed_node;
1294
1295	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1296	if (!delayed_node)
1297		return;
1298
1299	BTRFS_I(inode)->delayed_node = NULL;
1300	btrfs_release_delayed_node(delayed_node);
1301}
1302
1303struct btrfs_async_delayed_work {
1304	struct btrfs_delayed_root *delayed_root;
1305	int nr;
1306	struct btrfs_work work;
1307};
1308
1309static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1310{
1311	struct btrfs_async_delayed_work *async_work;
1312	struct btrfs_delayed_root *delayed_root;
1313	struct btrfs_trans_handle *trans;
1314	struct btrfs_path *path;
1315	struct btrfs_delayed_node *delayed_node = NULL;
1316	struct btrfs_root *root;
1317	struct btrfs_block_rsv *block_rsv;
1318	int total_done = 0;
1319
1320	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1321	delayed_root = async_work->delayed_root;
1322
1323	path = btrfs_alloc_path();
1324	if (!path)
1325		goto out;
1326
1327again:
1328	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1329		goto free_path;
 
1330
1331	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1332	if (!delayed_node)
1333		goto free_path;
1334
1335	path->leave_spinning = 1;
1336	root = delayed_node->root;
1337
1338	trans = btrfs_join_transaction(root);
1339	if (IS_ERR(trans))
1340		goto release_path;
 
 
 
 
1341
1342	block_rsv = trans->block_rsv;
1343	trans->block_rsv = &root->fs_info->delayed_block_rsv;
1344
1345	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1346
1347	trans->block_rsv = block_rsv;
1348	btrfs_end_transaction(trans);
1349	btrfs_btree_balance_dirty_nodelay(root->fs_info);
1350
1351release_path:
1352	btrfs_release_path(path);
1353	total_done++;
1354
1355	btrfs_release_prepared_delayed_node(delayed_node);
1356	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
1357	    total_done < async_work->nr)
1358		goto again;
1359
1360free_path:
1361	btrfs_free_path(path);
1362out:
1363	wake_up(&delayed_root->wait);
1364	kfree(async_work);
1365}
1366
1367
1368static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1369				     struct btrfs_fs_info *fs_info, int nr)
1370{
1371	struct btrfs_async_delayed_work *async_work;
1372
1373	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
1374	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1375		return 0;
1376
1377	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1378	if (!async_work)
1379		return -ENOMEM;
1380
1381	async_work->delayed_root = delayed_root;
1382	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1383			btrfs_async_run_delayed_root, NULL, NULL);
1384	async_work->nr = nr;
1385
1386	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1387	return 0;
1388}
1389
1390void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1391{
1392	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1393}
1394
1395static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1396{
1397	int val = atomic_read(&delayed_root->items_seq);
1398
1399	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1400		return 1;
1401
1402	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1403		return 1;
1404
1405	return 0;
1406}
1407
1408void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1409{
1410	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1411
1412	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 
1413		return;
1414
1415	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1416		int seq;
1417		int ret;
1418
1419		seq = atomic_read(&delayed_root->items_seq);
1420
1421		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1422		if (ret)
1423			return;
1424
1425		wait_event_interruptible(delayed_root->wait,
1426					 could_end_wait(delayed_root, seq));
1427		return;
1428	}
1429
1430	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1431}
1432
1433/* Will return 0 or -ENOMEM */
1434int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1435				   struct btrfs_fs_info *fs_info,
1436				   const char *name, int name_len,
1437				   struct inode *dir,
1438				   struct btrfs_disk_key *disk_key, u8 type,
1439				   u64 index)
1440{
1441	struct btrfs_delayed_node *delayed_node;
1442	struct btrfs_delayed_item *delayed_item;
1443	struct btrfs_dir_item *dir_item;
1444	int ret;
1445
1446	delayed_node = btrfs_get_or_create_delayed_node(dir);
1447	if (IS_ERR(delayed_node))
1448		return PTR_ERR(delayed_node);
1449
1450	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1451	if (!delayed_item) {
1452		ret = -ENOMEM;
1453		goto release_node;
1454	}
1455
1456	delayed_item->key.objectid = btrfs_ino(dir);
1457	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1458	delayed_item->key.offset = index;
1459
1460	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1461	dir_item->location = *disk_key;
1462	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1463	btrfs_set_stack_dir_data_len(dir_item, 0);
1464	btrfs_set_stack_dir_name_len(dir_item, name_len);
1465	btrfs_set_stack_dir_type(dir_item, type);
1466	memcpy((char *)(dir_item + 1), name, name_len);
1467
1468	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
1469	/*
1470	 * we have reserved enough space when we start a new transaction,
1471	 * so reserving metadata failure is impossible
1472	 */
1473	BUG_ON(ret);
1474
1475
1476	mutex_lock(&delayed_node->mutex);
1477	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1478	if (unlikely(ret)) {
1479		btrfs_err(fs_info,
1480			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1481			  name_len, name, delayed_node->root->objectid,
1482			  delayed_node->inode_id, ret);
1483		BUG();
1484	}
1485	mutex_unlock(&delayed_node->mutex);
1486
1487release_node:
1488	btrfs_release_delayed_node(delayed_node);
1489	return ret;
1490}
1491
1492static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1493					       struct btrfs_delayed_node *node,
1494					       struct btrfs_key *key)
1495{
1496	struct btrfs_delayed_item *item;
1497
1498	mutex_lock(&node->mutex);
1499	item = __btrfs_lookup_delayed_insertion_item(node, key);
1500	if (!item) {
1501		mutex_unlock(&node->mutex);
1502		return 1;
1503	}
1504
1505	btrfs_delayed_item_release_metadata(fs_info, item);
1506	btrfs_release_delayed_item(item);
1507	mutex_unlock(&node->mutex);
1508	return 0;
1509}
1510
1511int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1512				   struct btrfs_fs_info *fs_info,
1513				   struct inode *dir, u64 index)
1514{
1515	struct btrfs_delayed_node *node;
1516	struct btrfs_delayed_item *item;
1517	struct btrfs_key item_key;
1518	int ret;
1519
1520	node = btrfs_get_or_create_delayed_node(dir);
1521	if (IS_ERR(node))
1522		return PTR_ERR(node);
1523
1524	item_key.objectid = btrfs_ino(dir);
1525	item_key.type = BTRFS_DIR_INDEX_KEY;
1526	item_key.offset = index;
1527
1528	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
 
1529	if (!ret)
1530		goto end;
1531
1532	item = btrfs_alloc_delayed_item(0);
1533	if (!item) {
1534		ret = -ENOMEM;
1535		goto end;
1536	}
1537
1538	item->key = item_key;
1539
1540	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
1541	/*
1542	 * we have reserved enough space when we start a new transaction,
1543	 * so reserving metadata failure is impossible.
1544	 */
1545	BUG_ON(ret);
 
 
 
 
 
1546
1547	mutex_lock(&node->mutex);
1548	ret = __btrfs_add_delayed_deletion_item(node, item);
1549	if (unlikely(ret)) {
1550		btrfs_err(fs_info,
1551			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1552			  index, node->root->objectid, node->inode_id, ret);
1553		BUG();
 
 
1554	}
1555	mutex_unlock(&node->mutex);
1556end:
1557	btrfs_release_delayed_node(node);
1558	return ret;
1559}
1560
1561int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1562{
1563	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1564
1565	if (!delayed_node)
1566		return -ENOENT;
1567
1568	/*
1569	 * Since we have held i_mutex of this directory, it is impossible that
1570	 * a new directory index is added into the delayed node and index_cnt
1571	 * is updated now. So we needn't lock the delayed node.
1572	 */
1573	if (!delayed_node->index_cnt) {
1574		btrfs_release_delayed_node(delayed_node);
1575		return -EINVAL;
1576	}
1577
1578	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1579	btrfs_release_delayed_node(delayed_node);
1580	return 0;
1581}
1582
1583bool btrfs_readdir_get_delayed_items(struct inode *inode,
1584				     struct list_head *ins_list,
1585				     struct list_head *del_list)
1586{
1587	struct btrfs_delayed_node *delayed_node;
1588	struct btrfs_delayed_item *item;
1589
1590	delayed_node = btrfs_get_delayed_node(inode);
1591	if (!delayed_node)
1592		return false;
1593
1594	/*
1595	 * We can only do one readdir with delayed items at a time because of
1596	 * item->readdir_list.
1597	 */
1598	inode_unlock_shared(inode);
1599	inode_lock(inode);
1600
1601	mutex_lock(&delayed_node->mutex);
1602	item = __btrfs_first_delayed_insertion_item(delayed_node);
1603	while (item) {
1604		atomic_inc(&item->refs);
1605		list_add_tail(&item->readdir_list, ins_list);
1606		item = __btrfs_next_delayed_item(item);
1607	}
1608
1609	item = __btrfs_first_delayed_deletion_item(delayed_node);
1610	while (item) {
1611		atomic_inc(&item->refs);
1612		list_add_tail(&item->readdir_list, del_list);
1613		item = __btrfs_next_delayed_item(item);
1614	}
1615	mutex_unlock(&delayed_node->mutex);
1616	/*
1617	 * This delayed node is still cached in the btrfs inode, so refs
1618	 * must be > 1 now, and we needn't check it is going to be freed
1619	 * or not.
1620	 *
1621	 * Besides that, this function is used to read dir, we do not
1622	 * insert/delete delayed items in this period. So we also needn't
1623	 * requeue or dequeue this delayed node.
1624	 */
1625	atomic_dec(&delayed_node->refs);
1626
1627	return true;
1628}
1629
1630void btrfs_readdir_put_delayed_items(struct inode *inode,
1631				     struct list_head *ins_list,
1632				     struct list_head *del_list)
1633{
1634	struct btrfs_delayed_item *curr, *next;
1635
1636	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1637		list_del(&curr->readdir_list);
1638		if (atomic_dec_and_test(&curr->refs))
1639			kfree(curr);
1640	}
1641
1642	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1643		list_del(&curr->readdir_list);
1644		if (atomic_dec_and_test(&curr->refs))
1645			kfree(curr);
1646	}
1647
1648	/*
1649	 * The VFS is going to do up_read(), so we need to downgrade back to a
1650	 * read lock.
1651	 */
1652	downgrade_write(&inode->i_rwsem);
1653}
1654
1655int btrfs_should_delete_dir_index(struct list_head *del_list,
1656				  u64 index)
1657{
1658	struct btrfs_delayed_item *curr, *next;
1659	int ret;
1660
1661	if (list_empty(del_list))
1662		return 0;
1663
1664	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1665		if (curr->key.offset > index)
1666			break;
1667
1668		list_del(&curr->readdir_list);
1669		ret = (curr->key.offset == index);
1670
1671		if (atomic_dec_and_test(&curr->refs))
1672			kfree(curr);
1673
1674		if (ret)
1675			return 1;
1676		else
1677			continue;
1678	}
1679	return 0;
1680}
1681
1682/*
1683 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1684 *
1685 */
1686int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1687				    struct list_head *ins_list)
1688{
1689	struct btrfs_dir_item *di;
1690	struct btrfs_delayed_item *curr, *next;
1691	struct btrfs_key location;
1692	char *name;
1693	int name_len;
1694	int over = 0;
1695	unsigned char d_type;
1696
1697	if (list_empty(ins_list))
1698		return 0;
1699
1700	/*
1701	 * Changing the data of the delayed item is impossible. So
1702	 * we needn't lock them. And we have held i_mutex of the
1703	 * directory, nobody can delete any directory indexes now.
1704	 */
1705	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1706		list_del(&curr->readdir_list);
1707
1708		if (curr->key.offset < ctx->pos) {
1709			if (atomic_dec_and_test(&curr->refs))
1710				kfree(curr);
1711			continue;
1712		}
1713
1714		ctx->pos = curr->key.offset;
1715
1716		di = (struct btrfs_dir_item *)curr->data;
1717		name = (char *)(di + 1);
1718		name_len = btrfs_stack_dir_name_len(di);
1719
1720		d_type = btrfs_filetype_table[di->type];
1721		btrfs_disk_key_to_cpu(&location, &di->location);
1722
1723		over = !dir_emit(ctx, name, name_len,
1724			       location.objectid, d_type);
1725
1726		if (atomic_dec_and_test(&curr->refs))
1727			kfree(curr);
1728
1729		if (over)
1730			return 1;
 
1731	}
1732	return 0;
1733}
1734
1735static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1736				  struct btrfs_inode_item *inode_item,
1737				  struct inode *inode)
1738{
1739	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1740	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1741	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1742	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1743	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1744	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1745	btrfs_set_stack_inode_generation(inode_item,
1746					 BTRFS_I(inode)->generation);
1747	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
 
1748	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1749	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1750	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1751	btrfs_set_stack_inode_block_group(inode_item, 0);
1752
1753	btrfs_set_stack_timespec_sec(&inode_item->atime,
1754				     inode->i_atime.tv_sec);
1755	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1756				      inode->i_atime.tv_nsec);
1757
1758	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1759				     inode->i_mtime.tv_sec);
1760	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1761				      inode->i_mtime.tv_nsec);
1762
1763	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1764				     inode->i_ctime.tv_sec);
1765	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1766				      inode->i_ctime.tv_nsec);
1767
1768	btrfs_set_stack_timespec_sec(&inode_item->otime,
1769				     BTRFS_I(inode)->i_otime.tv_sec);
1770	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1771				     BTRFS_I(inode)->i_otime.tv_nsec);
1772}
1773
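/*
 * btrfs_fill_inode - fill the VFS inode from the inode item cached in its
 * delayed node.  Returns -ENOENT if there is no delayed node or its inode
 * item is not dirty, i.e. no newer copy than the one on disk exists.
 */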
1774int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1775{
1776	struct btrfs_delayed_node *delayed_node;
1777	struct btrfs_inode_item *inode_item;
1778
1779	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1780	if (!delayed_node)
1781		return -ENOENT;
1782
1783	mutex_lock(&delayed_node->mutex);
1784	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1785		mutex_unlock(&delayed_node->mutex);
1786		btrfs_release_delayed_node(delayed_node);
1787		return -ENOENT;
1788	}
1789
1790	inode_item = &delayed_node->inode_item;
1791
1792	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1793	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1794	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1795	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1796	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1797	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1798	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1799	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1800
1801	inode_set_iversion_queried(inode, btrfs_stack_inode_sequence(inode_item));
1802	inode->i_rdev = 0;
1803	*rdev = btrfs_stack_inode_rdev(inode_item);
1804	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1805
1806	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1807	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1808
1809	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1810	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1811
1812	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1813	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1814
1815	BTRFS_I(inode)->i_otime.tv_sec =
1816		btrfs_stack_timespec_sec(&inode_item->otime);
1817	BTRFS_I(inode)->i_otime.tv_nsec =
1818		btrfs_stack_timespec_nsec(&inode_item->otime);
1819
1820	inode->i_generation = BTRFS_I(inode)->generation;
1821	BTRFS_I(inode)->index_cnt = (u64)-1;
1822
1823	mutex_unlock(&delayed_node->mutex);
1824	btrfs_release_delayed_node(delayed_node);
1825	return 0;
1826}
1827
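/*
 * btrfs_delayed_update_inode - defer an inode item update.  Metadata space
 * is reserved only for the first update; after that the cached inode item
 * is simply refreshed and the delayed worker writes it back later.
 */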
1828int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1829			       struct btrfs_root *root, struct inode *inode)
1830{
1831	struct btrfs_delayed_node *delayed_node;
1832	int ret = 0;
1833
1834	delayed_node = btrfs_get_or_create_delayed_node(inode);
1835	if (IS_ERR(delayed_node))
1836		return PTR_ERR(delayed_node);
1837
1838	mutex_lock(&delayed_node->mutex);
1839	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1840		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1841		goto release_node;
1842	}
1843
1844	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1845						   delayed_node);
1846	if (ret)
1847		goto release_node;
1848
1849	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1850	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1851	delayed_node->count++;
1852	atomic_inc(&root->fs_info->delayed_root->items);
1853release_node:
1854	mutex_unlock(&delayed_node->mutex);
1855	btrfs_release_delayed_node(delayed_node);
1856	return ret;
1857}
1858
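/*
 * btrfs_delayed_delete_inode_ref - queue the deletion of the inode's single
 * inode ref in its delayed node.  Returns -EAGAIN during log recovery, when
 * delayed inode updates are not allowed.
 */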
1859int btrfs_delayed_delete_inode_ref(struct inode *inode)
1860{
1861	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1862	struct btrfs_delayed_node *delayed_node;
1863
1864	/*
1865	 * We don't do delayed inode updates during log recovery because it
1866	 * leads to ENOSPC problems.  This means we also can't do
1867	 * delayed inode refs.
1868	 */
1869	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1870		return -EAGAIN;
1871
1872	delayed_node = btrfs_get_or_create_delayed_node(inode);
1873	if (IS_ERR(delayed_node))
1874		return PTR_ERR(delayed_node);
1875
1876	/*
1877	 * We don't reserve space for inode ref deletion because:
1878	 * - We ONLY do async inode ref deletion for an inode that has only
1879	 *   one link (i_nlink == 1), which means there is only one inode ref.
1880	 *   In most cases, the inode ref and the inode item are in the
1881	 *   same leaf, and we will deal with them at the same time.
1882	 *   Since we are sure we will reserve space for the inode item,
1883	 *   it is unnecessary to reserve space for the inode ref deletion.
1884	 * - If the inode ref and the inode item are not in the same leaf,
1885	 *   we still needn't worry about ENOSPC, because we reserve
1886	 *   much more space for the inode update than it needs.
1887	 * - In the worst case, we can steal some space from the global
1888	 *   reservation, but that is very rare.
1889	 */
1890	mutex_lock(&delayed_node->mutex);
1891	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1892		goto release_node;
1893
1894	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1895	delayed_node->count++;
1896	atomic_inc(&fs_info->delayed_root->items);
1897release_node:
1898	mutex_unlock(&delayed_node->mutex);
1899	btrfs_release_delayed_node(delayed_node);
1900	return 0;
1901}
1902
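/*
 * Throw away a delayed node without writing anything back: release the
 * metadata reservations of all queued insertion and deletion items, the
 * pending iref deletion and the dirty inode item.
 */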
1903static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1904{
1905	struct btrfs_root *root = delayed_node->root;
1906	struct btrfs_fs_info *fs_info = root->fs_info;
1907	struct btrfs_delayed_item *curr_item, *prev_item;
1908
1909	mutex_lock(&delayed_node->mutex);
1910	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1911	while (curr_item) {
1912		btrfs_delayed_item_release_metadata(fs_info, curr_item);
1913		prev_item = curr_item;
1914		curr_item = __btrfs_next_delayed_item(prev_item);
1915		btrfs_release_delayed_item(prev_item);
1916	}
1917
1918	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1919	while (curr_item) {
1920		btrfs_delayed_item_release_metadata(fs_info, curr_item);
1921		prev_item = curr_item;
1922		curr_item = __btrfs_next_delayed_item(prev_item);
1923		btrfs_release_delayed_item(prev_item);
1924	}
1925
1926	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1927		btrfs_release_delayed_iref(delayed_node);
1928
1929	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1930		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
1931		btrfs_release_delayed_inode(delayed_node);
1932	}
1933	mutex_unlock(&delayed_node->mutex);
1934}
1935
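/* Throw away all pending delayed items of a single inode. */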
1936void btrfs_kill_delayed_inode_items(struct inode *inode)
1937{
1938	struct btrfs_delayed_node *delayed_node;
1939
1940	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1941	if (!delayed_node)
1942		return;
1943
1944	__btrfs_kill_delayed_node(delayed_node);
1945	btrfs_release_delayed_node(delayed_node);
1946}
1947
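/*
 * Throw away the delayed items of every inode in @root, scanning the
 * radix tree in batches of up to eight nodes.
 */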
1948void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1949{
1950	u64 inode_id = 0;
1951	struct btrfs_delayed_node *delayed_nodes[8];
1952	int i, n;
1953
1954	while (1) {
1955		spin_lock(&root->inode_lock);
1956		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1957					   (void **)delayed_nodes, inode_id,
1958					   ARRAY_SIZE(delayed_nodes));
1959		if (!n) {
1960			spin_unlock(&root->inode_lock);
1961			break;
1962		}
1963
1964		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1965
1966		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
1968		spin_unlock(&root->inode_lock);
1969	
1970		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
1971			__btrfs_kill_delayed_node(delayed_nodes[i]);
1972			btrfs_release_delayed_node(delayed_nodes[i]);
1973		}
1974	}
1975}
1976
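/*
 * Throw away every delayed node still linked into the delayed root's node
 * list; used during error cleanup when the filesystem is torn down.
 */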
1977void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1978{
1979	struct btrfs_delayed_node *curr_node, *prev_node;
1980
1981	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1982	while (curr_node) {
1983		__btrfs_kill_delayed_node(curr_node);
1984
1985		prev_node = curr_node;
1986		curr_node = btrfs_next_delayed_node(curr_node);
1987		btrfs_release_delayed_node(prev_node);
1988	}
1989}
1990