/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->flags = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

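/*
 * Look up the delayed node of an inode and take a reference on it.
 *
 * The first check is a lockless read of the pointer cached in the btrfs
 * inode; if the node is not cached yet, fall back to the radix tree under
 * inode_lock and, on success, cache it in the btrfs inode.  Two references
 * are taken in that case: one for the cache and one for the caller.
 * Returns NULL if the inode has no delayed node.
 */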
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

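/*
 * Drop a reference on a delayed node.  While the node still has delayed
 * items (count != 0) it is kept on (or moved to) the work lists; otherwise
 * it is dequeued.  Once the last reference is gone, the node is removed
 * from the radix tree and freed; the refcount is re-checked under
 * inode_lock to avoid racing with btrfs_get_delayed_node() taking a new
 * reference.
 */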
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

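/*
 * Insert a delayed item into the insertion or deletion rb-tree of the
 * delayed node, ordered by key.  Returns -EEXIST if an item with the same
 * key is already queued.  For dir index insertions, index_cnt is bumped so
 * that new index numbers are allocated past this one.
 */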
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				 rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

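/*
 * Account one completed delayed item.  Waiters on delayed_root->wait are
 * only woken when the backlog drops below BTRFS_DELAYED_BACKGROUND or on
 * every BTRFS_DELAYED_BATCH-th completion, so they are not woken for every
 * single item.
 */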
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

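/*
 * Move the metadata reservation for one item out of the transaction's
 * block reservation and into the global delayed_block_rsv, so that the
 * space remains reserved until the delayed item is actually run.
 */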
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space (for speed).  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!WARN_ON(ret))
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all the locked nodes in the path to blocking
	 * locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper only handles simple insertions that don't need to extend an
 * existing item for new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

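/*
 * Copy the delayed inode item into the inode item in the fs tree and, if
 * BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single remaining inode
 * ref that follows it (looking it up separately if it is not in the same
 * leaf).  Caller must hold delayed_node->mutex.
 */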
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		ret = -ENOENT;
	}
	if (ret < 0)
		goto err_out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

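/*
 * Commit all delayed items of one inode in the current transaction.
 * Takes its own reference on the delayed node and temporarily switches
 * trans->block_rsv to the delayed_block_rsv that the reservations were
 * migrated into.
 */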
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

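/*
 * Worker for the delayed_workers workqueue: repeatedly pick a node off the
 * prepare list and flush its delayed items inside a joined transaction,
 * until async_work->nr nodes are done (nr == 0 means keep going until the
 * backlog falls below half the background threshold).
 */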
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
			NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

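/*
 * Wait predicate for btrfs_balance_delayed_items(): stop waiting once a
 * full batch of items has been processed since the caller sampled
 * items_seq, or once the backlog has dropped below the background
 * threshold.
 */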
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

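/*
 * Throttle delayed-item producers.  Below BTRFS_DELAYED_BACKGROUND items
 * nothing is done; above it, background flushing is kicked off; above
 * BTRFS_DELAYED_WRITEBACK the caller also waits until a batch has been
 * processed or the backlog has shrunk back below the background level.
 */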
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started the transaction,
	 * so reserving metadata for this item can not fail
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info, "error adding delayed dir index item (name: %.*s) "
				"into the insertion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)",
				name_len, name, delayed_node->root->objectid,
				delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we reserved enough space when we started the transaction,
	 * so reserving metadata for this item can not fail.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info, "error adding delayed dir index item (index: %llu) "
				"into the deletion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)",
				index, node->root->objectid, node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt can't be updated
	 * now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 here, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of the delayed items can't change, so we needn't lock
	 * them. And since we hold the i_mutex of the directory, nobody can
	 * delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

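/*
 * Copy the in-memory inode fields into the stack btrfs_inode_item that
 * lives in the delayed node, so the on-disk update can be applied later
 * without touching the VFS inode again.
 */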
1737static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1738				  struct btrfs_inode_item *inode_item,
1739				  struct inode *inode)
1740{
1741	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1742	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1743	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1744	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1745	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1746	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1747	btrfs_set_stack_inode_generation(inode_item,
1748					 BTRFS_I(inode)->generation);
1749	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1750	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1751	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1752	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1753	btrfs_set_stack_inode_block_group(inode_item, 0);
1754
1755	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1756				     inode->i_atime.tv_sec);
1757	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1758				      inode->i_atime.tv_nsec);
1759
1760	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1761				     inode->i_mtime.tv_sec);
1762	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1763				      inode->i_mtime.tv_nsec);
1764
1765	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1766				     inode->i_ctime.tv_sec);
1767	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1768				      inode->i_ctime.tv_nsec);
1769}
1770
1771int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1772{
1773	struct btrfs_delayed_node *delayed_node;
1774	struct btrfs_inode_item *inode_item;
1775	struct btrfs_timespec *tspec;
1776
1777	delayed_node = btrfs_get_delayed_node(inode);
1778	if (!delayed_node)
1779		return -ENOENT;
1780
1781	mutex_lock(&delayed_node->mutex);
1782	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1783		mutex_unlock(&delayed_node->mutex);
1784		btrfs_release_delayed_node(delayed_node);
1785		return -ENOENT;
1786	}
1787
1788	inode_item = &delayed_node->inode_item;
1789
1790	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1791	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1792	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1793	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1794	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1795	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1796	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1797	inode->i_version = btrfs_stack_inode_sequence(inode_item);
1798	inode->i_rdev = 0;
1799	*rdev = btrfs_stack_inode_rdev(inode_item);
1800	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1801
1802	tspec = btrfs_inode_atime(inode_item);
1803	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1804	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1805
1806	tspec = btrfs_inode_mtime(inode_item);
1807	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1808	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1809
1810	tspec = btrfs_inode_ctime(inode_item);
1811	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1812	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1813
1814	inode->i_generation = BTRFS_I(inode)->generation;
1815	BTRFS_I(inode)->index_cnt = (u64)-1;
1816
1817	mutex_unlock(&delayed_node->mutex);
1818	btrfs_release_delayed_node(delayed_node);
1819	return 0;
1820}
1821
1822int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1823			       struct btrfs_root *root, struct inode *inode)
1824{
1825	struct btrfs_delayed_node *delayed_node;
1826	int ret = 0;
1827
1828	delayed_node = btrfs_get_or_create_delayed_node(inode);
1829	if (IS_ERR(delayed_node))
1830		return PTR_ERR(delayed_node);
1831
1832	mutex_lock(&delayed_node->mutex);
1833	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1834		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1835		goto release_node;
1836	}
1837
1838	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1839						   delayed_node);
1840	if (ret)
1841		goto release_node;
1842
1843	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1844	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1845	delayed_node->count++;
1846	atomic_inc(&root->fs_info->delayed_root->items);
1847release_node:
1848	mutex_unlock(&delayed_node->mutex);
1849	btrfs_release_delayed_node(delayed_node);
1850	return ret;
1851}
1852
1853int btrfs_delayed_delete_inode_ref(struct inode *inode)
1854{
1855	struct btrfs_delayed_node *delayed_node;
1856
1857	delayed_node = btrfs_get_or_create_delayed_node(inode);
1858	if (IS_ERR(delayed_node))
1859		return PTR_ERR(delayed_node);
1860
1861	/*
1862	 * We don't reserve space for inode ref deletion because:
1863	 * - We ONLY do async inode ref deletion for inodes that have only
1864	 *   one link (i_nlink == 1), which means there is only one inode ref.
1865	 *   In most cases the inode ref and the inode item are in the same
1866	 *   leaf, and we deal with them at the same time.  Since we are sure
1867	 *   we will reserve space for the inode item, it is unnecessary to
1868	 *   reserve space for the inode ref deletion as well.
1869	 * - If the inode ref and the inode item are not in the same leaf,
1870	 *   we still needn't worry about ENOSPC, because we reserve much
1871	 *   more space for the inode update than it needs.
1872	 * - At worst, we can steal some space from the global reservation.
1873	 *   That is very rare.
1874	 */
1875	mutex_lock(&delayed_node->mutex);
1876	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1877		goto release_node;
1878
1879	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1880	delayed_node->count++;
1881	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
1882release_node:
1883	mutex_unlock(&delayed_node->mutex);
1884	btrfs_release_delayed_node(delayed_node);
1885	return 0;
1886}
1887
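/*
 * Throw away everything a delayed node still holds: pending insertion
 * and deletion items, a queued inode-ref deletion, and a dirty inode
 * item, releasing the metadata reservations along the way.  Nothing is
 * written back to the tree.
 */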
1888static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1889{
1890	struct btrfs_root *root = delayed_node->root;
1891	struct btrfs_delayed_item *curr_item, *prev_item;
1892
1893	mutex_lock(&delayed_node->mutex);
1894	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1895	while (curr_item) {
1896		btrfs_delayed_item_release_metadata(root, curr_item);
1897		prev_item = curr_item;
1898		curr_item = __btrfs_next_delayed_item(prev_item);
1899		btrfs_release_delayed_item(prev_item);
1900	}
1901
1902	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1903	while (curr_item) {
1904		btrfs_delayed_item_release_metadata(root, curr_item);
1905		prev_item = curr_item;
1906		curr_item = __btrfs_next_delayed_item(prev_item);
1907		btrfs_release_delayed_item(prev_item);
1908	}
1909
1910	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1911		btrfs_release_delayed_iref(delayed_node);
1912
1913	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1914		btrfs_delayed_inode_release_metadata(root, delayed_node);
1915		btrfs_release_delayed_inode(delayed_node);
1916	}
1917	mutex_unlock(&delayed_node->mutex);
1918}
1919
1920void btrfs_kill_delayed_inode_items(struct inode *inode)
1921{
1922	struct btrfs_delayed_node *delayed_node;
1923
1924	delayed_node = btrfs_get_delayed_node(inode);
1925	if (!delayed_node)
1926		return;
1927
1928	__btrfs_kill_delayed_node(delayed_node);
1929	btrfs_release_delayed_node(delayed_node);
1930}
1931
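/*
 * Discard the delayed items of every inode in this root.  The radix
 * tree is walked in batches of ARRAY_SIZE(delayed_nodes): references
 * are taken under root->inode_lock, the nodes are killed outside of
 * it, and inode_id is advanced past the last node seen so the gang
 * lookup resumes where it left off.
 */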
1932void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1933{
1934	u64 inode_id = 0;
1935	struct btrfs_delayed_node *delayed_nodes[8];
1936	int i, n;
1937
1938	while (1) {
1939		spin_lock(&root->inode_lock);
1940		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1941					   (void **)delayed_nodes, inode_id,
1942					   ARRAY_SIZE(delayed_nodes));
1943		if (!n) {
1944			spin_unlock(&root->inode_lock);
1945			break;
1946		}
1947
1948		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1949
1950		for (i = 0; i < n; i++)
1951			atomic_inc(&delayed_nodes[i]->refs);
1952		spin_unlock(&root->inode_lock);
1953
1954		for (i = 0; i < n; i++) {
1955			__btrfs_kill_delayed_node(delayed_nodes[i]);
1956			btrfs_release_delayed_node(delayed_nodes[i]);
1957		}
1958	}
1959}
1960
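/*
 * Walk the fs-wide list of delayed nodes and throw away everything
 * that is still queued, without writing any of it back to the tree.
 */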
1961void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1962{
1963	struct btrfs_delayed_root *delayed_root;
1964	struct btrfs_delayed_node *curr_node, *prev_node;
1965
1966	delayed_root = btrfs_get_delayed_root(root);
1967
1968	curr_node = btrfs_first_delayed_node(delayed_root);
1969	while (curr_node) {
1970		__btrfs_kill_delayed_node(curr_node);
1971
1972		prev_node = curr_node;
1973		curr_node = btrfs_next_delayed_node(curr_node);
1974		btrfs_release_delayed_node(prev_node);
1975	}
1976}
1977
v3.5.6
   1/*
   2 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   3 * Written by Miao Xie <miaox@cn.fujitsu.com>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17 * Boston, MA 021110-1307, USA.
  18 */
  19
  20#include <linux/slab.h>
  21#include "delayed-inode.h"
  22#include "disk-io.h"
  23#include "transaction.h"
  24
  25#define BTRFS_DELAYED_WRITEBACK		400
  26#define BTRFS_DELAYED_BACKGROUND	100
  27
  28static struct kmem_cache *delayed_node_cache;
  29
  30int __init btrfs_delayed_inode_init(void)
  31{
  32	delayed_node_cache = kmem_cache_create("delayed_node",
  33					sizeof(struct btrfs_delayed_node),
  34					0,
  35					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
  36					NULL);
  37	if (!delayed_node_cache)
  38		return -ENOMEM;
  39	return 0;
  40}
  41
  42void btrfs_delayed_inode_exit(void)
  43{
  44	if (delayed_node_cache)
  45		kmem_cache_destroy(delayed_node_cache);
  46}
  47
  48static inline void btrfs_init_delayed_node(
  49				struct btrfs_delayed_node *delayed_node,
  50				struct btrfs_root *root, u64 inode_id)
  51{
  52	delayed_node->root = root;
  53	delayed_node->inode_id = inode_id;
  54	atomic_set(&delayed_node->refs, 0);
  55	delayed_node->count = 0;
  56	delayed_node->in_list = 0;
  57	delayed_node->inode_dirty = 0;
  58	delayed_node->ins_root = RB_ROOT;
  59	delayed_node->del_root = RB_ROOT;
  60	mutex_init(&delayed_node->mutex);
  61	delayed_node->index_cnt = 0;
  62	INIT_LIST_HEAD(&delayed_node->n_list);
  63	INIT_LIST_HEAD(&delayed_node->p_list);
  64	delayed_node->bytes_reserved = 0;
  65}
  66
  67static inline int btrfs_is_continuous_delayed_item(
  68					struct btrfs_delayed_item *item1,
  69					struct btrfs_delayed_item *item2)
  70{
  71	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  72	    item1->key.objectid == item2->key.objectid &&
  73	    item1->key.type == item2->key.type &&
  74	    item1->key.offset + 1 == item2->key.offset)
  75		return 1;
  76	return 0;
  77}
  78
  79static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
  80							struct btrfs_root *root)
  81{
  82	return root->fs_info->delayed_root;
  83}
  84
  85static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
  86{
  87	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
  88	struct btrfs_root *root = btrfs_inode->root;
  89	u64 ino = btrfs_ino(inode);
  90	struct btrfs_delayed_node *node;
  91
  92	node = ACCESS_ONCE(btrfs_inode->delayed_node);
  93	if (node) {
  94		atomic_inc(&node->refs);
  95		return node;
  96	}
  97
  98	spin_lock(&root->inode_lock);
  99	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 100	if (node) {
 101		if (btrfs_inode->delayed_node) {
 102			atomic_inc(&node->refs);	/* can be accessed */
 103			BUG_ON(btrfs_inode->delayed_node != node);
 104			spin_unlock(&root->inode_lock);
 105			return node;
 106		}
 107		btrfs_inode->delayed_node = node;
 108		atomic_inc(&node->refs);	/* can be accessed */
 109		atomic_inc(&node->refs);	/* cached in the inode */
 110		spin_unlock(&root->inode_lock);
 111		return node;
 112	}
 113	spin_unlock(&root->inode_lock);
 114
 115	return NULL;
 116}
 117
 118/* Will return either the node or PTR_ERR(-ENOMEM) */
 119static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 120							struct inode *inode)
 121{
 122	struct btrfs_delayed_node *node;
 123	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 124	struct btrfs_root *root = btrfs_inode->root;
 125	u64 ino = btrfs_ino(inode);
 126	int ret;
 127
 128again:
 129	node = btrfs_get_delayed_node(inode);
 130	if (node)
 131		return node;
 132
 133	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
 134	if (!node)
 135		return ERR_PTR(-ENOMEM);
 136	btrfs_init_delayed_node(node, root, ino);
 137
 138	atomic_inc(&node->refs);	/* cached in the btrfs inode */
 139	atomic_inc(&node->refs);	/* can be accessed */
 140
 141	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 142	if (ret) {
 143		kmem_cache_free(delayed_node_cache, node);
 144		return ERR_PTR(ret);
 145	}
 146
 147	spin_lock(&root->inode_lock);
 148	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 149	if (ret == -EEXIST) {
 150		kmem_cache_free(delayed_node_cache, node);
 151		spin_unlock(&root->inode_lock);
 152		radix_tree_preload_end();
 153		goto again;
 154	}
 155	btrfs_inode->delayed_node = node;
 156	spin_unlock(&root->inode_lock);
 157	radix_tree_preload_end();
 158
 159	return node;
 160}
 161
 162/*
 163 * Call this while holding delayed_node->mutex.
 164 *
 165 * If mod == 1, also add this node to the prepared list.
 166 */
 167static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 168				     struct btrfs_delayed_node *node,
 169				     int mod)
 170{
 171	spin_lock(&root->lock);
 172	if (node->in_list) {
 173		if (!list_empty(&node->p_list))
 174			list_move_tail(&node->p_list, &root->prepare_list);
 175		else if (mod)
 176			list_add_tail(&node->p_list, &root->prepare_list);
 177	} else {
 178		list_add_tail(&node->n_list, &root->node_list);
 179		list_add_tail(&node->p_list, &root->prepare_list);
 180		atomic_inc(&node->refs);	/* inserted into list */
 181		root->nodes++;
 182		node->in_list = 1;
 183	}
 184	spin_unlock(&root->lock);
 185}
 186
 187/* Call it when holding delayed_node->mutex */
 188static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 189				       struct btrfs_delayed_node *node)
 190{
 191	spin_lock(&root->lock);
 192	if (node->in_list) {
 193		root->nodes--;
 194		atomic_dec(&node->refs);	/* not in the list */
 195		list_del_init(&node->n_list);
 196		if (!list_empty(&node->p_list))
 197			list_del_init(&node->p_list);
 198		node->in_list = 0;
 199	}
 200	spin_unlock(&root->lock);
 201}
 202
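/*
 * Return the first delayed node on the fs-wide node list, holding an
 * extra reference, or NULL if the list is empty.
 */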
 203struct btrfs_delayed_node *btrfs_first_delayed_node(
 204			struct btrfs_delayed_root *delayed_root)
 205{
 206	struct list_head *p;
 207	struct btrfs_delayed_node *node = NULL;
 208
 209	spin_lock(&delayed_root->lock);
 210	if (list_empty(&delayed_root->node_list))
 211		goto out;
 212
 213	p = delayed_root->node_list.next;
 214	node = list_entry(p, struct btrfs_delayed_node, n_list);
 215	atomic_inc(&node->refs);
 216out:
 217	spin_unlock(&delayed_root->lock);
 218
 219	return node;
 220}
 221
 222struct btrfs_delayed_node *btrfs_next_delayed_node(
 223						struct btrfs_delayed_node *node)
 224{
 225	struct btrfs_delayed_root *delayed_root;
 226	struct list_head *p;
 227	struct btrfs_delayed_node *next = NULL;
 228
 229	delayed_root = node->root->fs_info->delayed_root;
 230	spin_lock(&delayed_root->lock);
 231	if (!node->in_list) {	/* not in the list */
 232		if (list_empty(&delayed_root->node_list))
 233			goto out;
 234		p = delayed_root->node_list.next;
 235	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
 236		goto out;
 237	else
 238		p = node->n_list.next;
 239
 240	next = list_entry(p, struct btrfs_delayed_node, n_list);
 241	atomic_inc(&next->refs);
 242out:
 243	spin_unlock(&delayed_root->lock);
 244
 245	return next;
 246}
 247
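/*
 * Drop one reference on a delayed node.  While the node still has
 * pending items it is (re)queued on the global lists; otherwise it is
 * dequeued.  When the last reference is dropped, the refcount is
 * re-checked under root->inode_lock before the node is deleted from
 * the radix tree and freed, which closes the race with a concurrent
 * btrfs_get_delayed_node() taking a new reference.
 */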
 248static void __btrfs_release_delayed_node(
 249				struct btrfs_delayed_node *delayed_node,
 250				int mod)
 251{
 252	struct btrfs_delayed_root *delayed_root;
 253
 254	if (!delayed_node)
 255		return;
 256
 257	delayed_root = delayed_node->root->fs_info->delayed_root;
 258
 259	mutex_lock(&delayed_node->mutex);
 260	if (delayed_node->count)
 261		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 262	else
 263		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 264	mutex_unlock(&delayed_node->mutex);
 265
 266	if (atomic_dec_and_test(&delayed_node->refs)) {
 267		struct btrfs_root *root = delayed_node->root;
 268		spin_lock(&root->inode_lock);
 269		if (atomic_read(&delayed_node->refs) == 0) {
 270			radix_tree_delete(&root->delayed_nodes_tree,
 271					  delayed_node->inode_id);
 272			kmem_cache_free(delayed_node_cache, delayed_node);
 273		}
 274		spin_unlock(&root->inode_lock);
 275	}
 276}
 277
 278static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 279{
 280	__btrfs_release_delayed_node(node, 0);
 281}
 282
 283struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 284					struct btrfs_delayed_root *delayed_root)
 285{
 286	struct list_head *p;
 287	struct btrfs_delayed_node *node = NULL;
 288
 289	spin_lock(&delayed_root->lock);
 290	if (list_empty(&delayed_root->prepare_list))
 291		goto out;
 292
 293	p = delayed_root->prepare_list.next;
 294	list_del_init(p);
 295	node = list_entry(p, struct btrfs_delayed_node, p_list);
 296	atomic_inc(&node->refs);
 297out:
 298	spin_unlock(&delayed_root->lock);
 299
 300	return node;
 301}
 302
 303static inline void btrfs_release_prepared_delayed_node(
 304					struct btrfs_delayed_node *node)
 305{
 306	__btrfs_release_delayed_node(node, 1);
 307}
 308
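/*
 * Allocate a delayed item with room for @data_len bytes of payload
 * placed directly behind the struct (item->data), e.g. a dir item
 * followed by its name.  The item starts with a single reference.
 */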
 309struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 310{
 311	struct btrfs_delayed_item *item;
 312	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 313	if (item) {
 314		item->data_len = data_len;
 315		item->ins_or_del = 0;
 316		item->bytes_reserved = 0;
 317		item->delayed_node = NULL;
 318		atomic_set(&item->refs, 1);
 319	}
 320	return item;
 321}
 322
 323/*
 324 * __btrfs_lookup_delayed_item - look up the delayed item by key
 325 * @root:	  the root of the rb-tree to search
 326 * @key:	  the key to look up
 327 * @prev:	  used to store the prev item if the right item isn't found
 328 * @next:	  used to store the next item if the right item isn't found
 329 *
 330 * Note: if we don't find the right item, we return NULL and store the
 331 * prev and the next items via @prev and @next.
 332 */
 333static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 334				struct rb_root *root,
 335				struct btrfs_key *key,
 336				struct btrfs_delayed_item **prev,
 337				struct btrfs_delayed_item **next)
 338{
 339	struct rb_node *node, *prev_node = NULL;
 340	struct btrfs_delayed_item *delayed_item = NULL;
 341	int ret = 0;
 342
 343	node = root->rb_node;
 344
 345	while (node) {
 346		delayed_item = rb_entry(node, struct btrfs_delayed_item,
 347					rb_node);
 348		prev_node = node;
 349		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 350		if (ret < 0)
 351			node = node->rb_right;
 352		else if (ret > 0)
 353			node = node->rb_left;
 354		else
 355			return delayed_item;
 356	}
 357
 358	if (prev) {
 359		if (!prev_node)
 360			*prev = NULL;
 361		else if (ret < 0)
 362			*prev = delayed_item;
 363		else if ((node = rb_prev(prev_node)) != NULL) {
 364			*prev = rb_entry(node, struct btrfs_delayed_item,
 365					 rb_node);
 366		} else
 367			*prev = NULL;
 368	}
 369
 370	if (next) {
 371		if (!prev_node)
 372			*next = NULL;
 373		else if (ret > 0)
 374			*next = delayed_item;
 375		else if ((node = rb_next(prev_node)) != NULL) {
 376			*next = rb_entry(node, struct btrfs_delayed_item,
 377					 rb_node);
 378		} else
 379			*next = NULL;
 380	}
 381	return NULL;
 382}
 383
 384struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 385					struct btrfs_delayed_node *delayed_node,
 386					struct btrfs_key *key)
 387{
 388	struct btrfs_delayed_item *item;
 389
 390	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 391					   NULL, NULL);
 392	return item;
 393}
 394
 395struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
 396					struct btrfs_delayed_node *delayed_node,
 397					struct btrfs_key *key)
 398{
 399	struct btrfs_delayed_item *item;
 400
 401	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
 402					   NULL, NULL);
 403	return item;
 404}
 405
 406struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
 407					struct btrfs_delayed_node *delayed_node,
 408					struct btrfs_key *key)
 409{
 410	struct btrfs_delayed_item *item, *next;
 411
 412	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 413					   NULL, &next);
 414	if (!item)
 415		item = next;
 416
 417	return item;
 418}
 419
 420struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
 421					struct btrfs_delayed_node *delayed_node,
 422					struct btrfs_key *key)
 423{
 424	struct btrfs_delayed_item *item, *next;
 425
 426	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
 427					   NULL, &next);
 428	if (!item)
 429		item = next;
 430
 431	return item;
 432}
 433
 434static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 435				    struct btrfs_delayed_item *ins,
 436				    int action)
 437{
 438	struct rb_node **p, *node;
 439	struct rb_node *parent_node = NULL;
 440	struct rb_root *root;
 441	struct btrfs_delayed_item *item;
 442	int cmp;
 443
 444	if (action == BTRFS_DELAYED_INSERTION_ITEM)
 445		root = &delayed_node->ins_root;
 446	else if (action == BTRFS_DELAYED_DELETION_ITEM)
 447		root = &delayed_node->del_root;
 448	else
 449		BUG();
 450	p = &root->rb_node;
 451	node = &ins->rb_node;
 452
 453	while (*p) {
 454		parent_node = *p;
 455		item = rb_entry(parent_node, struct btrfs_delayed_item,
 456				 rb_node);
 457
 458		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 459		if (cmp < 0)
 460			p = &(*p)->rb_right;
 461		else if (cmp > 0)
 462			p = &(*p)->rb_left;
 463		else
 464			return -EEXIST;
 465	}
 466
 467	rb_link_node(node, parent_node, p);
 468	rb_insert_color(node, root);
 469	ins->delayed_node = delayed_node;
 470	ins->ins_or_del = action;
 471
 472	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 473	    action == BTRFS_DELAYED_INSERTION_ITEM &&
 474	    ins->key.offset >= delayed_node->index_cnt)
 475			delayed_node->index_cnt = ins->key.offset + 1;
 476
 477	delayed_node->count++;
 478	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 479	return 0;
 480}
 481
 482static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 483					      struct btrfs_delayed_item *item)
 484{
 485	return __btrfs_add_delayed_item(node, item,
 486					BTRFS_DELAYED_INSERTION_ITEM);
 487}
 488
 489static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 490					     struct btrfs_delayed_item *item)
 491{
 492	return __btrfs_add_delayed_item(node, item,
 493					BTRFS_DELAYED_DELETION_ITEM);
 494}
 495
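/*
 * Unlink a delayed item from its node's insertion or deletion rb-tree
 * and drop the fs-wide item count; writers throttled in
 * btrfs_balance_delayed_items() are woken once the count falls below
 * BTRFS_DELAYED_BACKGROUND.
 */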
 496static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 497{
 498	struct rb_root *root;
 499	struct btrfs_delayed_root *delayed_root;
 500
 501	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 502
 503	BUG_ON(!delayed_root);
 504	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 505	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 506
 507	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 508		root = &delayed_item->delayed_node->ins_root;
 509	else
 510		root = &delayed_item->delayed_node->del_root;
 511
 512	rb_erase(&delayed_item->rb_node, root);
 513	delayed_item->delayed_node->count--;
 514	atomic_dec(&delayed_root->items);
 515	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
 516	    waitqueue_active(&delayed_root->wait))
 517		wake_up(&delayed_root->wait);
 518}
 519
 520static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 521{
 522	if (item) {
 523		__btrfs_remove_delayed_item(item);
 524		if (atomic_dec_and_test(&item->refs))
 525			kfree(item);
 526	}
 527}
 528
 529struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 530					struct btrfs_delayed_node *delayed_node)
 531{
 532	struct rb_node *p;
 533	struct btrfs_delayed_item *item = NULL;
 534
 535	p = rb_first(&delayed_node->ins_root);
 536	if (p)
 537		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 538
 539	return item;
 540}
 541
 542struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 543					struct btrfs_delayed_node *delayed_node)
 544{
 545	struct rb_node *p;
 546	struct btrfs_delayed_item *item = NULL;
 547
 548	p = rb_first(&delayed_node->del_root);
 549	if (p)
 550		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 551
 552	return item;
 553}
 554
 555struct btrfs_delayed_item *__btrfs_next_delayed_item(
 556						struct btrfs_delayed_item *item)
 557{
 558	struct rb_node *p;
 559	struct btrfs_delayed_item *next = NULL;
 560
 561	p = rb_next(&item->rb_node);
 562	if (p)
 563		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 564
 565	return next;
 566}
 567
 568static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
 569						   u64 root_id)
 570{
 571	struct btrfs_key root_key;
 572
 573	if (root->objectid == root_id)
 574		return root;
 575
 576	root_key.objectid = root_id;
 577	root_key.type = BTRFS_ROOT_ITEM_KEY;
 578	root_key.offset = (u64)-1;
 579	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
 580}
 581
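/*
 * Reserve metadata space for one delayed item by migrating the space
 * for a single tree operation from the transaction's reservation into
 * the global fs_info->delayed_block_rsv, so the item can be flushed
 * later, outside this transaction's accounting.  Paired with
 * btrfs_delayed_item_release_metadata() below.
 */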
 582static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 583					       struct btrfs_root *root,
 584					       struct btrfs_delayed_item *item)
 585{
 586	struct btrfs_block_rsv *src_rsv;
 587	struct btrfs_block_rsv *dst_rsv;
 588	u64 num_bytes;
 589	int ret;
 590
 591	if (!trans->bytes_reserved)
 592		return 0;
 593
 594	src_rsv = trans->block_rsv;
 595	dst_rsv = &root->fs_info->delayed_block_rsv;
 596
 597	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 598	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 599	if (!ret) {
 600		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 601					      item->key.objectid,
 602					      num_bytes, 1);
 603		item->bytes_reserved = num_bytes;
 604	}
 605
 606	return ret;
 607}
 608
 609static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 610						struct btrfs_delayed_item *item)
 611{
 612	struct btrfs_block_rsv *rsv;
 613
 614	if (!item->bytes_reserved)
 615		return;
 616
 617	rsv = &root->fs_info->delayed_block_rsv;
 618	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 619				      item->key.objectid, item->bytes_reserved,
 620				      0);
 621	btrfs_block_rsv_release(root, rsv,
 622				item->bytes_reserved);
 623}
 624
 625static int btrfs_delayed_inode_reserve_metadata(
 626					struct btrfs_trans_handle *trans,
 627					struct btrfs_root *root,
 628					struct inode *inode,
 629					struct btrfs_delayed_node *node)
 630{
 631	struct btrfs_block_rsv *src_rsv;
 632	struct btrfs_block_rsv *dst_rsv;
 633	u64 num_bytes;
 634	int ret;
 635	bool release = false;
 636
 637	src_rsv = trans->block_rsv;
 638	dst_rsv = &root->fs_info->delayed_block_rsv;
 639
 640	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 641
 642	/*
 643	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
 644	 * which, for speed, doesn't reserve space.  This is a problem since we
 645	 * still need to reserve space for this update, so try to reserve the
 646	 * space here.
 647	 *
 648	 * If src_rsv == delalloc_block_rsv we'll let it just steal, since the
 649	 * space is already accounted for there.
 650	 */
 651	if (!src_rsv || (!trans->bytes_reserved &&
 652	    src_rsv != &root->fs_info->delalloc_block_rsv)) {
 653		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
 654		/*
 655		 * Since we're under a transaction reserve_metadata_bytes could
 656		 * try to commit the transaction which will make it return
 657		 * EAGAIN to make us stop the transaction we have, so return
 658		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 659		 */
 660		if (ret == -EAGAIN)
 661			ret = -ENOSPC;
 662		if (!ret) {
 663			node->bytes_reserved = num_bytes;
 664			trace_btrfs_space_reservation(root->fs_info,
 665						      "delayed_inode",
 666						      btrfs_ino(inode),
 667						      num_bytes, 1);
 668		}
 669		return ret;
 670	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
 671		spin_lock(&BTRFS_I(inode)->lock);
 672		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 673				       &BTRFS_I(inode)->runtime_flags)) {
 674			spin_unlock(&BTRFS_I(inode)->lock);
 675			release = true;
 676			goto migrate;
 677		}
 678		spin_unlock(&BTRFS_I(inode)->lock);
 679
 680		/* Ok we didn't have space pre-reserved.  This shouldn't happen
 681		 * too often but it can happen if we do delalloc to an existing
 682		 * inode which gets dirtied because of the time update, and then
 683		 * isn't touched again until after the transaction commits and
 684		 * then we try to write out the data.  First try to be nice and
 685		 * reserve something strictly for us.  If not, be a pain and try
 686		 * to steal from the delalloc block rsv.
 687		 */
 688		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
 689		if (!ret)
 690			goto out;
 691
 692		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 693		if (!ret)
 694			goto out;
 695
 696		/*
 697		 * Ok this is a problem, let's just steal from the global rsv
 698		 * since this really shouldn't happen that often.
 699		 */
 700		WARN_ON(1);
 701		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
 702					      dst_rsv, num_bytes);
 703		goto out;
 704	}
 705
 706migrate:
 707	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 708
 709out:
 710	/*
 711	 * Migrate only takes a reservation, it doesn't touch the size of the
 712	 * block_rsv.  This is to simplify people who don't normally have things
 713	 * migrated from their block rsv.  If they go to release their
 714	 * reservation, that will decrease the size as well, so if migrate
 715	 * reduced size we'd end up with a negative size.  But for the
 716	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 717	 * but we could in fact do this reserve/migrate dance several times
 718	 * between the time we did the original reservation and we'd clean it
 719	 * up.  So to take care of this, release the space for the meta
 720	 * reservation here.  I think it may be time for a documentation page on
 721	 * how block rsvs work.
 722	 */
 723	if (!ret) {
 724		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 725					      btrfs_ino(inode), num_bytes, 1);
 726		node->bytes_reserved = num_bytes;
 727	}
 728
 729	if (release) {
 730		trace_btrfs_space_reservation(root->fs_info, "delalloc",
 731					      btrfs_ino(inode), num_bytes, 0);
 732		btrfs_block_rsv_release(root, src_rsv, num_bytes);
 733	}
 734
 735	return ret;
 736}
 737
 738static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 739						struct btrfs_delayed_node *node)
 740{
 741	struct btrfs_block_rsv *rsv;
 742
 743	if (!node->bytes_reserved)
 744		return;
 745
 746	rsv = &root->fs_info->delayed_block_rsv;
 747	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 748				      node->inode_id, node->bytes_reserved, 0);
 749	btrfs_block_rsv_release(root, rsv,
 750				node->bytes_reserved);
 751	node->bytes_reserved = 0;
 752}
 753
 754/*
 755 * This helper will insert some continuous items into the same leaf according
 756 * to the free space of the leaf.
 757 */
 758static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
 759				struct btrfs_root *root,
 760				struct btrfs_path *path,
 761				struct btrfs_delayed_item *item)
 762{
 763	struct btrfs_delayed_item *curr, *next;
 764	int free_space;
 765	int total_data_size = 0, total_size = 0;
 766	struct extent_buffer *leaf;
 767	char *data_ptr;
 768	struct btrfs_key *keys;
 769	u32 *data_size;
 770	struct list_head head;
 771	int slot;
 772	int nitems;
 773	int i;
 774	int ret = 0;
 775
 776	BUG_ON(!path->nodes[0]);
 777
 778	leaf = path->nodes[0];
 779	free_space = btrfs_leaf_free_space(root, leaf);
 780	INIT_LIST_HEAD(&head);
 781
 782	next = item;
 783	nitems = 0;
 784
 785	/*
 786	 * Count the number of continuous items that we can insert in a batch.
 787	 */
 788	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 789	       free_space) {
 790		total_data_size += next->data_len;
 791		total_size += next->data_len + sizeof(struct btrfs_item);
 792		list_add_tail(&next->tree_list, &head);
 793		nitems++;
 794
 795		curr = next;
 796		next = __btrfs_next_delayed_item(curr);
 797		if (!next)
 798			break;
 799
 800		if (!btrfs_is_continuous_delayed_item(curr, next))
 801			break;
 802	}
 803
 804	if (!nitems) {
 805		ret = 0;
 806		goto out;
 807	}
 808
 809	/*
 810	 * We need to allocate some memory, but that might cause the task
 811	 * to sleep, so we set all locked nodes in the path to blocking locks
 812	 * first.
 813	 */
 814	btrfs_set_path_blocking(path);
 815
 816	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
 817	if (!keys) {
 818		ret = -ENOMEM;
 819		goto out;
 820	}
 821
 822	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
 823	if (!data_size) {
 824		ret = -ENOMEM;
 825		goto error;
 826	}
 827
 828	/* get keys of all the delayed items */
 829	i = 0;
 830	list_for_each_entry(next, &head, tree_list) {
 831		keys[i] = next->key;
 832		data_size[i] = next->data_len;
 833		i++;
 834	}
 835
 836	/* reset all the locked nodes in the path to spinning locks. */
 837	btrfs_clear_path_blocking(path, NULL, 0);
 838
 839	/* insert the keys of the items */
 840	setup_items_for_insert(trans, root, path, keys, data_size,
 841			       total_data_size, total_size, nitems);
 842
 843	/* insert the dir index items */
 844	slot = path->slots[0];
 845	list_for_each_entry_safe(curr, next, &head, tree_list) {
 846		data_ptr = btrfs_item_ptr(leaf, slot, char);
 847		write_extent_buffer(leaf, &curr->data,
 848				    (unsigned long)data_ptr,
 849				    curr->data_len);
 850		slot++;
 851
 852		btrfs_delayed_item_release_metadata(root, curr);
 853
 854		list_del(&curr->tree_list);
 855		btrfs_release_delayed_item(curr);
 856	}
 857
 858error:
 859	kfree(data_size);
 860	kfree(keys);
 861out:
 862	return ret;
 863}
 864
 865/*
 866 * This helper handles simple insertions that don't need to extend an item
 867 * for new data, such as directory name index or inode insertion.
 868 */
 869static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 870				     struct btrfs_root *root,
 871				     struct btrfs_path *path,
 872				     struct btrfs_delayed_item *delayed_item)
 873{
 874	struct extent_buffer *leaf;
 875	struct btrfs_item *item;
 876	char *ptr;
 877	int ret;
 878
 879	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 880				      delayed_item->data_len);
 881	if (ret < 0 && ret != -EEXIST)
 882		return ret;
 883
 884	leaf = path->nodes[0];
 885
 886	item = btrfs_item_nr(leaf, path->slots[0]);
 887	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 888
 889	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 890			    delayed_item->data_len);
 891	btrfs_mark_buffer_dirty(leaf);
 892
 893	btrfs_delayed_item_release_metadata(root, delayed_item);
 894	return 0;
 895}
 896
 897/*
 898 * We insert an item first and then, if there are continuous items after
 899 * it, we try to insert those items into the same leaf.
 900 */
 901static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 902				      struct btrfs_path *path,
 903				      struct btrfs_root *root,
 904				      struct btrfs_delayed_node *node)
 905{
 906	struct btrfs_delayed_item *curr, *prev;
 907	int ret = 0;
 908
 909do_again:
 910	mutex_lock(&node->mutex);
 911	curr = __btrfs_first_delayed_insertion_item(node);
 912	if (!curr)
 913		goto insert_end;
 914
 915	ret = btrfs_insert_delayed_item(trans, root, path, curr);
 916	if (ret < 0) {
 917		btrfs_release_path(path);
 918		goto insert_end;
 919	}
 920
 921	prev = curr;
 922	curr = __btrfs_next_delayed_item(prev);
 923	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 924		/* insert the continuous items into the same leaf */
 925		path->slots[0]++;
 926		btrfs_batch_insert_items(trans, root, path, curr);
 927	}
 928	btrfs_release_delayed_item(prev);
 929	btrfs_mark_buffer_dirty(path->nodes[0]);
 930
 931	btrfs_release_path(path);
 932	mutex_unlock(&node->mutex);
 933	goto do_again;
 934
 935insert_end:
 936	mutex_unlock(&node->mutex);
 937	return ret;
 938}
 939
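/*
 * Starting from the slot the caller located, collect the run of
 * contiguous dir index items that also exist in this leaf and remove
 * them with a single btrfs_del_items() call.
 */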
 940static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 941				    struct btrfs_root *root,
 942				    struct btrfs_path *path,
 943				    struct btrfs_delayed_item *item)
 944{
 945	struct btrfs_delayed_item *curr, *next;
 946	struct extent_buffer *leaf;
 947	struct btrfs_key key;
 948	struct list_head head;
 949	int nitems, i, last_item;
 950	int ret = 0;
 951
 952	BUG_ON(!path->nodes[0]);
 953
 954	leaf = path->nodes[0];
 955
 956	i = path->slots[0];
 957	last_item = btrfs_header_nritems(leaf) - 1;
 958	if (i > last_item)
 959		return -ENOENT;	/* FIXME: Is errno suitable? */
 960
 961	next = item;
 962	INIT_LIST_HEAD(&head);
 963	btrfs_item_key_to_cpu(leaf, &key, i);
 964	nitems = 0;
 965	/*
 966	 * Count the number of dir index items that we can delete in a batch.
 967	 */
 968	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 969		list_add_tail(&next->tree_list, &head);
 970		nitems++;
 971
 972		curr = next;
 973		next = __btrfs_next_delayed_item(curr);
 974		if (!next)
 975			break;
 976
 977		if (!btrfs_is_continuous_delayed_item(curr, next))
 978			break;
 979
 980		i++;
 981		if (i > last_item)
 982			break;
 983		btrfs_item_key_to_cpu(leaf, &key, i);
 984	}
 985
 986	if (!nitems)
 987		return 0;
 988
 989	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 990	if (ret)
 991		goto out;
 992
 993	list_for_each_entry_safe(curr, next, &head, tree_list) {
 994		btrfs_delayed_item_release_metadata(root, curr);
 995		list_del(&curr->tree_list);
 996		btrfs_release_delayed_item(curr);
 997	}
 998
 999out:
1000	return ret;
1001}
1002
1003static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
1004				      struct btrfs_path *path,
1005				      struct btrfs_root *root,
1006				      struct btrfs_delayed_node *node)
1007{
1008	struct btrfs_delayed_item *curr, *prev;
1009	int ret = 0;
1010
1011do_again:
1012	mutex_lock(&node->mutex);
1013	curr = __btrfs_first_delayed_deletion_item(node);
1014	if (!curr)
1015		goto delete_fail;
1016
1017	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
1018	if (ret < 0)
1019		goto delete_fail;
1020	else if (ret > 0) {
1021		/*
1022		 * We can't find the btree item this delayed item points to, so
1023		 * the delayed item is stale; just drop it.
1024		 */
1025		prev = curr;
1026		curr = __btrfs_next_delayed_item(prev);
1027		btrfs_release_delayed_item(prev);
1028		ret = 0;
1029		btrfs_release_path(path);
1030		if (curr)
1031			goto do_again;
1032		else
1033			goto delete_fail;
1034	}
1035
1036	btrfs_batch_delete_items(trans, root, path, curr);
1037	btrfs_release_path(path);
1038	mutex_unlock(&node->mutex);
1039	goto do_again;
1040
1041delete_fail:
1042	btrfs_release_path(path);
1043	mutex_unlock(&node->mutex);
1044	return ret;
1045}
1046
1047static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1048{
1049	struct btrfs_delayed_root *delayed_root;
1050
1051	if (delayed_node && delayed_node->inode_dirty) {
1052		BUG_ON(!delayed_node->root);
1053		delayed_node->inode_dirty = 0;
1054		delayed_node->count--;
1055
1056		delayed_root = delayed_node->root->fs_info->delayed_root;
1057		atomic_dec(&delayed_root->items);
1058		if (atomic_read(&delayed_root->items) <
1059		    BTRFS_DELAYED_BACKGROUND &&
1060		    waitqueue_active(&delayed_root->wait))
1061			wake_up(&delayed_root->wait);
1062	}
1063}
1064
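/*
 * Flush a dirty delayed inode: find the INODE_ITEM in the tree,
 * overwrite it with the in-memory copy kept in the delayed node, then
 * release the metadata reservation and clear the dirty state.
 */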
1065static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1066				      struct btrfs_root *root,
1067				      struct btrfs_path *path,
1068				      struct btrfs_delayed_node *node)
1069{
1070	struct btrfs_key key;
1071	struct btrfs_inode_item *inode_item;
1072	struct extent_buffer *leaf;
1073	int ret;
1074
1075	mutex_lock(&node->mutex);
1076	if (!node->inode_dirty) {
1077		mutex_unlock(&node->mutex);
1078		return 0;
1079	}
1080
1081	key.objectid = node->inode_id;
1082	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1083	key.offset = 0;
1084	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
1085	if (ret > 0) {
1086		btrfs_release_path(path);
1087		mutex_unlock(&node->mutex);
1088		return -ENOENT;
1089	} else if (ret < 0) {
1090		mutex_unlock(&node->mutex);
1091		return ret;
1092	}
1093
1094	btrfs_unlock_up_safe(path, 1);
1095	leaf = path->nodes[0];
1096	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1097				    struct btrfs_inode_item);
1098	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1099			    sizeof(struct btrfs_inode_item));
1100	btrfs_mark_buffer_dirty(leaf);
1101	btrfs_release_path(path);
1102
1103	btrfs_delayed_inode_release_metadata(root, node);
1104	btrfs_release_delayed_inode(node);
1105	mutex_unlock(&node->mutex);
1106
1107	return 0;
1108}
1109
1110/*
1111 * Called when committing the transaction.
1112 * Returns 0 on success.
1113 * Returns < 0 on error and returns with an aborted transaction with any
1114 * outstanding delayed items cleaned up.
1115 */
1116int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1117			    struct btrfs_root *root)
1118{
1119	struct btrfs_root *curr_root = root;
1120	struct btrfs_delayed_root *delayed_root;
1121	struct btrfs_delayed_node *curr_node, *prev_node;
1122	struct btrfs_path *path;
1123	struct btrfs_block_rsv *block_rsv;
1124	int ret = 0;
1125
1126	if (trans->aborted)
1127		return -EIO;
1128
1129	path = btrfs_alloc_path();
1130	if (!path)
1131		return -ENOMEM;
1132	path->leave_spinning = 1;
1133
1134	block_rsv = trans->block_rsv;
1135	trans->block_rsv = &root->fs_info->delayed_block_rsv;
1136
1137	delayed_root = btrfs_get_delayed_root(root);
1138
1139	curr_node = btrfs_first_delayed_node(delayed_root);
1140	while (curr_node) {
1141		curr_root = curr_node->root;
1142		ret = btrfs_insert_delayed_items(trans, path, curr_root,
1143						 curr_node);
1144		if (!ret)
1145			ret = btrfs_delete_delayed_items(trans, path,
1146						curr_root, curr_node);
1147		if (!ret)
1148			ret = btrfs_update_delayed_inode(trans, curr_root,
1149						path, curr_node);
1150		if (ret) {
1151			btrfs_release_delayed_node(curr_node);
1152			btrfs_abort_transaction(trans, root, ret);
1153			break;
1154		}
1155
1156		prev_node = curr_node;
1157		curr_node = btrfs_next_delayed_node(curr_node);
1158		btrfs_release_delayed_node(prev_node);
1159	}
1160
1161	btrfs_free_path(path);
1162	trans->block_rsv = block_rsv;
1163
1164	return ret;
1165}
1166
1167static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1168					      struct btrfs_delayed_node *node)
1169{
1170	struct btrfs_path *path;
1171	struct btrfs_block_rsv *block_rsv;
1172	int ret;
1173
1174	path = btrfs_alloc_path();
1175	if (!path)
1176		return -ENOMEM;
1177	path->leave_spinning = 1;
1178
1179	block_rsv = trans->block_rsv;
1180	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
1181
1182	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1183	if (!ret)
1184		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1185	if (!ret)
1186		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1187	btrfs_free_path(path);
1188
1189	trans->block_rsv = block_rsv;
1190	return ret;
1191}
1192
1193int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1194				     struct inode *inode)
1195{
1196	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1197	int ret;
1198
1199	if (!delayed_node)
1200		return 0;
1201
1202	mutex_lock(&delayed_node->mutex);
1203	if (!delayed_node->count) {
1204		mutex_unlock(&delayed_node->mutex);
1205		btrfs_release_delayed_node(delayed_node);
1206		return 0;
1207	}
1208	mutex_unlock(&delayed_node->mutex);
1209
1210	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
1211	btrfs_release_delayed_node(delayed_node);
1212	return ret;
1213}
1214
1215void btrfs_remove_delayed_node(struct inode *inode)
1216{
1217	struct btrfs_delayed_node *delayed_node;
1218
1219	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1220	if (!delayed_node)
1221		return;
1222
1223	BTRFS_I(inode)->delayed_node = NULL;
1224	btrfs_release_delayed_node(delayed_node);
1225}
1226
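/*
 * Work item handed to the fs_info->delayed_workers pool.  Each one
 * flushes a single prepared delayed node in the background via
 * btrfs_async_run_delayed_node_done(), requeueing itself if new items
 * arrive while it runs.
 */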
1227struct btrfs_async_delayed_node {
1228	struct btrfs_root *root;
1229	struct btrfs_delayed_node *delayed_node;
1230	struct btrfs_work work;
1231};
1232
1233static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1234{
1235	struct btrfs_async_delayed_node *async_node;
1236	struct btrfs_trans_handle *trans;
1237	struct btrfs_path *path;
1238	struct btrfs_delayed_node *delayed_node = NULL;
1239	struct btrfs_root *root;
1240	struct btrfs_block_rsv *block_rsv;
1241	unsigned long nr = 0;
1242	int need_requeue = 0;
1243	int ret;
1244
1245	async_node = container_of(work, struct btrfs_async_delayed_node, work);
1246
1247	path = btrfs_alloc_path();
1248	if (!path)
1249		goto out;
1250	path->leave_spinning = 1;
1251
1252	delayed_node = async_node->delayed_node;
1253	root = delayed_node->root;
1254
1255	trans = btrfs_join_transaction(root);
1256	if (IS_ERR(trans))
1257		goto free_path;
1258
1259	block_rsv = trans->block_rsv;
1260	trans->block_rsv = &root->fs_info->delayed_block_rsv;
1261
1262	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1263	if (!ret)
1264		ret = btrfs_delete_delayed_items(trans, path, root,
1265						 delayed_node);
1266
1267	if (!ret)
1268		btrfs_update_delayed_inode(trans, root, path, delayed_node);
1269
1270	/*
1271	 * Maybe new delayed items have been inserted, so we need to requeue
1272	 * the work. Besides that, we must dequeue the empty delayed nodes
1273	 * to avoid the race between delayed items balance and the worker.
1274	 * The race looks like this:
1275	 * 	Task1				Worker thread
1276	 * 					count == 0, needn't requeue
1277	 * 					  also needn't insert the
1278	 * 					  delayed node into prepare
1279	 * 					  list again.
1280	 * 	add lots of delayed items
1281	 * 	queue the delayed node
1282	 * 	  already in the list,
1283	 * 	  and not in the prepare
1284	 * 	  list, it means the delayed
1285	 * 	  node is being dealt with
1286	 * 	  by the worker.
1287	 * 	do delayed items balance
1288	 * 	  the delayed node is being
1289	 * 	  dealt with by the worker
1290	 * 	  now, just wait.
1291	 * 	  				the worker goto idle.
1292	 * Task1 will sleep until the transaction is committed.
1293	 */
1294	mutex_lock(&delayed_node->mutex);
1295	if (delayed_node->count)
1296		need_requeue = 1;
1297	else
1298		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
1299					   delayed_node);
1300	mutex_unlock(&delayed_node->mutex);
1301
1302	nr = trans->blocks_used;
1303
1304	trans->block_rsv = block_rsv;
1305	btrfs_end_transaction_dmeta(trans, root);
1306	__btrfs_btree_balance_dirty(root, nr);
1307free_path:
1308	btrfs_free_path(path);
1309out:
1310	if (need_requeue)
1311		btrfs_requeue_work(&async_node->work);
1312	else {
1313		btrfs_release_prepared_delayed_node(delayed_node);
1314		kfree(async_node);
1315	}
1316}
1317
1318static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1319				     struct btrfs_root *root, int all)
1320{
1321	struct btrfs_async_delayed_node *async_node;
1322	struct btrfs_delayed_node *curr;
1323	int count = 0;
1324
1325again:
1326	curr = btrfs_first_prepared_delayed_node(delayed_root);
1327	if (!curr)
1328		return 0;
1329
1330	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
1331	if (!async_node) {
1332		btrfs_release_prepared_delayed_node(curr);
1333		return -ENOMEM;
1334	}
1335
1336	async_node->root = root;
1337	async_node->delayed_node = curr;
1338
1339	async_node->work.func = btrfs_async_run_delayed_node_done;
1340	async_node->work.flags = 0;
1341
1342	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
1343	count++;
1344
1345	if (all || count < 4)
1346		goto again;
1347
1348	return 0;
1349}
1350
1351void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1352{
1353	struct btrfs_delayed_root *delayed_root;
1354	delayed_root = btrfs_get_delayed_root(root);
1355	WARN_ON(btrfs_first_delayed_node(delayed_root));
1356}
1357
1358void btrfs_balance_delayed_items(struct btrfs_root *root)
1359{
1360	struct btrfs_delayed_root *delayed_root;
1361
1362	delayed_root = btrfs_get_delayed_root(root);
1363
1364	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1365		return;
1366
1367	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1368		int ret;
1369		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
1370		if (ret)
1371			return;
1372
1373		wait_event_interruptible_timeout(
1374				delayed_root->wait,
1375				(atomic_read(&delayed_root->items) <
1376				 BTRFS_DELAYED_BACKGROUND),
1377				HZ);
1378		return;
1379	}
1380
1381	btrfs_wq_run_delayed_node(delayed_root, root, 0);
1382}
1383
1384/* Will return 0 or -ENOMEM */
1385int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1386				   struct btrfs_root *root, const char *name,
1387				   int name_len, struct inode *dir,
1388				   struct btrfs_disk_key *disk_key, u8 type,
1389				   u64 index)
1390{
1391	struct btrfs_delayed_node *delayed_node;
1392	struct btrfs_delayed_item *delayed_item;
1393	struct btrfs_dir_item *dir_item;
1394	int ret;
1395
1396	delayed_node = btrfs_get_or_create_delayed_node(dir);
1397	if (IS_ERR(delayed_node))
1398		return PTR_ERR(delayed_node);
1399
1400	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1401	if (!delayed_item) {
1402		ret = -ENOMEM;
1403		goto release_node;
1404	}
1405
1406	delayed_item->key.objectid = btrfs_ino(dir);
1407	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
1408	delayed_item->key.offset = index;
1409
1410	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1411	dir_item->location = *disk_key;
1412	dir_item->transid = cpu_to_le64(trans->transid);
1413	dir_item->data_len = 0;
1414	dir_item->name_len = cpu_to_le16(name_len);
1415	dir_item->type = type;
1416	memcpy((char *)(dir_item + 1), name, name_len);
1417
1418	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1419	/*
1420	 * we reserved enough space when we started a new transaction,
1421	 * so the metadata reservation here cannot fail
1422	 */
1423	BUG_ON(ret);
1424
1425
1426	mutex_lock(&delayed_node->mutex);
1427	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1428	if (unlikely(ret)) {
1429		printk(KERN_ERR "error adding delayed dir index item (name: %s) "
1430				"into the insertion tree of the delayed node "
1431				"(root id: %llu, inode id: %llu, errno: %d)\n",
1432				name,
1433				(unsigned long long)delayed_node->root->objectid,
1434				(unsigned long long)delayed_node->inode_id,
1435				ret);
1436		BUG();
1437	}
1438	mutex_unlock(&delayed_node->mutex);
1439
1440release_node:
1441	btrfs_release_delayed_node(delayed_node);
1442	return ret;
1443}
1444
1445static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1446					       struct btrfs_delayed_node *node,
1447					       struct btrfs_key *key)
1448{
1449	struct btrfs_delayed_item *item;
1450
1451	mutex_lock(&node->mutex);
1452	item = __btrfs_lookup_delayed_insertion_item(node, key);
1453	if (!item) {
1454		mutex_unlock(&node->mutex);
1455		return 1;
1456	}
1457
1458	btrfs_delayed_item_release_metadata(root, item);
1459	btrfs_release_delayed_item(item);
1460	mutex_unlock(&node->mutex);
1461	return 0;
1462}
1463
1464int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1465				   struct btrfs_root *root, struct inode *dir,
1466				   u64 index)
1467{
1468	struct btrfs_delayed_node *node;
1469	struct btrfs_delayed_item *item;
1470	struct btrfs_key item_key;
1471	int ret;
1472
1473	node = btrfs_get_or_create_delayed_node(dir);
1474	if (IS_ERR(node))
1475		return PTR_ERR(node);
1476
1477	item_key.objectid = btrfs_ino(dir);
1478	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
1479	item_key.offset = index;
1480
1481	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1482	if (!ret)
1483		goto end;
1484
1485	item = btrfs_alloc_delayed_item(0);
1486	if (!item) {
1487		ret = -ENOMEM;
1488		goto end;
1489	}
1490
1491	item->key = item_key;
1492
1493	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1494	/*
1495	 * we reserved enough space when we started a new transaction,
1496	 * so the metadata reservation here cannot fail.
1497	 */
1498	BUG_ON(ret);
1499
1500	mutex_lock(&node->mutex);
1501	ret = __btrfs_add_delayed_deletion_item(node, item);
1502	if (unlikely(ret)) {
1503		printk(KERN_ERR "error adding delayed dir index item (index: %llu) "
1504				"into the deletion tree of the delayed node "
1505				"(root id: %llu, inode id: %llu, errno: %d)\n",
1506				(unsigned long long)index,
1507				(unsigned long long)node->root->objectid,
1508				(unsigned long long)node->inode_id,
1509				ret);
1510		BUG();
1511	}
1512	mutex_unlock(&node->mutex);
1513end:
1514	btrfs_release_delayed_node(node);
1515	return ret;
1516}
1517
1518int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1519{
1520	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1521
1522	if (!delayed_node)
1523		return -ENOENT;
1524
1525	/*
1526	 * Since we hold the i_mutex of this directory, no new directory index
1527	 * can be added to the delayed node and index_cnt cannot be updated
1528	 * right now.  So we needn't lock the delayed node.
1529	 */
1530	if (!delayed_node->index_cnt) {
1531		btrfs_release_delayed_node(delayed_node);
1532		return -EINVAL;
1533	}
1534
1535	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1536	btrfs_release_delayed_node(delayed_node);
1537	return 0;
1538}
1539
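/*
 * Snapshot a directory's pending delayed items for readdir.  Every item
 * gets an extra reference and is linked onto the caller's lists via
 * item->readdir_list; a typical caller does, roughly:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *	btrfs_get_delayed_items(inode, &ins_list, &del_list);
 *	...
 *	btrfs_put_delayed_items(&ins_list, &del_list);
 */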
1540void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1541			     struct list_head *del_list)
1542{
1543	struct btrfs_delayed_node *delayed_node;
1544	struct btrfs_delayed_item *item;
1545
1546	delayed_node = btrfs_get_delayed_node(inode);
1547	if (!delayed_node)
1548		return;
1549
1550	mutex_lock(&delayed_node->mutex);
1551	item = __btrfs_first_delayed_insertion_item(delayed_node);
1552	while (item) {
1553		atomic_inc(&item->refs);
1554		list_add_tail(&item->readdir_list, ins_list);
1555		item = __btrfs_next_delayed_item(item);
1556	}
1557
1558	item = __btrfs_first_delayed_deletion_item(delayed_node);
1559	while (item) {
1560		atomic_inc(&item->refs);
1561		list_add_tail(&item->readdir_list, del_list);
1562		item = __btrfs_next_delayed_item(item);
1563	}
1564	mutex_unlock(&delayed_node->mutex);
1565	/*
1566	 * This delayed node is still cached in the btrfs inode, so refs
1567	 * must be > 1 now, and we needn't check whether it is going to be
1568	 * freed or not.
1569	 *
1570	 * Besides that, this function is used to read a directory, and we
1571	 * do not insert/delete delayed items during that period.  So we
1572	 * also needn't requeue or dequeue this delayed node.
1573	 */
1574	atomic_dec(&delayed_node->refs);
1575}
1576
1577void btrfs_put_delayed_items(struct list_head *ins_list,
1578			     struct list_head *del_list)
1579{
1580	struct btrfs_delayed_item *curr, *next;
1581
1582	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1583		list_del(&curr->readdir_list);
1584		if (atomic_dec_and_test(&curr->refs))
1585			kfree(curr);
1586	}
1587
1588	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1589		list_del(&curr->readdir_list);
1590		if (atomic_dec_and_test(&curr->refs))
1591			kfree(curr);
1592	}
1593}
1594
1595int btrfs_should_delete_dir_index(struct list_head *del_list,
1596				  u64 index)
1597{
1598	struct btrfs_delayed_item *curr, *next;
1599	int ret;
1600
1601	if (list_empty(del_list))
1602		return 0;
1603
1604	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1605		if (curr->key.offset > index)
1606			break;
1607
1608		list_del(&curr->readdir_list);
1609		ret = (curr->key.offset == index);
1610
1611		if (atomic_dec_and_test(&curr->refs))
1612			kfree(curr);
1613
1614		if (ret)
1615			return 1;
1616		else
1617			continue;
1618	}
1619	return 0;
1620}
1621
1622/*
1623 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1624 *
1625 */
1626int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
1627				    filldir_t filldir,
1628				    struct list_head *ins_list)
1629{
1630	struct btrfs_dir_item *di;
1631	struct btrfs_delayed_item *curr, *next;
1632	struct btrfs_key location;
1633	char *name;
1634	int name_len;
1635	int over = 0;
1636	unsigned char d_type;
1637
1638	if (list_empty(ins_list))
1639		return 0;
1640
1641	/*
1642	 * The data of the delayed items cannot change, so we needn't
1643	 * lock them.  And since we hold the i_mutex of the directory,
1644	 * nobody can delete any directory index now.
1645	 */
1646	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1647		list_del(&curr->readdir_list);
1648
1649		if (curr->key.offset < filp->f_pos) {
1650			if (atomic_dec_and_test(&curr->refs))
1651				kfree(curr);
1652			continue;
1653		}
1654
1655		filp->f_pos = curr->key.offset;
1656
1657		di = (struct btrfs_dir_item *)curr->data;
1658		name = (char *)(di + 1);
1659		name_len = le16_to_cpu(di->name_len);
1660
1661		d_type = btrfs_filetype_table[di->type];
1662		btrfs_disk_key_to_cpu(&location, &di->location);
1663
1664		over = filldir(dirent, name, name_len, curr->key.offset,
1665			       location.objectid, d_type);
1666
1667		if (atomic_dec_and_test(&curr->refs))
1668			kfree(curr);
1669
1670		if (over)
1671			return 1;
1672	}
1673	return 0;
1674}
1675
1676BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
1677			 generation, 64);
1678BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
1679			 sequence, 64);
1680BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
1681			 transid, 64);
1682BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
1683BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
1684			 nbytes, 64);
1685BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
1686			 block_group, 64);
1687BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
1688BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
1689BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
1690BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
1691BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
1692BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
1693
1694BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
1695BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
1696
1697static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1698				  struct btrfs_inode_item *inode_item,
1699				  struct inode *inode)
1700{
1701	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
1702	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
1703	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1704	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1705	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1706	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1707	btrfs_set_stack_inode_generation(inode_item,
1708					 BTRFS_I(inode)->generation);
1709	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1710	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1711	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1712	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1713	btrfs_set_stack_inode_block_group(inode_item, 0);
1714
1715	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1716				     inode->i_atime.tv_sec);
1717	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1718				      inode->i_atime.tv_nsec);
1719
1720	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1721				     inode->i_mtime.tv_sec);
1722	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1723				      inode->i_mtime.tv_nsec);
1724
1725	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1726				     inode->i_ctime.tv_sec);
1727	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1728				      inode->i_ctime.tv_nsec);
1729}
1730
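/*
 * Fill a VFS inode from the inode item cached in its delayed node.
 *
 * Returns -ENOENT if the inode has no delayed node or the cached inode
 * item is not dirty; the caller is then expected to read the item from
 * the btree instead.  Sketch of a typical caller (illustrative only):
 *
 *	u32 rdev;
 *
 *	if (btrfs_fill_inode(inode, &rdev))
 *		... fall back to a btree lookup of the inode item ...
 */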
1731int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1732{
1733	struct btrfs_delayed_node *delayed_node;
1734	struct btrfs_inode_item *inode_item;
1735	struct btrfs_timespec *tspec;
1736
1737	delayed_node = btrfs_get_delayed_node(inode);
1738	if (!delayed_node)
1739		return -ENOENT;
1740
1741	mutex_lock(&delayed_node->mutex);
1742	if (!delayed_node->inode_dirty) {
1743		mutex_unlock(&delayed_node->mutex);
1744		btrfs_release_delayed_node(delayed_node);
1745		return -ENOENT;
1746	}
1747
1748	inode_item = &delayed_node->inode_item;
1749
1750	inode->i_uid = btrfs_stack_inode_uid(inode_item);
1751	inode->i_gid = btrfs_stack_inode_gid(inode_item);
1752	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1753	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1754	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1755	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1756	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1757	inode->i_version = btrfs_stack_inode_sequence(inode_item);
1758	inode->i_rdev = 0;
1759	*rdev = btrfs_stack_inode_rdev(inode_item);
1760	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1761
1762	tspec = btrfs_inode_atime(inode_item);
1763	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1764	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1765
1766	tspec = btrfs_inode_mtime(inode_item);
1767	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1768	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1769
1770	tspec = btrfs_inode_ctime(inode_item);
1771	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1772	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1773
1774	inode->i_generation = BTRFS_I(inode)->generation;
1775	BTRFS_I(inode)->index_cnt = (u64)-1;
1776
1777	mutex_unlock(&delayed_node->mutex);
1778	btrfs_release_delayed_node(delayed_node);
1779	return 0;
1780}
1781
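/*
 * Record an inode update in the delayed node instead of updating the
 * btree right away.  If the cached inode item is already dirty we only
 * refresh its contents; otherwise metadata space is reserved first and
 * the delayed node is accounted as holding one more dirty item.
 */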
1782int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1783			       struct btrfs_root *root, struct inode *inode)
1784{
1785	struct btrfs_delayed_node *delayed_node;
1786	int ret = 0;
1787
1788	delayed_node = btrfs_get_or_create_delayed_node(inode);
1789	if (IS_ERR(delayed_node))
1790		return PTR_ERR(delayed_node);
1791
1792	mutex_lock(&delayed_node->mutex);
1793	if (delayed_node->inode_dirty) {
1794		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1795		goto release_node;
1796	}
1797
1798	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1799						   delayed_node);
1800	if (ret)
1801		goto release_node;
1802
1803	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1804	delayed_node->inode_dirty = 1;
1805	delayed_node->count++;
1806	atomic_inc(&root->fs_info->delayed_root->items);
1807release_node:
1808	mutex_unlock(&delayed_node->mutex);
1809	btrfs_release_delayed_node(delayed_node);
1810	return ret;
1811}
1812
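/*
 * Drop every pending insertion and deletion item of @delayed_node,
 * releasing the metadata space reserved for each of them, and release
 * the dirty inode item if one is cached.
 */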
1813static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1814{
1815	struct btrfs_root *root = delayed_node->root;
1816	struct btrfs_delayed_item *curr_item, *prev_item;
1817
1818	mutex_lock(&delayed_node->mutex);
1819	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1820	while (curr_item) {
1821		btrfs_delayed_item_release_metadata(root, curr_item);
1822		prev_item = curr_item;
1823		curr_item = __btrfs_next_delayed_item(prev_item);
1824		btrfs_release_delayed_item(prev_item);
1825	}
1826
1827	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1828	while (curr_item) {
1829		btrfs_delayed_item_release_metadata(root, curr_item);
1830		prev_item = curr_item;
1831		curr_item = __btrfs_next_delayed_item(prev_item);
1832		btrfs_release_delayed_item(prev_item);
1833	}
1834
1835	if (delayed_node->inode_dirty) {
1836		btrfs_delayed_inode_release_metadata(root, delayed_node);
1837		btrfs_release_delayed_inode(delayed_node);
1838	}
1839	mutex_unlock(&delayed_node->mutex);
1840}
1841
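/*
 * Discard all delayed items attached to @inode, if any.  This is a
 * no-op for inodes that never got a delayed node.
 */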
1842void btrfs_kill_delayed_inode_items(struct inode *inode)
1843{
1844	struct btrfs_delayed_node *delayed_node;
1845
1846	delayed_node = btrfs_get_delayed_node(inode);
1847	if (!delayed_node)
1848		return;
1849
1850	__btrfs_kill_delayed_node(delayed_node);
1851	btrfs_release_delayed_node(delayed_node);
1852}
1853
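/*
 * Kill every delayed node of @root.  The radix tree is scanned in
 * batches of 8 under inode_lock; a reference is taken on each node
 * before the lock is dropped so the nodes cannot be freed while we
 * are killing them.
 */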
1854void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1855{
1856	u64 inode_id = 0;
1857	struct btrfs_delayed_node *delayed_nodes[8];
1858	int i, n;
1859
1860	while (1) {
1861		spin_lock(&root->inode_lock);
1862		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1863					   (void **)delayed_nodes, inode_id,
1864					   ARRAY_SIZE(delayed_nodes));
1865		if (!n) {
1866			spin_unlock(&root->inode_lock);
1867			break;
1868		}
1869
1870		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1871
1872		for (i = 0; i < n; i++)
1873			atomic_inc(&delayed_nodes[i]->refs);
1874		spin_unlock(&root->inode_lock);
1875
1876		for (i = 0; i < n; i++) {
1877			__btrfs_kill_delayed_node(delayed_nodes[i]);
1878			btrfs_release_delayed_node(delayed_nodes[i]);
1879		}
1880	}
1881}
1882
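/*
 * Kill every delayed node still queued on the fs-wide delayed root;
 * used by cleanup paths where the remaining delayed items will never
 * be written back.
 */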
1883void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1884{
1885	struct btrfs_delayed_root *delayed_root;
1886	struct btrfs_delayed_node *curr_node, *prev_node;
1887
1888	delayed_root = btrfs_get_delayed_root(root);
1889
1890	curr_node = btrfs_first_delayed_node(delayed_root);
1891	while (curr_node) {
1892		__btrfs_kill_delayed_node(curr_node);
1893
1894		prev_node = curr_node;
1895		curr_node = btrfs_next_delayed_node(curr_node);
1896		btrfs_release_delayed_node(prev_node);
1897	}
1898}
1899