// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"

#define BTRFS_ROOT_TRANS_TAG 0

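/*
 * For each transaction state, the mask of handle types that are not allowed
 * to join the running transaction while it is in that state
 * (join_transaction() returns -EBUSY for them).
 */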
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		=  __TRANS_START,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group_cache *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group_cache,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_put_block_group_trimming(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

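/*
 * Make the new root nodes visible as commit roots: for every root on the
 * transaction's switch_commits list, replace commit_root with the current
 * root node, then free the roots that were dropped during this transaction.
 */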
static noinline void switch_commit_roots(struct btrfs_transaction *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->root_key.objectid))
			btrfs_unpin_free_ino(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

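/*
 * The "extwriters" count tracks handles that may add new dirty data to the
 * transaction, i.e. the TRANS_START and TRANS_ATTACH types (TRANS_EXTWRITERS).
 * The commit path waits for this count to drop to zero before it starts
 * writing the transaction out.
 */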
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}

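/*
 * Queue a dropped (deleted) root on the transaction so that it is freed when
 * the commit roots are switched, and untag it so commit_fs_roots() does not
 * try to update its root item again.
 */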
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

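/*
 * Return whether a new handle of the given type must wait for a blocked
 * transaction to become unblocked first.  Only TRANS_START waits, and never
 * while log recovery is in progress.
 */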
static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

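/*
 * Return true if starting this transaction might require creating a
 * relocation root for @root, in which case the caller must reserve one extra
 * tree block worth of metadata.
 */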
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

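/*
 * Common helper behind all of the btrfs_*_transaction() variants.  Reserve
 * metadata space for @num_items tree operations (flushing according to
 * @flush), then join the running transaction or start a new one as dictated
 * by @type, and return a handle bound to it.
 */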
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
				enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start.  We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add 1 items worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	int ret;

	/*
	 * We have two callers: unlink and block group removal.  The
	 * former should succeed even if we will temporarily exceed
	 * quota and the latter operates on the extent root so
	 * qgroup enforcement is ignored anyway.
	 */
	trans = start_transaction(root, num_items, TRANS_START,
				  BTRFS_RESERVE_FLUSH_ALL, false);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return trans;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
	ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
				       num_bytes, min_factor);
	if (ret) {
		btrfs_end_transaction(trans);
		return ERR_PTR(ret);
	}

	trans->block_rsv = &fs_info->trans_block_rsv;
	trans->bytes_reserved = num_bytes;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, num_bytes, 1);

	return trans;
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that an inactive transaction
 * is still in memory, not fully on disk.  If you want to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference being that this one
 * will wait for all inactive transactions until they fully complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT))
		btrfs_wait_for_commit(root->fs_info, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

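/*
 * Wait for the transaction with the given transid to complete; with
 * transid == 0, wait for the newest transaction that is currently
 * committing, if any.  Returns -EINVAL if the requested transaction
 * does not exist.
 */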
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

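/*
 * Return 1 if this handle should be ended soon: either there may not be
 * enough space left for the delayed refs, or the global block rsv is
 * running low.
 */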
static int should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return 1;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	return should_end_transaction(trans);
}

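/*
 * Give back any metadata bytes still reserved by this handle to the
 * transaction block rsv and clear the handle's bookkeeping.
 */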
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

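/*
 * Drop one use of the handle: release its reservations, create any pending
 * block groups, drop the writer counts and free the handle.  With @throttle
 * set, a blocked transaction is committed instead of just waking the
 * transaction kthread.
 */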
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * Dead roots are old snapshots that need to be deleted.  This queues the
 * given root on the list of dead roots so that it gets cleaned up later.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &fs_info->dead_roots);
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Write out all the dirty fs (subvolume) roots and update their root items
 * in the tree of tree roots.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Do all the special snapshot-related qgroup dirty hacks.
 *
 * This does all of the needed qgroup inheritance and dirty hacks, like
 * switching the commit roots inside one transaction and writing all the
 * btree blocks to disk, so that the qgroup numbers stay correct.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled. If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Otherwise, after the upcoming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated
	 * root item.
	 */
	record_root_in_trans(trans, src, 1);

	/*
	 * We are going to commit the transaction, see the comment in
	 * btrfs_commit_transaction() for why we lock tree_log_mutex.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now the qgroups are all updated, we can inherit them to the new qgroups */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees
	 *    To ensure all subvolume and extent trees have a valid
	 *    commit_root for the later insert_dir_item() to account against
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modifications will be cowed;
	 *    otherwise commit_root could be populated and cause wrong qgroup
	 *    numbers.
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid a half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans->transaction);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force the parent root to be updated, as we recorded it before so
	 * its last_trans == cur_transid; otherwise it won't be committed
	 * again onto disk after the later insert_dir_item().
	 */
	if (!ret)
		record_root_in_trans(trans, parent, 1);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error happens that may affect the commit of the current transaction,
 * we return the error number.  If the error only affects the creation of the
 * pending snapshots, we just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup to skip current new snapshot's qgroupid, as it is
	 * accounted by later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root, 0);

	cur_time = current_time(parent_inode);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	record_root_in_trans(trans, root, 0);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_set_lock_blocking_write(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for snapshot, as we do some qgroup
	 * snapshot hack to do fast snapshot.
	 * To co-operate with that hack, we do hack again.
	 * Or snapshot will be greatly slowed down by a subtree qgroup rescan
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime =
		current_time(parent_inode);
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}

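/*
 * Copy the current root pointers (bytenr, generation, level) of the chunk
 * and tree roots into the in-memory superblock, so the upcoming superblock
 * write points at the new roots.
 */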
static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}

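/* Return whether the currently running transaction is in its commit phase. */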
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
					    struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(
					struct btrfs_fs_info *fs_info,
					struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}

1821static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1822{
1823	struct btrfs_fs_info *fs_info = trans->fs_info;
1824	struct btrfs_transaction *cur_trans = trans->transaction;
1825
1826	WARN_ON(refcount_read(&trans->use_count) > 1);
1827
1828	btrfs_abort_transaction(trans, err);
1829
1830	spin_lock(&fs_info->trans_lock);
1831
1832	/*
1833	 * If the transaction is removed from the list, it means this
1834	 * transaction has been committed successfully, so it is impossible
1835	 * to call the cleanup function.
1836	 */
1837	BUG_ON(list_empty(&cur_trans->list));
1838
1839	list_del_init(&cur_trans->list);
1840	if (cur_trans == fs_info->running_transaction) {
1841		cur_trans->state = TRANS_STATE_COMMIT_DOING;
1842		spin_unlock(&fs_info->trans_lock);
1843		wait_event(cur_trans->writer_wait,
1844			   atomic_read(&cur_trans->num_writers) == 1);
1845
1846		spin_lock(&fs_info->trans_lock);
1847	}
1848	spin_unlock(&fs_info->trans_lock);
1849
1850	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1851
1852	spin_lock(&fs_info->trans_lock);
1853	if (cur_trans == fs_info->running_transaction)
1854		fs_info->running_transaction = NULL;
1855	spin_unlock(&fs_info->trans_lock);
1856
1857	if (trans->type & __TRANS_FREEZABLE)
1858		sb_end_intwrite(fs_info->sb);
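	/*
	 * Drop two references: the one held by this trans handle and the one
	 * taken when the transaction was created (use_count starts at 2 so
	 * the transaction stays alive until its commit or cleanup).
	 */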
1859	btrfs_put_transaction(cur_trans);
1860	btrfs_put_transaction(cur_trans);
1861
1862	trace_btrfs_transaction_commit(trans->root);
1863
1864	if (current->journal_info == trans)
1865		current->journal_info = NULL;
1866	btrfs_scrub_cancel(fs_info);
1867
1868	kmem_cache_free(btrfs_trans_handle_cachep, trans);
1869}
1870
1871/*
1872 * Release reserved delayed ref space of all pending block groups of the
1873 * transaction and remove them from the list
1874 */
1875static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1876{
1877	struct btrfs_fs_info *fs_info = trans->fs_info;
1878	struct btrfs_block_group_cache *block_group, *tmp;
1879
1880	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1881		btrfs_delayed_refs_rsv_release(fs_info, 1);
1882		list_del_init(&block_group->bg_list);
1883	}
1884}
1885
1886static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1887{
1888	struct btrfs_fs_info *fs_info = trans->fs_info;
1889
1890	/*
1891	 * We use writeback_inodes_sb here because if we used
1892	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
1893	 * We are currently holding the fs freeze lock; if we do an async flush
1894	 * we'll do btrfs_join_transaction() and deadlock because we need to
1895	 * wait for the fs freeze lock.  Using the direct flushing we benefit
1896	 * from already being in a transaction and our join_transaction doesn't
1897	 * have to re-take the fs freeze lock.
1898	 */
1899	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1900		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1901	} else {
1902		struct btrfs_pending_snapshot *pending;
1903		struct list_head *head = &trans->transaction->pending_snapshots;
1904
1905		/*
1906		 * Flush delalloc for any root that is going to be snapshotted.
1907		 * This is done to avoid a corrupted version of files, in the
1908		 * snapshots, that had both buffered and direct IO writes (even
1909		 * if they were done sequentially) due to an unordered update of
1910		 * the inode's size on disk.
1911		 */
1912		list_for_each_entry(pending, head, list) {
1913			int ret;
1914
1915			ret = btrfs_start_delalloc_snapshot(pending->root);
1916			if (ret)
1917				return ret;
1918		}
1919	}
1920	return 0;
1921}
1922
1923static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1924{
1925	struct btrfs_fs_info *fs_info = trans->fs_info;
1926
1927	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1928		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1929	} else {
1930		struct btrfs_pending_snapshot *pending;
1931		struct list_head *head = &trans->transaction->pending_snapshots;
1932
1933		/*
1934		 * Wait for any delalloc that we started previously for the roots
1935		 * that are going to be snapshotted. This is to avoid a corrupted
1936		 * version of files in the snapshots that had both buffered and
1937		 * direct IO writes (even if they were done sequentially).
1938		 */
1939		list_for_each_entry(pending, head, list)
1940			btrfs_wait_ordered_extents(pending->root,
1941						   U64_MAX, 0, U64_MAX);
1942	}
1943}
1944
1945int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1946{
1947	struct btrfs_fs_info *fs_info = trans->fs_info;
1948	struct btrfs_transaction *cur_trans = trans->transaction;
1949	struct btrfs_transaction *prev_trans = NULL;
1950	int ret;
1951
1952	/* Stop the commit early if ->aborted is set */
1953	if (unlikely(READ_ONCE(cur_trans->aborted))) {
1954		ret = cur_trans->aborted;
1955		btrfs_end_transaction(trans);
1956		return ret;
1957	}
1958
1959	btrfs_trans_release_metadata(trans);
1960	trans->block_rsv = NULL;
1961
1962	/* make a pass through all the delayed refs we have so far;
1963	 * any running procs may add more while we are here
1964	 */
1965	ret = btrfs_run_delayed_refs(trans, 0);
1966	if (ret) {
1967		btrfs_end_transaction(trans);
1968		return ret;
1969	}
1970
1971	cur_trans = trans->transaction;
1972
1973	/*
1974	 * set the flushing flag so procs in this transaction have to
1975	 * start sending their work down.
1976	 */
1977	cur_trans->delayed_refs.flushing = 1;
1978	smp_wmb();
1979
1980	btrfs_create_pending_block_groups(trans);
1981
1982	ret = btrfs_run_delayed_refs(trans, 0);
1983	if (ret) {
1984		btrfs_end_transaction(trans);
1985		return ret;
1986	}
1987
1988	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
1989		int run_it = 0;
1990
1991		/* this mutex is also taken before trying to set
1992		 * block groups readonly.  We need to make sure
1993		 * that nobody has set a block group readonly
1994		 * after extents from that block group have been
1995		 * allocated for cache files.  btrfs_set_block_group_ro
1996		 * will wait for the transaction to commit if it
1997		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
1998		 *
1999		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2000		 * only one process starts all the block group IO.  It wouldn't
2001		 * hurt to have more than one go through, but there's no
2002		 * real advantage to it either.
2003		 */
2004		mutex_lock(&fs_info->ro_block_group_mutex);
2005		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2006				      &cur_trans->flags))
2007			run_it = 1;
2008		mutex_unlock(&fs_info->ro_block_group_mutex);
2009
2010		if (run_it) {
2011			ret = btrfs_start_dirty_block_groups(trans);
2012			if (ret) {
2013				btrfs_end_transaction(trans);
2014				return ret;
2015			}
2016		}
2017	}
2018
2019	spin_lock(&fs_info->trans_lock);
2020	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2021		spin_unlock(&fs_info->trans_lock);
2022		refcount_inc(&cur_trans->use_count);
2023		ret = btrfs_end_transaction(trans);
2024
2025		wait_for_commit(cur_trans);
2026
2027		if (unlikely(cur_trans->aborted))
2028			ret = cur_trans->aborted;
2029
2030		btrfs_put_transaction(cur_trans);
2031
2032		return ret;
2033	}
2034
2035	cur_trans->state = TRANS_STATE_COMMIT_START;
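	/* Wake up anyone blocked in wait_current_trans_commit_start(). */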
2036	wake_up(&fs_info->transaction_blocked_wait);
2037
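	/*
	 * If an older transaction is still on fs_info->trans_list it has not
	 * finished committing yet; wait for it, so super blocks are written
	 * in order and an aborted predecessor is noticed before we commit.
	 */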
2038	if (cur_trans->list.prev != &fs_info->trans_list) {
2039		prev_trans = list_entry(cur_trans->list.prev,
2040					struct btrfs_transaction, list);
2041		if (prev_trans->state != TRANS_STATE_COMPLETED) {
2042			refcount_inc(&prev_trans->use_count);
2043			spin_unlock(&fs_info->trans_lock);
2044
2045			wait_for_commit(prev_trans);
2046			ret = prev_trans->aborted;
2047
2048			btrfs_put_transaction(prev_trans);
2049			if (ret)
2050				goto cleanup_transaction;
2051		} else {
2052			spin_unlock(&fs_info->trans_lock);
2053		}
2054	} else {
2055		spin_unlock(&fs_info->trans_lock);
2056		/*
2057		 * The previous transaction was aborted and was already removed
2058		 * from the list of transactions at fs_info->trans_list. So we
2059		 * abort to prevent writing a new superblock that reflects a
2060		 * corrupt state (pointing to trees with unwritten nodes/leafs).
2061		 */
2062		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2063			ret = -EROFS;
2064			goto cleanup_transaction;
2065		}
2066	}
2067
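	/*
	 * Stop counting this handle as an external writer, so the wait below
	 * for extwriter_counter_read() to reach zero can complete.
	 */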
2068	extwriter_counter_dec(cur_trans, trans->type);
2069
2070	ret = btrfs_start_delalloc_flush(trans);
2071	if (ret)
2072		goto cleanup_transaction;
2073
2074	ret = btrfs_run_delayed_items(trans);
2075	if (ret)
2076		goto cleanup_transaction;
2077
2078	wait_event(cur_trans->writer_wait,
2079		   extwriter_counter_read(cur_trans) == 0);
2080
2081	/* Some pending stuff might have been added after the previous flush. */
2082	ret = btrfs_run_delayed_items(trans);
2083	if (ret)
2084		goto cleanup_transaction;
2085
2086	btrfs_wait_delalloc_flush(trans);
2087
2088	btrfs_scrub_pause(fs_info);
2089	/*
2090	 * Ok now we need to make sure to block out any other joins while we
2091	 * commit the transaction.  We could have started a join before setting
2092	 * COMMIT_DOING, so make sure to wait until num_writers == 1 again.
2093	 */
2094	spin_lock(&fs_info->trans_lock);
2095	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2096	spin_unlock(&fs_info->trans_lock);
2097	wait_event(cur_trans->writer_wait,
2098		   atomic_read(&cur_trans->num_writers) == 1);
2099
2100	/* ->aborted might be set after the previous check, so check it */
2101	if (unlikely(READ_ONCE(cur_trans->aborted))) {
2102		ret = cur_trans->aborted;
2103		goto scrub_continue;
2104	}
2105	/*
2106	 * the reloc mutex makes sure that we stop
2107	 * the balancing code from coming in and moving
2108	 * extents around in the middle of the commit
2109	 */
2110	mutex_lock(&fs_info->reloc_mutex);
2111
2112	/*
2113	 * We needn't worry about the delayed items because we will
2114	 * deal with them in create_pending_snapshot(), which is the
2115	 * core function of the snapshot creation.
2116	 */
2117	ret = create_pending_snapshots(trans);
2118	if (ret) {
2119		mutex_unlock(&fs_info->reloc_mutex);
2120		goto scrub_continue;
2121	}
2122
2123	/*
2124	 * We insert the dir indexes of the snapshots and update the inode
2125	 * of the snapshots' parents after the snapshot creation, so there
2126	 * are some delayed items which are not dealt with. Now deal with
2127	 * them.
2128	 *
2129	 * We needn't worry that this operation will corrupt the snapshots,
2130	 * because all the trees which are snapshotted will be forced to COW
2131	 * the nodes and leaves.
2132	 */
2133	ret = btrfs_run_delayed_items(trans);
2134	if (ret) {
2135		mutex_unlock(&fs_info->reloc_mutex);
2136		goto scrub_continue;
2137	}
2138
2139	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2140	if (ret) {
2141		mutex_unlock(&fs_info->reloc_mutex);
2142		goto scrub_continue;
2143	}
2144
2145	/*
2146	 * make sure none of the code above managed to slip in a
2147	 * delayed item
2148	 */
2149	btrfs_assert_delayed_root_empty(fs_info);
2150
2151	WARN_ON(cur_trans != trans->transaction);
2152
2153	/* btrfs_commit_tree_roots is responsible for getting the
2154	 * various roots consistent with each other.  Every pointer
2155	 * in the tree of tree roots has to point to the most up to date
2156	 * root for every subvolume and other tree.  So, we have to keep
2157	 * the tree logging code from jumping in and changing any
2158	 * of the trees.
2159	 *
2160	 * At this point in the commit, there can't be any tree-log
2161	 * writers, but a little lower down we drop the trans mutex
2162	 * and let new people in.  By holding the tree_log_mutex
2163	 * from now until after the super is written, we avoid races
2164	 * with the tree-log code.
2165	 */
2166	mutex_lock(&fs_info->tree_log_mutex);
2167
2168	ret = commit_fs_roots(trans);
2169	if (ret) {
2170		mutex_unlock(&fs_info->tree_log_mutex);
2171		mutex_unlock(&fs_info->reloc_mutex);
2172		goto scrub_continue;
2173	}
2174
2175	/*
2176	 * Since the transaction is done, we can apply the pending changes
2177	 * before the next transaction.
2178	 */
2179	btrfs_apply_pending_changes(fs_info);
2180
2181	/* commit_fs_roots gets rid of all the tree log roots, so it is now
2182	 * safe to free the root of the tree log roots
2183	 */
2184	btrfs_free_log_root_tree(trans, fs_info);
2185
2186	/*
2187	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
2188	 * new delayed refs. Must handle them or qgroup can be wrong.
2189	 */
2190	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2191	if (ret) {
2192		mutex_unlock(&fs_info->tree_log_mutex);
2193		mutex_unlock(&fs_info->reloc_mutex);
2194		goto scrub_continue;
2195	}
2196
2197	/*
2198	 * Since fs roots are all committed, we can get a quite accurate
2199	 * new_roots. So let's do quota accounting.
2200	 */
2201	ret = btrfs_qgroup_account_extents(trans);
2202	if (ret < 0) {
2203		mutex_unlock(&fs_info->tree_log_mutex);
2204		mutex_unlock(&fs_info->reloc_mutex);
2205		goto scrub_continue;
2206	}
2207
2208	ret = commit_cowonly_roots(trans);
2209	if (ret) {
2210		mutex_unlock(&fs_info->tree_log_mutex);
2211		mutex_unlock(&fs_info->reloc_mutex);
2212		goto scrub_continue;
2213	}
2214
2215	/*
2216	 * The tasks which save the space cache and inode cache may also
2217	 * update ->aborted, check it.
2218	 */
2219	if (unlikely(READ_ONCE(cur_trans->aborted))) {
2220		ret = cur_trans->aborted;
2221		mutex_unlock(&fs_info->tree_log_mutex);
2222		mutex_unlock(&fs_info->reloc_mutex);
2223		goto scrub_continue;
2224	}
2225
2226	btrfs_prepare_extent_commit(fs_info);
2227
2228	cur_trans = fs_info->running_transaction;
2229
2230	btrfs_set_root_node(&fs_info->tree_root->root_item,
2231			    fs_info->tree_root->node);
2232	list_add_tail(&fs_info->tree_root->dirty_list,
2233		      &cur_trans->switch_commits);
2234
2235	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2236			    fs_info->chunk_root->node);
2237	list_add_tail(&fs_info->chunk_root->dirty_list,
2238		      &cur_trans->switch_commits);
2239
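	/*
	 * Swap in the new roots as commit roots, so anything searching the
	 * commit roots from now on sees the state of this transaction.
	 */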
2240	switch_commit_roots(cur_trans);
2241
2242	ASSERT(list_empty(&cur_trans->dirty_bgs));
2243	ASSERT(list_empty(&cur_trans->io_bgs));
2244	update_super_roots(fs_info);
2245
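	/*
	 * A full transaction commit makes any existing log tree obsolete, so
	 * clear the log root in the super block copy about to be written.
	 */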
2246	btrfs_set_super_log_root(fs_info->super_copy, 0);
2247	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2248	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2249	       sizeof(*fs_info->super_copy));
2250
2251	btrfs_commit_device_sizes(cur_trans);
2252
2253	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2254	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2255
2256	btrfs_trans_release_chunk_metadata(trans);
2257
2258	spin_lock(&fs_info->trans_lock);
2259	cur_trans->state = TRANS_STATE_UNBLOCKED;
2260	fs_info->running_transaction = NULL;
2261	spin_unlock(&fs_info->trans_lock);
2262	mutex_unlock(&fs_info->reloc_mutex);
2263
2264	wake_up(&fs_info->transaction_wait);
2265
2266	ret = btrfs_write_and_wait_transaction(trans);
2267	if (ret) {
2268		btrfs_handle_fs_error(fs_info, ret,
2269				      "Error while writing out transaction");
2270		mutex_unlock(&fs_info->tree_log_mutex);
2271		goto scrub_continue;
2272	}
2273
2274	ret = write_all_supers(fs_info, 0);
2275	/*
2276	 * the super is written, we can safely allow the tree-loggers
2277	 * to go about their business
2278	 */
2279	mutex_unlock(&fs_info->tree_log_mutex);
2280	if (ret)
2281		goto scrub_continue;
2282
2283	btrfs_finish_extent_commit(trans);
2284
2285	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2286		btrfs_clear_space_info_full(fs_info);
2287
2288	fs_info->last_trans_committed = cur_trans->transid;
2289	/*
2290	 * We needn't acquire the lock here because there is no other task
2291	 * which can change it.
2292	 */
2293	cur_trans->state = TRANS_STATE_COMPLETED;
2294	wake_up(&cur_trans->commit_wait);
2295	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
2296
2297	spin_lock(&fs_info->trans_lock);
2298	list_del_init(&cur_trans->list);
2299	spin_unlock(&fs_info->trans_lock);
2300
2301	btrfs_put_transaction(cur_trans);
2302	btrfs_put_transaction(cur_trans);
2303
2304	if (trans->type & __TRANS_FREEZABLE)
2305		sb_end_intwrite(fs_info->sb);
2306
2307	trace_btrfs_transaction_commit(trans->root);
2308
2309	btrfs_scrub_continue(fs_info);
2310
2311	if (current->journal_info == trans)
2312		current->journal_info = NULL;
2313
2314	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2315
2316	return ret;
2317
2318scrub_continue:
2319	btrfs_scrub_continue(fs_info);
2320cleanup_transaction:
2321	btrfs_trans_release_metadata(trans);
2322	btrfs_cleanup_pending_block_groups(trans);
2323	btrfs_trans_release_chunk_metadata(trans);
2324	trans->block_rsv = NULL;
2325	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2326	if (current->journal_info == trans)
2327		current->journal_info = NULL;
2328	cleanup_transaction(trans, ret);
2329
2330	return ret;
2331}
2332
2333/*
2334 * return < 0 if error
2335 * 0 if there are no more dead_roots at the time of call
2336 * 1 if there are more to be processed, call again
2337 *
2338 * A return value of 1 means there are certainly more snapshots to delete,
2339 * but if a new one appears during processing, it may return 0. We don't
2340 * mind, because btrfs_commit_super will poke the cleaner thread and it
2341 * will process it a few seconds later.
2342 */
2343int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2344{
2345	int ret;
2346	struct btrfs_fs_info *fs_info = root->fs_info;
2347
2348	spin_lock(&fs_info->trans_lock);
2349	if (list_empty(&fs_info->dead_roots)) {
2350		spin_unlock(&fs_info->trans_lock);
2351		return 0;
2352	}
2353	root = list_first_entry(&fs_info->dead_roots,
2354			struct btrfs_root, root_list);
2355	list_del_init(&root->root_list);
2356	spin_unlock(&fs_info->trans_lock);
2357
2358	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2359
2360	btrfs_kill_all_delayed_nodes(root);
2361
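	/*
	 * Roots with the old backref format cannot have their references
	 * updated while being dropped, hence the different update_ref
	 * argument passed to btrfs_drop_snapshot() below.
	 */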
2362	if (btrfs_header_backref_rev(root->node) <
2363			BTRFS_MIXED_BACKREF_REV)
2364		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2365	else
2366		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2367
2368	return (ret < 0) ? 0 : 1;
2369}
2370
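/*
 * Apply mount option changes that were queued while a transaction was
 * running. xchg() atomically takes ownership of all queued bits, so
 * changes posted by concurrent setters are not lost.
 */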
2371void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2372{
2373	unsigned long prev;
2374	unsigned long bit;
2375
2376	prev = xchg(&fs_info->pending_changes, 0);
2377	if (!prev)
2378		return;
2379
2380	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2381	if (prev & bit)
2382		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2383	prev &= ~bit;
2384
2385	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2386	if (prev & bit)
2387		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2388	prev &= ~bit;
2389
2390	bit = 1 << BTRFS_PENDING_COMMIT;
2391	if (prev & bit)
2392		btrfs_debug(fs_info, "pending commit done");
2393	prev &= ~bit;
2394
2395	if (prev)
2396		btrfs_warn(fs_info,
2397			"unknown pending changes left 0x%lx, ignoring", prev);
2398}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/slab.h>
   8#include <linux/sched.h>
   9#include <linux/sched/mm.h>
  10#include <linux/writeback.h>
  11#include <linux/pagemap.h>
  12#include <linux/blkdev.h>
  13#include <linux/uuid.h>
  14#include <linux/timekeeping.h>
  15#include "misc.h"
  16#include "ctree.h"
  17#include "disk-io.h"
  18#include "transaction.h"
  19#include "locking.h"
  20#include "tree-log.h"
  21#include "volumes.h"
  22#include "dev-replace.h"
  23#include "qgroup.h"
  24#include "block-group.h"
  25#include "space-info.h"
  26#include "zoned.h"
  27#include "fs.h"
  28#include "accessors.h"
  29#include "extent-tree.h"
  30#include "root-tree.h"
  31#include "defrag.h"
  32#include "dir-item.h"
  33#include "uuid-tree.h"
  34#include "ioctl.h"
  35#include "relocation.h"
  36#include "scrub.h"
  37
  38static struct kmem_cache *btrfs_trans_handle_cachep;
  39
  40#define BTRFS_ROOT_TRANS_TAG 0
  41
  42/*
  43 * Transaction states and transitions
  44 *
  45 * No running transaction (fs tree blocks are not modified)
  46 * |
  47 * | To next stage:
  48 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
  49 * V
  50 * Transaction N [[TRANS_STATE_RUNNING]]
  51 * |
  52 * | New trans handles can be attached to transaction N by calling any
  53 * | of the start_transaction() variants.
  54 * |
  55 * | To next stage:
  56 * |  Call btrfs_commit_transaction() on any trans handle attached to
  57 * |  transaction N
  58 * V
  59 * Transaction N [[TRANS_STATE_COMMIT_START]]
  60 * |
  61 * | Will wait for previous running transaction to completely finish if there
  62 * | is one
  63 * |
  64 * | Then one of the following happens:
  65 * | - Wait for all other trans handle holders to release.
  66 * |   The btrfs_commit_transaction() caller will do the commit work.
  67 * | - Wait for current transaction to be committed by others.
  68 * |   Other btrfs_commit_transaction() caller will do the commit work.
  69 * |
  70 * | At this stage, only btrfs_join_transaction*() variants can attach
  71 * | to this running transaction.
  72 * | All other variants will wait for current one to finish and attach to
  73 * | transaction N+1.
  74 * |
  75 * | To next stage:
  76 * |  Caller is chosen to commit transaction N, and all other trans handles
  77 * |  have been released.
  78 * V
  79 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
  80 * |
  81 * | The heavy lifting transaction work is started.
  82 * | From running delayed refs (modifying extent tree) to creating pending
  83 * | snapshots, running qgroups.
  84 * | In short, modify supporting trees to reflect modifications of subvolume
  85 * | trees.
  86 * |
  87 * | At this stage, all start_transaction() calls will wait for this
  88 * | transaction to finish and attach to transaction N+1.
  89 * |
  90 * | To next stage:
  91 * |  Until all supporting trees are updated.
  92 * V
  93 * Transaction N [[TRANS_STATE_UNBLOCKED]]
  94 * |						    Transaction N+1
  95 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
  96 * | need to write them back to disk and update	    |
  97 * | super blocks.				    |
  98 * |						    |
  99 * | At this stage, new transaction is allowed to   |
 100 * | start.					    |
 101 * | All new start_transaction() calls will be	    |
 102 * | attached to transid N+1.			    |
 103 * |						    |
 104 * | To next stage:				    |
 105 * |  Until all tree blocks and super blocks are    |
 106 * |  written to block devices			    |
 107 * V						    |
 108 * Transaction N [[TRANS_STATE_COMPLETED]]	    V
 109 *   All tree blocks and super blocks are written.  Transaction N+1
 110 *   This transaction is finished and all its	    [[TRANS_STATE_COMMIT_START]]
 111 *   data structures will be cleaned up.	    | Life goes on
 112 */
 113static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
 114	[TRANS_STATE_RUNNING]		= 0U,
 115	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
 116	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
 117					   __TRANS_ATTACH |
 118					   __TRANS_JOIN |
 119					   __TRANS_JOIN_NOSTART),
 120	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
 121					   __TRANS_ATTACH |
 122					   __TRANS_JOIN |
 123					   __TRANS_JOIN_NOLOCK |
 124					   __TRANS_JOIN_NOSTART),
 125	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
 126					   __TRANS_ATTACH |
 127					   __TRANS_JOIN |
 128					   __TRANS_JOIN_NOLOCK |
 129					   __TRANS_JOIN_NOSTART),
 130	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
 131					   __TRANS_ATTACH |
 132					   __TRANS_JOIN |
 133					   __TRANS_JOIN_NOLOCK |
 134					   __TRANS_JOIN_NOSTART),
 135};
 136
 137void btrfs_put_transaction(struct btrfs_transaction *transaction)
 138{
 139	WARN_ON(refcount_read(&transaction->use_count) == 0);
 140	if (refcount_dec_and_test(&transaction->use_count)) {
 141		BUG_ON(!list_empty(&transaction->list));
 142		WARN_ON(!RB_EMPTY_ROOT(
 143				&transaction->delayed_refs.href_root.rb_root));
 144		WARN_ON(!RB_EMPTY_ROOT(
 145				&transaction->delayed_refs.dirty_extent_root));
 146		if (transaction->delayed_refs.pending_csums)
 147			btrfs_err(transaction->fs_info,
 148				  "pending csums is %llu",
 149				  transaction->delayed_refs.pending_csums);
 150		/*
 151		 * If any block groups are found in ->deleted_bgs then it's
 152		 * because the transaction was aborted and a commit did not
 153		 * happen (things failed before writing the new superblock
 154		 * and calling btrfs_finish_extent_commit()), so we can not
 155		 * discard the physical locations of the block groups.
 156		 */
 157		while (!list_empty(&transaction->deleted_bgs)) {
 158			struct btrfs_block_group *cache;
 159
 160			cache = list_first_entry(&transaction->deleted_bgs,
 161						 struct btrfs_block_group,
 162						 bg_list);
 163			list_del_init(&cache->bg_list);
 164			btrfs_unfreeze_block_group(cache);
 165			btrfs_put_block_group(cache);
 166		}
 167		WARN_ON(!list_empty(&transaction->dev_update_list));
 168		kfree(transaction);
 169	}
 170}
 171
 172static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
 173{
 174	struct btrfs_transaction *cur_trans = trans->transaction;
 175	struct btrfs_fs_info *fs_info = trans->fs_info;
 176	struct btrfs_root *root, *tmp;
 177
 178	/*
 179	 * At this point no one can be using this transaction to modify any tree
 180	 * and no one can start another transaction to modify any tree either.
 181	 */
 182	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);
 183
 184	down_write(&fs_info->commit_root_sem);
 185
 186	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
 187		fs_info->last_reloc_trans = trans->transid;
 188
 189	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
 190				 dirty_list) {
 191		list_del_init(&root->dirty_list);
 192		free_extent_buffer(root->commit_root);
 193		root->commit_root = btrfs_root_node(root);
 194		extent_io_tree_release(&root->dirty_log_pages);
 195		btrfs_qgroup_clean_swapped_blocks(root);
 196	}
 197
 198	/* We can free old roots now. */
 199	spin_lock(&cur_trans->dropped_roots_lock);
 200	while (!list_empty(&cur_trans->dropped_roots)) {
 201		root = list_first_entry(&cur_trans->dropped_roots,
 202					struct btrfs_root, root_list);
 203		list_del_init(&root->root_list);
 204		spin_unlock(&cur_trans->dropped_roots_lock);
 205		btrfs_free_log(trans, root);
 206		btrfs_drop_and_free_fs_root(fs_info, root);
 207		spin_lock(&cur_trans->dropped_roots_lock);
 208	}
 209	spin_unlock(&cur_trans->dropped_roots_lock);
 210
 211	up_write(&fs_info->commit_root_sem);
 212}
 213
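/*
 * "Extwriters" are the trans handle types in TRANS_EXTWRITERS, i.e. those
 * that may add dirty file data to the transaction. The commit path waits
 * for this counter to drop to zero so that no more dirty data can be
 * added to the transaction being committed.
 */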
 214static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
 215					 unsigned int type)
 216{
 217	if (type & TRANS_EXTWRITERS)
 218		atomic_inc(&trans->num_extwriters);
 219}
 220
 221static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
 222					 unsigned int type)
 223{
 224	if (type & TRANS_EXTWRITERS)
 225		atomic_dec(&trans->num_extwriters);
 226}
 227
 228static inline void extwriter_counter_init(struct btrfs_transaction *trans,
 229					  unsigned int type)
 230{
 231	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
 232}
 233
 234static inline int extwriter_counter_read(struct btrfs_transaction *trans)
 235{
 236	return atomic_read(&trans->num_extwriters);
 237}
 238
 239/*
 240 * To be called after doing the chunk btree updates right after allocating a new
 241 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 242 * chunk after all chunk btree updates and after finishing the second phase of
 243 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 244 * group had its chunk item insertion delayed to the second phase.
 245 */
 246void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
 247{
 248	struct btrfs_fs_info *fs_info = trans->fs_info;
 249
 250	if (!trans->chunk_bytes_reserved)
 251		return;
 252
 253	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
 254				trans->chunk_bytes_reserved, NULL);
 255	trans->chunk_bytes_reserved = 0;
 256}
 257
 258/*
 259 * either allocate a new transaction or hop into the existing one
 260 */
 261static noinline int join_transaction(struct btrfs_fs_info *fs_info,
 262				     unsigned int type)
 263{
 264	struct btrfs_transaction *cur_trans;
 265
 266	spin_lock(&fs_info->trans_lock);
 267loop:
 268	/* The file system has been taken offline. No new transactions. */
 269	if (BTRFS_FS_ERROR(fs_info)) {
 270		spin_unlock(&fs_info->trans_lock);
 271		return -EROFS;
 272	}
 273
 274	cur_trans = fs_info->running_transaction;
 275	if (cur_trans) {
 276		if (TRANS_ABORTED(cur_trans)) {
 277			spin_unlock(&fs_info->trans_lock);
 278			return cur_trans->aborted;
 279		}
 280		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
 281			spin_unlock(&fs_info->trans_lock);
 282			return -EBUSY;
 283		}
 284		refcount_inc(&cur_trans->use_count);
 285		atomic_inc(&cur_trans->num_writers);
 286		extwriter_counter_inc(cur_trans, type);
 287		spin_unlock(&fs_info->trans_lock);
 288		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
 289		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
 290		return 0;
 291	}
 292	spin_unlock(&fs_info->trans_lock);
 293
 294	/*
 295	 * If we are ATTACH, we just want to catch the current transaction,
 296	 * and commit it. If there is no transaction, just return ENOENT.
 297	 */
 298	if (type == TRANS_ATTACH)
 299		return -ENOENT;
 300
 301	/*
 302	 * JOIN_NOLOCK only happens during the transaction commit, so
 303	 * it is impossible that ->running_transaction is NULL
 304	 */
 305	BUG_ON(type == TRANS_JOIN_NOLOCK);
 306
 307	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
 308	if (!cur_trans)
 309		return -ENOMEM;
 310
 311	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
 312	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
 313
 314	spin_lock(&fs_info->trans_lock);
 315	if (fs_info->running_transaction) {
 316		/*
 317		 * someone started a transaction after we unlocked.  Make sure
 318		 * to redo the checks above
 319		 */
 320		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
 321		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
 322		kfree(cur_trans);
 323		goto loop;
 324	} else if (BTRFS_FS_ERROR(fs_info)) {
 325		spin_unlock(&fs_info->trans_lock);
 326		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
 327		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
 328		kfree(cur_trans);
 329		return -EROFS;
 330	}
 331
 332	cur_trans->fs_info = fs_info;
 333	atomic_set(&cur_trans->pending_ordered, 0);
 334	init_waitqueue_head(&cur_trans->pending_wait);
 335	atomic_set(&cur_trans->num_writers, 1);
 336	extwriter_counter_init(cur_trans, type);
 337	init_waitqueue_head(&cur_trans->writer_wait);
 338	init_waitqueue_head(&cur_trans->commit_wait);
 339	cur_trans->state = TRANS_STATE_RUNNING;
 340	/*
 341	 * One for this trans handle, one so it will live on until we
 342	 * commit the transaction.
 343	 */
 344	refcount_set(&cur_trans->use_count, 2);
 345	cur_trans->flags = 0;
 346	cur_trans->start_time = ktime_get_seconds();
 347
 348	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
 349
 350	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
 351	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
 352	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 353
 354	/*
 355	 * although the tree mod log is per file system and not per transaction,
 356	 * the log must never go across transaction boundaries.
 357	 */
 358	smp_mb();
 359	if (!list_empty(&fs_info->tree_mod_seq_list))
 360		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
 361	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
 362		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
 363	atomic64_set(&fs_info->tree_mod_seq, 0);
 364
 365	spin_lock_init(&cur_trans->delayed_refs.lock);
 366
 367	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 368	INIT_LIST_HEAD(&cur_trans->dev_update_list);
 369	INIT_LIST_HEAD(&cur_trans->switch_commits);
 370	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
 371	INIT_LIST_HEAD(&cur_trans->io_bgs);
 372	INIT_LIST_HEAD(&cur_trans->dropped_roots);
 373	mutex_init(&cur_trans->cache_write_mutex);
 374	spin_lock_init(&cur_trans->dirty_bgs_lock);
 375	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
 376	spin_lock_init(&cur_trans->dropped_roots_lock);
 377	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
 378	spin_lock_init(&cur_trans->releasing_ebs_lock);
 379	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 380	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
 381			IO_TREE_TRANS_DIRTY_PAGES);
 382	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
 383			IO_TREE_FS_PINNED_EXTENTS);
 384	fs_info->generation++;
 385	cur_trans->transid = fs_info->generation;
 386	fs_info->running_transaction = cur_trans;
 387	cur_trans->aborted = 0;
 388	spin_unlock(&fs_info->trans_lock);
 389
 390	return 0;
 391}
 392
 393/*
 394 * This does all the record keeping required to make sure that a shareable root
 395 * is properly recorded in a given transaction.  This is required to make sure
 396 * the old root from before we joined the transaction is deleted when the
 397 * transaction commits.
 398 */
 399static int record_root_in_trans(struct btrfs_trans_handle *trans,
 400			       struct btrfs_root *root,
 401			       int force)
 402{
 403	struct btrfs_fs_info *fs_info = root->fs_info;
 404	int ret = 0;
 405
 406	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
 407	    root->last_trans < trans->transid) || force) {
 408		WARN_ON(!force && root->commit_root != root->node);
 409
 410		/*
 411		 * see below for IN_TRANS_SETUP usage rules
 412		 * we have the reloc mutex held now, so there
 413		 * is only one writer in this function
 414		 */
 415		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
 416
 417		/* make sure readers find IN_TRANS_SETUP before
 418		 * they find our root->last_trans update
 419		 */
 420		smp_wmb();
 421
 422		spin_lock(&fs_info->fs_roots_radix_lock);
 423		if (root->last_trans == trans->transid && !force) {
 424			spin_unlock(&fs_info->fs_roots_radix_lock);
 425			return 0;
 426		}
 427		radix_tree_tag_set(&fs_info->fs_roots_radix,
 428				   (unsigned long)root->root_key.objectid,
 429				   BTRFS_ROOT_TRANS_TAG);
 430		spin_unlock(&fs_info->fs_roots_radix_lock);
 431		root->last_trans = trans->transid;
 432
 433		/* this is pretty tricky.  We don't want to
 434		 * take the relocation lock in btrfs_record_root_in_trans
 435		 * unless we're really doing the first setup for this root in
 436		 * this transaction.
 437		 *
 438		 * Normally we'd use root->last_trans as a flag to decide
 439		 * if we want to take the expensive mutex.
 440		 *
 441		 * But, we have to set root->last_trans before we
 442		 * init the relocation root, otherwise, we trip over warnings
 443		 * in ctree.c.  The solution used here is to flag ourselves
 444		 * with root IN_TRANS_SETUP.  When this is 1, we're still
 445		 * fixing up the reloc trees and everyone must wait.
 446		 *
 447		 * When this is zero, they can trust root->last_trans and fly
 448		 * through btrfs_record_root_in_trans without having to take the
 449		 * lock.  smp_wmb() makes sure that all the writes above are
 450		 * done before we pop in the zero below
 451		 */
 452		ret = btrfs_init_reloc_root(trans, root);
 453		smp_mb__before_atomic();
 454		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
 455	}
 456	return ret;
 457}
 458
 459
 460void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
 461			    struct btrfs_root *root)
 462{
 463	struct btrfs_fs_info *fs_info = root->fs_info;
 464	struct btrfs_transaction *cur_trans = trans->transaction;
 465
 466	/* Add ourselves to the transaction dropped list */
 467	spin_lock(&cur_trans->dropped_roots_lock);
 468	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
 469	spin_unlock(&cur_trans->dropped_roots_lock);
 470
 471	/* Make sure we don't try to update the root at commit time */
 472	spin_lock(&fs_info->fs_roots_radix_lock);
 473	radix_tree_tag_clear(&fs_info->fs_roots_radix,
 474			     (unsigned long)root->root_key.objectid,
 475			     BTRFS_ROOT_TRANS_TAG);
 476	spin_unlock(&fs_info->fs_roots_radix_lock);
 477}
 478
 479int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 480			       struct btrfs_root *root)
 481{
 482	struct btrfs_fs_info *fs_info = root->fs_info;
 483	int ret;
 484
 485	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 486		return 0;
 487
 488	/*
 489	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
 490	 * and barriers
 491	 */
 492	smp_rmb();
 493	if (root->last_trans == trans->transid &&
 494	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
 495		return 0;
 496
 497	mutex_lock(&fs_info->reloc_mutex);
 498	ret = record_root_in_trans(trans, root, 0);
 499	mutex_unlock(&fs_info->reloc_mutex);
 500
 501	return ret;
 502}
 503
 504static inline int is_transaction_blocked(struct btrfs_transaction *trans)
 505{
 506	return (trans->state >= TRANS_STATE_COMMIT_START &&
 507		trans->state < TRANS_STATE_UNBLOCKED &&
 508		!TRANS_ABORTED(trans));
 509}
 510
 511/* wait for commit against the current transaction to become unblocked
 512 * when this is done, it is safe to start a new transaction, but the current
 513 * transaction might not be fully on disk.
 514 */
 515static void wait_current_trans(struct btrfs_fs_info *fs_info)
 516{
 517	struct btrfs_transaction *cur_trans;
 518
 519	spin_lock(&fs_info->trans_lock);
 520	cur_trans = fs_info->running_transaction;
 521	if (cur_trans && is_transaction_blocked(cur_trans)) {
 522		refcount_inc(&cur_trans->use_count);
 523		spin_unlock(&fs_info->trans_lock);
 524
 525		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
 526		wait_event(fs_info->transaction_wait,
 527			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
 528			   TRANS_ABORTED(cur_trans));
 529		btrfs_put_transaction(cur_trans);
 530	} else {
 531		spin_unlock(&fs_info->trans_lock);
 532	}
 533}
 534
 535static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
 536{
 537	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
 538		return 0;
 539
 540	if (type == TRANS_START)
 541		return 1;
 542
 543	return 0;
 544}
 545
 546static inline bool need_reserve_reloc_root(struct btrfs_root *root)
 547{
 548	struct btrfs_fs_info *fs_info = root->fs_info;
 549
 550	if (!fs_info->reloc_ctl ||
 551	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
 552	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 553	    root->reloc_root)
 554		return false;
 555
 556	return true;
 557}
 558
 559static struct btrfs_trans_handle *
 560start_transaction(struct btrfs_root *root, unsigned int num_items,
 561		  unsigned int type, enum btrfs_reserve_flush_enum flush,
 562		  bool enforce_qgroups)
 563{
 564	struct btrfs_fs_info *fs_info = root->fs_info;
 565	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
 566	struct btrfs_trans_handle *h;
 567	struct btrfs_transaction *cur_trans;
 568	u64 num_bytes = 0;
 569	u64 qgroup_reserved = 0;
 570	bool reloc_reserved = false;
 571	bool do_chunk_alloc = false;
 572	int ret;
 573
 574	if (BTRFS_FS_ERROR(fs_info))
 575		return ERR_PTR(-EROFS);
 576
 577	if (current->journal_info) {
 578		WARN_ON(type & TRANS_EXTWRITERS);
 579		h = current->journal_info;
 580		refcount_inc(&h->use_count);
 581		WARN_ON(refcount_read(&h->use_count) > 2);
 582		h->orig_rsv = h->block_rsv;
 583		h->block_rsv = NULL;
 584		goto got_it;
 585	}
 586
 587	/*
 588	 * Do the reservation before we join the transaction so we can do all
 589	 * the appropriate flushing if need be.
 590	 */
 591	if (num_items && root != fs_info->chunk_root) {
 592		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
 593		u64 delayed_refs_bytes = 0;
 594
 595		qgroup_reserved = num_items * fs_info->nodesize;
 596		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
 597				enforce_qgroups);
 598		if (ret)
 599			return ERR_PTR(ret);
 600
 601		/*
 602		 * We want to reserve all the bytes we may need all at once, so
 603		 * we only do 1 enospc flushing cycle per transaction start.  We
 604		 * accomplish this by simply assuming we'll do 2 x num_items
 605		 * worth of delayed refs updates in this trans handle, and
 606		 * refill that amount for whatever is missing in the reserve.
 607		 */
 608		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
 609		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
 610		    btrfs_block_rsv_full(delayed_refs_rsv) == 0) {
 611			delayed_refs_bytes = num_bytes;
 612			num_bytes <<= 1;
 613		}
 614
 615		/*
 616		 * Do the reservation for the relocation root creation
 617		 */
 618		if (need_reserve_reloc_root(root)) {
 619			num_bytes += fs_info->nodesize;
 620			reloc_reserved = true;
 621		}
 622
 623		ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
 624		if (ret)
 625			goto reserve_fail;
 626		if (delayed_refs_bytes) {
 627			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
 628							  delayed_refs_bytes);
 629			num_bytes -= delayed_refs_bytes;
 630		}
 631
 632		if (rsv->space_info->force_alloc)
 633			do_chunk_alloc = true;
 634	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
 635		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
 636		/*
 637		 * Some people call with btrfs_start_transaction(root, 0)
 638		 * because they can be throttled, but have some other mechanism
 639		 * for reserving space.  We still want these guys to refill the
 640 * delayed block_rsv, so just add 1 item's worth of reservation
 641		 * here.
 642		 */
 643		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
 644		if (ret)
 645			goto reserve_fail;
 646	}
 647again:
 648	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
 649	if (!h) {
 650		ret = -ENOMEM;
 651		goto alloc_fail;
 652	}
 653
 654	/*
 655	 * If we are JOIN_NOLOCK we're already committing a transaction and
 656	 * waiting on this guy, so we don't need to do the sb_start_intwrite
 657	 * because we're already holding a ref.  We need this because we could
 658 * have raced in and done an fsync() on a file which can kick a commit
 659	 * and then we deadlock with somebody doing a freeze.
 660	 *
 661	 * If we are ATTACH, it means we just want to catch the current
 662	 * transaction and commit it, so we needn't do sb_start_intwrite(). 
 663	 */
 664	if (type & __TRANS_FREEZABLE)
 665		sb_start_intwrite(fs_info->sb);
 666
 667	if (may_wait_transaction(fs_info, type))
 668		wait_current_trans(fs_info);
 669
 670	do {
 671		ret = join_transaction(fs_info, type);
 672		if (ret == -EBUSY) {
 673			wait_current_trans(fs_info);
 674			if (unlikely(type == TRANS_ATTACH ||
 675				     type == TRANS_JOIN_NOSTART))
 676				ret = -ENOENT;
 677		}
 678	} while (ret == -EBUSY);
 679
 680	if (ret < 0)
 681		goto join_fail;
 682
 683	cur_trans = fs_info->running_transaction;
 684
 685	h->transid = cur_trans->transid;
 686	h->transaction = cur_trans;
 687	refcount_set(&h->use_count, 1);
 688	h->fs_info = root->fs_info;
 689
 690	h->type = type;
 691	INIT_LIST_HEAD(&h->new_bgs);
 692
 693	smp_mb();
 694	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
 695	    may_wait_transaction(fs_info, type)) {
 696		current->journal_info = h;
 697		btrfs_commit_transaction(h);
 698		goto again;
 699	}
 700
 701	if (num_bytes) {
 702		trace_btrfs_space_reservation(fs_info, "transaction",
 703					      h->transid, num_bytes, 1);
 704		h->block_rsv = &fs_info->trans_block_rsv;
 705		h->bytes_reserved = num_bytes;
 706		h->reloc_reserved = reloc_reserved;
 707	}
 708
 709got_it:
 710	if (!current->journal_info)
 711		current->journal_info = h;
 712
 713	/*
 714	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
 715	 * ALLOC_FORCE the first run through, and then we won't allocate for
 716	 * anybody else who races in later.  We don't care about the return
 717	 * value here.
 718	 */
 719	if (do_chunk_alloc && num_bytes) {
 720		u64 flags = h->block_rsv->space_info->flags;
 721
 722		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
 723				  CHUNK_ALLOC_NO_FORCE);
 724	}
 725
 726	/*
 727	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
 728	 * call btrfs_join_transaction() while we're also starting a
 729	 * transaction.
 730	 *
 731 * Thus it needs to be called after current->journal_info is initialized,
 732	 * or we can deadlock.
 733	 */
 734	ret = btrfs_record_root_in_trans(h, root);
 735	if (ret) {
 736		/*
 737		 * The transaction handle is fully initialized and linked with
 738		 * other structures so it needs to be ended in case of errors,
 739		 * not just freed.
 740		 */
 741		btrfs_end_transaction(h);
 742		return ERR_PTR(ret);
 743	}
 744
 745	return h;
 746
 747join_fail:
 748	if (type & __TRANS_FREEZABLE)
 749		sb_end_intwrite(fs_info->sb);
 750	kmem_cache_free(btrfs_trans_handle_cachep, h);
 751alloc_fail:
 752	if (num_bytes)
 753		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
 754					num_bytes, NULL);
 755reserve_fail:
 756	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
 757	return ERR_PTR(ret);
 758}
 759
 760struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 761						   unsigned int num_items)
 762{
 763	return start_transaction(root, num_items, TRANS_START,
 764				 BTRFS_RESERVE_FLUSH_ALL, true);
 765}
 766
 767struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 768					struct btrfs_root *root,
 769					unsigned int num_items)
 770{
 771	return start_transaction(root, num_items, TRANS_START,
 772				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
 773}
 774
 775struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
 776{
 777	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
 778				 true);
 779}
 780
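/*
 * Join without taking sb_start_intwrite(): per the JOIN_NOLOCK rules in
 * start_transaction(), the caller is already inside a transaction commit
 * and holds the intwrite reference (used by the free space cache code,
 * as the name suggests).
 */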
 781struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
 782{
 783	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
 784				 BTRFS_RESERVE_NO_FLUSH, true);
 785}
 786
 787/*
 788 * Similar to regular join but it never starts a transaction when none is
 789 * running or after waiting for the current one to finish.
 790 */
 791struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
 792{
 793	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
 794				 BTRFS_RESERVE_NO_FLUSH, true);
 795}
 796
 797/*
 798 * btrfs_attach_transaction() - catch the running transaction
 799 *
 800 * It is used when we want to commit the current transaction, but
 801 * don't want to start a new one.
 802 *
 803 * Note: If this function returns -ENOENT, it just means there is no
 804 * running transaction. But it is possible that the inactive transaction
 805 * is still in memory, not fully on disk. If you hope there is no
 806 * inactive transaction in the fs when -ENOENT is returned, you should
 807 * invoke
 808 *     btrfs_attach_transaction_barrier()
 809 */
 810struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
 811{
 812	return start_transaction(root, 0, TRANS_ATTACH,
 813				 BTRFS_RESERVE_NO_FLUSH, true);
 814}
 815
 816/*
 817 * btrfs_attach_transaction_barrier() - catch the running transaction
 818 *
 819 * It is similar to the above function, the difference is that this
 820 * one will wait for all inactive transactions until they fully
 821 * complete.
 822 */
 823struct btrfs_trans_handle *
 824btrfs_attach_transaction_barrier(struct btrfs_root *root)
 825{
 826	struct btrfs_trans_handle *trans;
 827
 828	trans = start_transaction(root, 0, TRANS_ATTACH,
 829				  BTRFS_RESERVE_NO_FLUSH, true);
 830	if (trans == ERR_PTR(-ENOENT))
 831		btrfs_wait_for_commit(root->fs_info, 0);
 832
 833	return trans;
 834}
 835
 836/* Wait for a transaction commit to reach at least the given state. */
 837static noinline void wait_for_commit(struct btrfs_transaction *commit,
 838				     const enum btrfs_trans_state min_state)
 839{
 840	struct btrfs_fs_info *fs_info = commit->fs_info;
 841	u64 transid = commit->transid;
 842	bool put = false;
 843
 844	/*
 845	 * At the moment this function is called with min_state either being
 846	 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
 847	 */
 848	if (min_state == TRANS_STATE_COMPLETED)
 849		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
 850	else
 851		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
 852
 853	while (1) {
 854		wait_event(commit->commit_wait, commit->state >= min_state);
 855		if (put)
 856			btrfs_put_transaction(commit);
 857
 858		if (min_state < TRANS_STATE_COMPLETED)
 859			break;
 860
 861		/*
 862		 * A transaction isn't really completed until all of the
 863		 * previous transactions are completed, but with fsync we can
 864		 * end up with SUPER_COMMITTED transactions before a COMPLETED
 865		 * transaction. Wait for those.
 866		 */
 867
 868		spin_lock(&fs_info->trans_lock);
 869		commit = list_first_entry_or_null(&fs_info->trans_list,
 870						  struct btrfs_transaction,
 871						  list);
 872		if (!commit || commit->transid > transid) {
 873			spin_unlock(&fs_info->trans_lock);
 874			break;
 875		}
 876		refcount_inc(&commit->use_count);
 877		put = true;
 878		spin_unlock(&fs_info->trans_lock);
 879	}
 880}
 881
 882int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
 883{
 884	struct btrfs_transaction *cur_trans = NULL, *t;
 885	int ret = 0;
 886
 887	if (transid) {
 888		if (transid <= fs_info->last_trans_committed)
 889			goto out;
 890
 891		/* find specified transaction */
 892		spin_lock(&fs_info->trans_lock);
 893		list_for_each_entry(t, &fs_info->trans_list, list) {
 894			if (t->transid == transid) {
 895				cur_trans = t;
 896				refcount_inc(&cur_trans->use_count);
 897				ret = 0;
 898				break;
 899			}
 900			if (t->transid > transid) {
 901				ret = 0;
 902				break;
 903			}
 904		}
 905		spin_unlock(&fs_info->trans_lock);
 906
 907		/*
 908		 * The specified transaction doesn't exist, or we
 909		 * raced with btrfs_commit_transaction
 910		 */
 911		if (!cur_trans) {
 912			if (transid > fs_info->last_trans_committed)
 913				ret = -EINVAL;
 914			goto out;
 915		}
 916	} else {
 917		/* find newest transaction that is committing | committed */
 918		spin_lock(&fs_info->trans_lock);
 919		list_for_each_entry_reverse(t, &fs_info->trans_list,
 920					    list) {
 921			if (t->state >= TRANS_STATE_COMMIT_START) {
 922				if (t->state == TRANS_STATE_COMPLETED)
 923					break;
 924				cur_trans = t;
 925				refcount_inc(&cur_trans->use_count);
 926				break;
 927			}
 928		}
 929		spin_unlock(&fs_info->trans_lock);
 930		if (!cur_trans)
 931			goto out;  /* nothing committing|committed */
 932	}
 933
 934	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
 935	btrfs_put_transaction(cur_trans);
 936out:
 937	return ret;
 938}
 939
 940void btrfs_throttle(struct btrfs_fs_info *fs_info)
 941{
 942	wait_current_trans(fs_info);
 943}
 944
 945static bool should_end_transaction(struct btrfs_trans_handle *trans)
 946{
 947	struct btrfs_fs_info *fs_info = trans->fs_info;
 948
 949	if (btrfs_check_space_for_delayed_refs(fs_info))
 950		return true;
 951
 952	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 50);
 953}
 954
 955bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
 956{
 957	struct btrfs_transaction *cur_trans = trans->transaction;
 958
 959	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
 960	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
 961		return true;
 962
 963	return should_end_transaction(trans);
 964}
 965
 966static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
 967
 968{
 969	struct btrfs_fs_info *fs_info = trans->fs_info;
 970
 971	if (!trans->block_rsv) {
 972		ASSERT(!trans->bytes_reserved);
 973		return;
 974	}
 975
 976	if (!trans->bytes_reserved)
 977		return;
 978
 979	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
 980	trace_btrfs_space_reservation(fs_info, "transaction",
 981				      trans->transid, trans->bytes_reserved, 0);
 982	btrfs_block_rsv_release(fs_info, trans->block_rsv,
 983				trans->bytes_reserved, NULL);
 984	trans->bytes_reserved = 0;
 985}
 986
 987static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 988				   int throttle)
 989{
 990	struct btrfs_fs_info *info = trans->fs_info;
 991	struct btrfs_transaction *cur_trans = trans->transaction;
 992	int err = 0;
 993
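	/*
	 * A nested join: start_transaction() re-used this handle via
	 * current->journal_info and bumped use_count, so just drop one
	 * reference and restore the saved block_rsv.
	 */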
 994	if (refcount_read(&trans->use_count) > 1) {
 995		refcount_dec(&trans->use_count);
 996		trans->block_rsv = trans->orig_rsv;
 997		return 0;
 998	}
 999
1000	btrfs_trans_release_metadata(trans);
1001	trans->block_rsv = NULL;
1002
1003	btrfs_create_pending_block_groups(trans);
1004
1005	btrfs_trans_release_chunk_metadata(trans);
1006
1007	if (trans->type & __TRANS_FREEZABLE)
1008		sb_end_intwrite(info->sb);
1009
1010	WARN_ON(cur_trans != info->running_transaction);
1011	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
1012	atomic_dec(&cur_trans->num_writers);
1013	extwriter_counter_dec(cur_trans, trans->type);
1014
1015	cond_wake_up(&cur_trans->writer_wait);
1016
1017	btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
1018	btrfs_lockdep_release(info, btrfs_trans_num_writers);
1019
1020	btrfs_put_transaction(cur_trans);
1021
1022	if (current->journal_info == trans)
1023		current->journal_info = NULL;
1024
1025	if (throttle)
1026		btrfs_run_delayed_iputs(info);
1027
1028	if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
1029		wake_up_process(info->transaction_kthread);
1030		if (TRANS_ABORTED(trans))
1031			err = trans->aborted;
1032		else
1033			err = -EROFS;
1034	}
1035
1036	kmem_cache_free(btrfs_trans_handle_cachep, trans);
1037	return err;
1038}
1039
1040int btrfs_end_transaction(struct btrfs_trans_handle *trans)
1041{
1042	return __btrfs_end_transaction(trans, 0);
1043}
1044
1045int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
1046{
1047	return __btrfs_end_transaction(trans, 1);
1048}
1049
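/*
 * A minimal caller sketch (illustrative only, not code from this file):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...modify trees...
 *	if (btrfs_should_end_transaction(trans))
 *		...end early with btrfs_end_transaction() and restart later...
 *	ret = btrfs_end_transaction(trans);
 */
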
1050/*
1051 * When btree blocks are allocated, they have some corresponding bits set for
1052 * them in one of two extent_io trees.  This is used to make sure all of
1053 * those extents are sent to disk but it does not wait on them.
1054 */
1055int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
1056			       struct extent_io_tree *dirty_pages, int mark)
1057{
1058	int err = 0;
1059	int werr = 0;
1060	struct address_space *mapping = fs_info->btree_inode->i_mapping;
1061	struct extent_state *cached_state = NULL;
1062	u64 start = 0;
1063	u64 end;
1064
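	/*
	 * Count ourselves as a sync writer; this is assumed to let the
	 * checksumming code do the csum work inline instead of punting it
	 * to the async helper threads.
	 */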
1065	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1066	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1067				      mark, &cached_state)) {
1068		bool wait_writeback = false;
1069
1070		err = convert_extent_bit(dirty_pages, start, end,
1071					 EXTENT_NEED_WAIT,
1072					 mark, &cached_state);
1073		/*
1074		 * convert_extent_bit can return -ENOMEM, which is most of the
1075		 * time a temporary error. So when it happens, ignore the error
1076		 * and wait for writeback of this range to finish - because we
1077		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
1078		 * to __btrfs_wait_marked_extents() would not know that
1079		 * writeback for this range started and therefore wouldn't
1080		 * wait for it to finish - we don't want to commit a
1081		 * superblock that points to btree nodes/leaves for which
1082		 * writeback hasn't finished yet (and without errors).
1083		 * We clean up any entries left in the io tree when committing
1084		 * the transaction (through extent_io_tree_release()).
1085		 */
1086		if (err == -ENOMEM) {
1087			err = 0;
1088			wait_writeback = true;
1089		}
1090		if (!err)
1091			err = filemap_fdatawrite_range(mapping, start, end);
1092		if (err)
1093			werr = err;
1094		else if (wait_writeback)
1095			werr = filemap_fdatawait_range(mapping, start, end);
1096		free_extent_state(cached_state);
1097		cached_state = NULL;
1098		cond_resched();
1099		start = end + 1;
1100	}
1101	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1102	return werr;
1103}
1104
1105/*
1106 * When btree blocks are allocated, they have some corresponding bits set for
1107 * them in one of two extent_io trees.  This is used to make sure all of
1108 * those extents are on disk for transaction or log commit.  We wait
1109 * on all the pages and clear them from the dirty pages state tree.
1110 */
1111static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
1112				       struct extent_io_tree *dirty_pages)
1113{
1114	int err = 0;
1115	int werr = 0;
1116	struct address_space *mapping = fs_info->btree_inode->i_mapping;
1117	struct extent_state *cached_state = NULL;
1118	u64 start = 0;
1119	u64 end;
1120
1121	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1122				      EXTENT_NEED_WAIT, &cached_state)) {
1123		/*
1124		 * Ignore -ENOMEM errors returned by clear_extent_bit().
1125		 * When committing the transaction, we'll remove any entries
1126		 * left in the io tree. For a log commit, we don't remove them
1127		 * after committing the log because the tree can be accessed
1128		 * concurrently - we do it only at transaction commit time when
1129		 * it's safe to do it (through extent_io_tree_release()).
1130		 */
1131		err = clear_extent_bit(dirty_pages, start, end,
1132				       EXTENT_NEED_WAIT, &cached_state);
1133		if (err == -ENOMEM)
1134			err = 0;
1135		if (!err)
1136			err = filemap_fdatawait_range(mapping, start, end);
1137		if (err)
1138			werr = err;
1139		free_extent_state(cached_state);
1140		cached_state = NULL;
1141		cond_resched();
1142		start = end + 1;
1143	}
1144	if (err)
1145		werr = err;
1146	return werr;
1147}
1148
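/*
 * Btree writeback errors are latched in the BTRFS_FS_BTREE_ERR flag
 * (presumably by the btree writeback completion path), so test-and-clear
 * it here in addition to waiting on the filemap.
 */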
1149static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
1150		       struct extent_io_tree *dirty_pages)
1151{
1152	bool errors = false;
1153	int err;
1154
1155	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1156	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
1157		errors = true;
1158
1159	if (errors && !err)
1160		err = -EIO;
1161	return err;
1162}
1163
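/*
 * The log tree alternates between two commit "slots"; blocks written for
 * one slot are marked EXTENT_DIRTY and for the other EXTENT_NEW (an
 * assumption based on how @mark is chosen by the tree-log code), and each
 * mark has a matching latched error bit (BTRFS_FS_LOG1_ERR / LOG2_ERR).
 */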
1164int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
1165{
1166	struct btrfs_fs_info *fs_info = log_root->fs_info;
1167	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
1168	bool errors = false;
1169	int err;
1170
1171	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
1172
1173	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1174	if ((mark & EXTENT_DIRTY) &&
1175	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
1176		errors = true;
1177
1178	if ((mark & EXTENT_NEW) &&
1179	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
1180		errors = true;
1181
1182	if (errors && !err)
1183		err = -EIO;
1184	return err;
1185}
1186
1187/*
1188 * When btree blocks are allocated the corresponding extents are marked dirty.
1189 * This function ensures such extents are persisted on disk for transaction or
1190 * log commit.
1191 *
1192 * @trans: transaction whose dirty pages we'd like to write
1193 */
1194static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
1195{
1196	int ret;
1197	int ret2;
1198	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
1199	struct btrfs_fs_info *fs_info = trans->fs_info;
1200	struct blk_plug plug;
1201
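	/* Plug the block layer so writeback of the marked extents is batched. */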
1202	blk_start_plug(&plug);
1203	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
1204	blk_finish_plug(&plug);
1205	ret2 = btrfs_wait_extents(fs_info, dirty_pages);
1206
1207	extent_io_tree_release(&trans->transaction->dirty_pages);
1208
1209	if (ret)
1210		return ret;
1211	else if (ret2)
1212		return ret2;
1213	else
1214		return 0;
1215}
1216
1217/*
1218 * This is used to update the root pointer in the tree of tree roots.
1219 *
1220 * But, in the case of the extent allocation tree, updating the root
1221 * pointer may allocate blocks which may change the root of the extent
1222 * allocation tree.
1223 *
1224 * So this loops and repeats, making sure the cowonly root didn't
1225 * change while the root pointer was being updated in the metadata.
1226 */
1227static int update_cowonly_root(struct btrfs_trans_handle *trans,
1228			       struct btrfs_root *root)
1229{
1230	int ret;
1231	u64 old_root_bytenr;
1232	u64 old_root_used;
1233	struct btrfs_fs_info *fs_info = root->fs_info;
1234	struct btrfs_root *tree_root = fs_info->tree_root;
1235
1236	old_root_used = btrfs_root_used(&root->root_item);
1237
1238	while (1) {
1239		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1240		if (old_root_bytenr == root->node->start &&
1241		    old_root_used == btrfs_root_used(&root->root_item))
1242			break;
1243
1244		btrfs_set_root_node(&root->root_item, root->node);
1245		ret = btrfs_update_root(trans, tree_root,
1246					&root->root_key,
1247					&root->root_item);
1248		if (ret)
1249			return ret;
1250
1251		old_root_used = btrfs_root_used(&root->root_item);
1252	}
1253
1254	return 0;
1255}
1256
1257/*
1258 * update all the cowonly tree roots on disk
1259 *
1260 * The error handling in this function may not be obvious. Any of the
1261 * failures will cause the file system to go offline. We still need
1262 * to clean up the delayed refs.
1263 */
1264static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
1265{
1266	struct btrfs_fs_info *fs_info = trans->fs_info;
1267	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1268	struct list_head *io_bgs = &trans->transaction->io_bgs;
1269	struct list_head *next;
1270	struct extent_buffer *eb;
1271	int ret;
1272
1273	/*
1274	 * At this point no one can be using this transaction to modify any tree
1275	 * and no one can start another transaction to modify any tree either.
1276	 */
1277	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
1278
1279	eb = btrfs_lock_root_node(fs_info->tree_root);
1280	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1281			      0, &eb, BTRFS_NESTING_COW);
1282	btrfs_tree_unlock(eb);
1283	free_extent_buffer(eb);
1284
1285	if (ret)
1286		return ret;
1287
1288	ret = btrfs_run_dev_stats(trans);
1289	if (ret)
1290		return ret;
1291	ret = btrfs_run_dev_replace(trans);
1292	if (ret)
1293		return ret;
1294	ret = btrfs_run_qgroups(trans);
1295	if (ret)
1296		return ret;
1297
1298	ret = btrfs_setup_space_cache(trans);
1299	if (ret)
1300		return ret;
1301
1302again:
1303	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1304		struct btrfs_root *root;
1305		next = fs_info->dirty_cowonly_roots.next;
1306		list_del_init(next);
1307		root = list_entry(next, struct btrfs_root, dirty_list);
1308		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1309
1310		list_add_tail(&root->dirty_list,
1311			      &trans->transaction->switch_commits);
1312		ret = update_cowonly_root(trans, root);
1313		if (ret)
1314			return ret;
1315	}
1316
1317	/* Now flush any delayed refs generated by updating all of the roots */
1318	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1319	if (ret)
1320		return ret;
1321
1322	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1323		ret = btrfs_write_dirty_block_groups(trans);
1324		if (ret)
1325			return ret;
1326
1327		/*
1328		 * We're writing the dirty block groups, which could generate
1329		 * delayed refs, which could generate more dirty block groups,
1330		 * so we want to keep this flushing in this loop to make sure
1331		 * everything gets run.
1332		 */
1333		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1334		if (ret)
1335			return ret;
1336	}
1337
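	/*
	 * Writing the dirty block groups and flushing their delayed refs may
	 * have re-dirtied cow-only roots (e.g. the extent tree), so repeat
	 * the whole pass until everything settles.
	 */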
1338	if (!list_empty(&fs_info->dirty_cowonly_roots))
1339		goto again;
1340
1341	/* Update dev-replace pointer once everything is committed */
1342	fs_info->dev_replace.committed_cursor_left =
1343		fs_info->dev_replace.cursor_left_last_write_of_item;
1344
1345	return 0;
1346}
1347
1348/*
1349 * If we had a pending drop we need to see if there are any others left in our
1350 * dead roots list, and if not clear our bit and wake any waiters.
1351 */
1352void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
1353{
1354	/*
1355	 * We put the drop in progress roots at the front of the list, so if the
1356	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
1357	 * up.
1358	 */
1359	spin_lock(&fs_info->trans_lock);
1360	if (!list_empty(&fs_info->dead_roots)) {
1361		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
1362							   struct btrfs_root,
1363							   root_list);
1364		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
1365			spin_unlock(&fs_info->trans_lock);
1366			return;
1367		}
1368	}
1369	spin_unlock(&fs_info->trans_lock);
1370
1371	btrfs_wake_unfinished_drop(fs_info);
1372}
1373
1374/*
1375 * Dead roots are old snapshots that need to be deleted.  This grabs a
1376 * reference on the root and adds it to the list of dead roots that need
1377 * to be deleted.
1378 */
1379void btrfs_add_dead_root(struct btrfs_root *root)
1380{
1381	struct btrfs_fs_info *fs_info = root->fs_info;
1382
1383	spin_lock(&fs_info->trans_lock);
1384	if (list_empty(&root->root_list)) {
1385		btrfs_grab_root(root);
1386
1387		/* We want to process the partially complete drops first. */
1388		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
1389			list_add(&root->root_list, &fs_info->dead_roots);
1390		else
1391			list_add_tail(&root->root_list, &fs_info->dead_roots);
1392	}
1393	spin_unlock(&fs_info->trans_lock);
1394}
1395
1396/*
1397 * Update each subvolume root and its relocation root, if it exists, in the tree
1398 * of tree roots. Also free log roots if they exist.
1399 */
1400static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
1401{
1402	struct btrfs_fs_info *fs_info = trans->fs_info;
1403	struct btrfs_root *gang[8];
1404	int i;
1405	int ret;
1406
1407	/*
1408	 * At this point no one can be using this transaction to modify any tree
1409	 * and no one can start another transaction to modify any tree either.
1410	 */
1411	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
1412
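	/*
	 * Walk all roots that were modified in this transaction: they are
	 * tagged in the radix tree with BTRFS_ROOT_TRANS_TAG, looked up in
	 * gangs of up to 8, and the tag is cleared as each one is committed.
	 */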
1413	spin_lock(&fs_info->fs_roots_radix_lock);
1414	while (1) {
1415		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1416						 (void **)gang, 0,
1417						 ARRAY_SIZE(gang),
1418						 BTRFS_ROOT_TRANS_TAG);
1419		if (ret == 0)
1420			break;
1421		for (i = 0; i < ret; i++) {
1422			struct btrfs_root *root = gang[i];
1423			int ret2;
1424
1425			/*
1426			 * At this point we can neither have tasks logging inodes
1427			 * from a root nor trying to commit a log tree.
1428			 */
1429			ASSERT(atomic_read(&root->log_writers) == 0);
1430			ASSERT(atomic_read(&root->log_commit[0]) == 0);
1431			ASSERT(atomic_read(&root->log_commit[1]) == 0);
1432
1433			radix_tree_tag_clear(&fs_info->fs_roots_radix,
1434					(unsigned long)root->root_key.objectid,
1435					BTRFS_ROOT_TRANS_TAG);
1436			spin_unlock(&fs_info->fs_roots_radix_lock);
1437
1438			btrfs_free_log(trans, root);
1439			ret2 = btrfs_update_reloc_root(trans, root);
1440			if (ret2)
1441				return ret2;
1442
1443			/* see comments in should_cow_block() */
1444			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1445			smp_mb__after_atomic();
1446
1447			if (root->commit_root != root->node) {
1448				list_add_tail(&root->dirty_list,
1449					&trans->transaction->switch_commits);
1450				btrfs_set_root_node(&root->root_item,
1451						    root->node);
1452			}
1453
1454			ret2 = btrfs_update_root(trans, fs_info->tree_root,
1455						&root->root_key,
1456						&root->root_item);
1457			if (ret2)
1458				return ret2;
1459			spin_lock(&fs_info->fs_roots_radix_lock);
1460			btrfs_qgroup_free_meta_all_pertrans(root);
1461		}
1462	}
1463	spin_unlock(&fs_info->fs_roots_radix_lock);
1464	return 0;
1465}
1466
1467/*
1468 * Defrag a given btree.
1469 * Every leaf in the btree is read and defragged.
1470 */
1471int btrfs_defrag_root(struct btrfs_root *root)
1472{
1473	struct btrfs_fs_info *info = root->fs_info;
1474	struct btrfs_trans_handle *trans;
1475	int ret;
1476
1477	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1478		return 0;
1479
1480	while (1) {
1481		trans = btrfs_start_transaction(root, 0);
1482		if (IS_ERR(trans)) {
1483			ret = PTR_ERR(trans);
1484			break;
1485		}
1486
1487		ret = btrfs_defrag_leaves(trans, root);
1488
1489		btrfs_end_transaction(trans);
1490		btrfs_btree_balance_dirty(info);
1491		cond_resched();
1492
1493		if (btrfs_fs_closing(info) || ret != -EAGAIN)
1494			break;
1495
1496		if (btrfs_defrag_cancelled(info)) {
1497			btrfs_debug(info, "defrag_root cancelled");
1498			ret = -EAGAIN;
1499			break;
1500		}
1501	}
1502	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1503	return ret;
1504}
1505
1506/*
1507 * Do all the special snapshot-related qgroup dirty hacks.
1508 *
1509 * This does all the needed qgroup inherit and dirty hacks, like switching
1510 * commit roots inside one transaction and writing all the btrees to disk,
1511 * so that qgroups work correctly.
1512 */
1513static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1514				   struct btrfs_root *src,
1515				   struct btrfs_root *parent,
1516				   struct btrfs_qgroup_inherit *inherit,
1517				   u64 dst_objectid)
1518{
1519	struct btrfs_fs_info *fs_info = src->fs_info;
1520	int ret;
1521
1522	/*
1523	 * Save some performance in the case that qgroups are not
1524	 * enabled. If this check races with the ioctl, rescan will
1525	 * kick in anyway.
1526	 */
1527	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1528		return 0;
1529
1530	/*
1531	 * Ensure dirty @src will be committed.  Otherwise, after the coming
1532	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
1533	 * recorded root will never be updated again, causing an outdated root
1534	 * item.
1535	 */
1536	ret = record_root_in_trans(trans, src, 1);
1537	if (ret)
1538		return ret;
1539
1540	/*
1541	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
1542	 * src root, so we must run the delayed refs here.
1543	 *
1544	 * However this isn't particularly fool proof, because there's no
1545	 * synchronization keeping us from changing the tree after this point
1546	 * before we do the qgroup_inherit, or even from making changes while
1547	 * we're doing the qgroup_inherit.  But that's a problem for the future,
1548	 * for now flush the delayed refs to narrow the race window where the
1549	 * qgroup counters could end up wrong.
1550	 */
1551	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1552	if (ret) {
1553		btrfs_abort_transaction(trans, ret);
1554		return ret;
1555	}
1556
1557	ret = commit_fs_roots(trans);
1558	if (ret)
1559		goto out;
1560	ret = btrfs_qgroup_account_extents(trans);
1561	if (ret < 0)
1562		goto out;
1563
1564	/* Now qgroups are all updated, we can inherit them into the new qgroup */
1565	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
1566				   inherit);
1567	if (ret < 0)
1568		goto out;
1569
1570	/*
1571	 * Now we do a simplified commit transaction, which will:
1572	 * 1) commit all subvolume and extent trees
1573	 *    To ensure all subvolume and extent trees have a valid
1574	 *    commit_root for accounting in the later insert_dir_item()
1575	 * 2) write all btree blocks onto disk
1576	 *    This is to make sure later btree modifications will be COWed,
1577	 *    or the commit_root could be polluted and cause wrong qgroup numbers
1578	 * In this simplified commit, we don't really care about other trees
1579	 * like the chunk and root trees, as they won't affect qgroups.
1580	 * And we don't write the super to avoid a half-committed state.
1581	 */
1582	ret = commit_cowonly_roots(trans);
1583	if (ret)
1584		goto out;
1585	switch_commit_roots(trans);
1586	ret = btrfs_write_and_wait_transaction(trans);
1587	if (ret)
1588		btrfs_handle_fs_error(fs_info, ret,
1589			"Error while writing out transaction for qgroup");
1590
1591out:
1592	/*
1593	 * Force the parent root to be updated, as we recorded it before so
1594	 * its last_trans == cur_transid.
1595	 * Otherwise it won't be committed to disk again after the later
1596	 * insert_dir_item().
1597	 */
1598	if (!ret)
1599		ret = record_root_in_trans(trans, parent, 1);
1600	return ret;
1601}
1602
1603/*
1604 * New snapshots need to be created at a very specific time in the
1605 * transaction commit.  This does the actual creation.
1606 *
1607 * Note:
1608 * If an error happens that may affect the commit of the current transaction,
1609 * we return the error number.  If an error affects only the creation of the
1610 * pending snapshot, we store it in pending->error and return 0.
1611 */
1612static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1613				   struct btrfs_pending_snapshot *pending)
1614{
1616	struct btrfs_fs_info *fs_info = trans->fs_info;
1617	struct btrfs_key key;
1618	struct btrfs_root_item *new_root_item;
1619	struct btrfs_root *tree_root = fs_info->tree_root;
1620	struct btrfs_root *root = pending->root;
1621	struct btrfs_root *parent_root;
1622	struct btrfs_block_rsv *rsv;
1623	struct inode *parent_inode = pending->dir;
1624	struct btrfs_path *path;
1625	struct btrfs_dir_item *dir_item;
1626	struct extent_buffer *tmp;
1627	struct extent_buffer *old;
1628	struct timespec64 cur_time;
1629	int ret = 0;
1630	u64 to_reserve = 0;
1631	u64 index = 0;
1632	u64 objectid;
1633	u64 root_flags;
1634	unsigned int nofs_flags;
1635	struct fscrypt_name fname;
1636
1637	ASSERT(pending->path);
1638	path = pending->path;
1639
1640	ASSERT(pending->root_item);
1641	new_root_item = pending->root_item;
1642
1643	/*
1644	 * We're inside a transaction and must make sure that any potential
1645	 * allocations with GFP_KERNEL in fscrypt won't recurse back to
1646	 * filesystem.
1647	 */
1648	nofs_flags = memalloc_nofs_save();
1649	pending->error = fscrypt_setup_filename(parent_inode,
1650						&pending->dentry->d_name, 0,
1651						&fname);
1652	memalloc_nofs_restore(nofs_flags);
1653	if (pending->error)
1654		goto free_pending;
1655
1656	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
1657	if (pending->error)
1658		goto free_fname;
1659
1660	/*
1661	 * Make the qgroup code skip the new snapshot's qgroupid, as it is
1662	 * accounted for by the later btrfs_qgroup_inherit().
1663	 */
1664	btrfs_set_skip_qgroup(trans, objectid);
1665
1666	btrfs_reloc_pre_snapshot(pending, &to_reserve);
1667
1668	if (to_reserve > 0) {
1669		pending->error = btrfs_block_rsv_add(fs_info,
1670						     &pending->block_rsv,
1671						     to_reserve,
1672						     BTRFS_RESERVE_NO_FLUSH);
1673		if (pending->error)
1674			goto clear_skip_qgroup;
1675	}
1676
1677	key.objectid = objectid;
1678	key.offset = (u64)-1;
1679	key.type = BTRFS_ROOT_ITEM_KEY;
1680
1681	rsv = trans->block_rsv;
1682	trans->block_rsv = &pending->block_rsv;
1683	trans->bytes_reserved = trans->block_rsv->reserved;
1684	trace_btrfs_space_reservation(fs_info, "transaction",
1685				      trans->transid,
1686				      trans->bytes_reserved, 1);
1687	parent_root = BTRFS_I(parent_inode)->root;
1688	ret = record_root_in_trans(trans, parent_root, 0);
1689	if (ret)
1690		goto fail;
1691	cur_time = current_time(parent_inode);
1692
1693	/*
1694	 * insert the directory item
1695	 */
1696	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1697	BUG_ON(ret); /* -ENOMEM */
1698
1699	/* check if there is a file/dir which has the same name. */
1700	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1701					 btrfs_ino(BTRFS_I(parent_inode)),
1702					 &fname.disk_name, 0);
1703	if (dir_item != NULL && !IS_ERR(dir_item)) {
1704		pending->error = -EEXIST;
1705		goto dir_item_existed;
1706	} else if (IS_ERR(dir_item)) {
1707		ret = PTR_ERR(dir_item);
1708		btrfs_abort_transaction(trans, ret);
1709		goto fail;
1710	}
1711	btrfs_release_path(path);
1712
1713	/*
1714	 * Pull in the delayed directory update and the delayed inode item,
1715	 * otherwise we would corrupt the FS during the snapshot.
1716	 */
1719	ret = btrfs_run_delayed_items(trans);
1720	if (ret) {	/* Transaction aborted */
1721		btrfs_abort_transaction(trans, ret);
1722		goto fail;
1723	}
1724
1725	ret = record_root_in_trans(trans, root, 0);
1726	if (ret) {
1727		btrfs_abort_transaction(trans, ret);
1728		goto fail;
1729	}
1730	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1731	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1732	btrfs_check_and_init_root_item(new_root_item);
1733
1734	root_flags = btrfs_root_flags(new_root_item);
1735	if (pending->readonly)
1736		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1737	else
1738		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1739	btrfs_set_root_flags(new_root_item, root_flags);
1740
1741	btrfs_set_root_generation_v2(new_root_item,
1742			trans->transid);
1743	generate_random_guid(new_root_item->uuid);
1744	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1745			BTRFS_UUID_SIZE);
1746	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1747		memset(new_root_item->received_uuid, 0,
1748		       sizeof(new_root_item->received_uuid));
1749		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1750		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1751		btrfs_set_root_stransid(new_root_item, 0);
1752		btrfs_set_root_rtransid(new_root_item, 0);
1753	}
1754	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1755	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1756	btrfs_set_root_otransid(new_root_item, trans->transid);
1757
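	/*
	 * COW the source root node and copy it into a fresh block owned by
	 * the new snapshot root; combined with the FORCE_COW bit set below,
	 * this is what lets the snapshot share all tree blocks with @root.
	 */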
1758	old = btrfs_lock_root_node(root);
1759	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
1760			      BTRFS_NESTING_COW);
1761	if (ret) {
1762		btrfs_tree_unlock(old);
1763		free_extent_buffer(old);
1764		btrfs_abort_transaction(trans, ret);
1765		goto fail;
1766	}
1767
1768	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1769	/* clean up in any case */
1770	btrfs_tree_unlock(old);
1771	free_extent_buffer(old);
1772	if (ret) {
1773		btrfs_abort_transaction(trans, ret);
1774		goto fail;
1775	}
1776	/* see comments in should_cow_block() */
1777	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1778	smp_wmb();
1779
1780	btrfs_set_root_node(new_root_item, tmp);
1781	/* record when the snapshot was created in key.offset */
1782	key.offset = trans->transid;
1783	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1784	btrfs_tree_unlock(tmp);
1785	free_extent_buffer(tmp);
1786	if (ret) {
1787		btrfs_abort_transaction(trans, ret);
1788		goto fail;
1789	}
1790
1791	/*
1792	 * insert root back/forward references
1793	 */
1794	ret = btrfs_add_root_ref(trans, objectid,
1795				 parent_root->root_key.objectid,
1796				 btrfs_ino(BTRFS_I(parent_inode)), index,
1797				 &fname.disk_name);
1798	if (ret) {
1799		btrfs_abort_transaction(trans, ret);
1800		goto fail;
1801	}
1802
1803	key.offset = (u64)-1;
1804	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
1805	if (IS_ERR(pending->snap)) {
1806		ret = PTR_ERR(pending->snap);
1807		pending->snap = NULL;
1808		btrfs_abort_transaction(trans, ret);
1809		goto fail;
1810	}
1811
1812	ret = btrfs_reloc_post_snapshot(trans, pending);
1813	if (ret) {
1814		btrfs_abort_transaction(trans, ret);
1815		goto fail;
1816	}
1817
1818	/*
1819	 * Do special qgroup accounting for the snapshot, as we use a qgroup
1820	 * hack to make snapshot creation fast.
1821	 * To cooperate with that hack we apply the hack again here, otherwise
1822	 * snapshotting would be greatly slowed down by a subtree qgroup rescan.
1823	 */
1824	ret = qgroup_account_snapshot(trans, root, parent_root,
1825				      pending->inherit, objectid);
1826	if (ret < 0)
1827		goto fail;
1828
1829	ret = btrfs_insert_dir_item(trans, &fname.disk_name,
1830				    BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
1831				    index);
1832	/* We checked the name at the beginning, so a collision is impossible. */
1833	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1834	if (ret) {
1835		btrfs_abort_transaction(trans, ret);
1836		goto fail;
1837	}
1838
1839	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1840						  fname.disk_name.len * 2);
1841	parent_inode->i_mtime = current_time(parent_inode);
1842	parent_inode->i_ctime = parent_inode->i_mtime;
1843	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
1844	if (ret) {
1845		btrfs_abort_transaction(trans, ret);
1846		goto fail;
1847	}
1848	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1849				  BTRFS_UUID_KEY_SUBVOL,
1850				  objectid);
1851	if (ret) {
1852		btrfs_abort_transaction(trans, ret);
1853		goto fail;
1854	}
1855	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1856		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1857					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1858					  objectid);
1859		if (ret && ret != -EEXIST) {
1860			btrfs_abort_transaction(trans, ret);
1861			goto fail;
1862		}
1863	}
1864
1865fail:
1866	pending->error = ret;
1867dir_item_existed:
1868	trans->block_rsv = rsv;
1869	trans->bytes_reserved = 0;
1870clear_skip_qgroup:
1871	btrfs_clear_skip_qgroup(trans);
1872free_fname:
1873	fscrypt_free_filename(&fname);
1874free_pending:
1875	kfree(new_root_item);
1876	pending->root_item = NULL;
1877	btrfs_free_path(path);
1878	pending->path = NULL;
1879
1880	return ret;
1881}
1882
1883/*
1884 * create all the snapshots we've scheduled for creation
1885 */
1886static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1887{
1888	struct btrfs_pending_snapshot *pending, *next;
1889	struct list_head *head = &trans->transaction->pending_snapshots;
1890	int ret = 0;
1891
1892	list_for_each_entry_safe(pending, next, head, list) {
1893		list_del(&pending->list);
1894		ret = create_pending_snapshot(trans, pending);
1895		if (ret)
1896			break;
1897	}
1898	return ret;
1899}
1900
1901static void update_super_roots(struct btrfs_fs_info *fs_info)
1902{
1903	struct btrfs_root_item *root_item;
1904	struct btrfs_super_block *super;
1905
1906	super = fs_info->super_copy;
1907
1908	root_item = &fs_info->chunk_root->root_item;
1909	super->chunk_root = root_item->bytenr;
1910	super->chunk_root_generation = root_item->generation;
1911	super->chunk_root_level = root_item->level;
1912
1913	root_item = &fs_info->tree_root->root_item;
1914	super->root = root_item->bytenr;
1915	super->generation = root_item->generation;
1916	super->root_level = root_item->level;
1917	if (btrfs_test_opt(fs_info, SPACE_CACHE))
1918		super->cache_generation = root_item->generation;
1919	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1920		super->cache_generation = 0;
1921	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1922		super->uuid_tree_generation = root_item->generation;
1923}
1924
1925int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1926{
1927	struct btrfs_transaction *trans;
1928	int ret = 0;
1929
1930	spin_lock(&info->trans_lock);
1931	trans = info->running_transaction;
1932	if (trans)
1933		ret = (trans->state >= TRANS_STATE_COMMIT_START);
1934	spin_unlock(&info->trans_lock);
1935	return ret;
1936}
1937
1938int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1939{
1940	struct btrfs_transaction *trans;
1941	int ret = 0;
1942
1943	spin_lock(&info->trans_lock);
1944	trans = info->running_transaction;
1945	if (trans)
1946		ret = is_transaction_blocked(trans);
1947	spin_unlock(&info->trans_lock);
1948	return ret;
1949}
1950
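/*
 * Kick off a transaction commit without waiting for it to complete.  The
 * actual commit work is performed by the transaction kthread, which we wake
 * after setting BTRFS_FS_COMMIT_TRANS; here we only wait for the commit to
 * start, so that transaction joins that follow block on it.
 */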
1951void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
1952{
1953	struct btrfs_fs_info *fs_info = trans->fs_info;
1954	struct btrfs_transaction *cur_trans;
1955
1956	/* Kick the transaction kthread. */
1957	set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
1958	wake_up_process(fs_info->transaction_kthread);
1959
1960	/* take transaction reference */
1961	cur_trans = trans->transaction;
1962	refcount_inc(&cur_trans->use_count);
1963
1964	btrfs_end_transaction(trans);
1965
1966	/*
1967	 * Wait for the current transaction commit to start and block
1968	 * subsequent transaction joins
1969	 */
1970	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
1971	wait_event(fs_info->transaction_blocked_wait,
1972		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
1973		   TRANS_ABORTED(cur_trans));
1974	btrfs_put_transaction(cur_trans);
1975}
1976
1977static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1978{
1979	struct btrfs_fs_info *fs_info = trans->fs_info;
1980	struct btrfs_transaction *cur_trans = trans->transaction;
1981
1982	WARN_ON(refcount_read(&trans->use_count) > 1);
1983
1984	btrfs_abort_transaction(trans, err);
1985
1986	spin_lock(&fs_info->trans_lock);
1987
1988	/*
1989	 * If the transaction is removed from the list, it means this
1990	 * transaction has been committed successfully, so it is impossible
1991	 * to call the cleanup function.
1992	 */
1993	BUG_ON(list_empty(&cur_trans->list));
1994
1995	if (cur_trans == fs_info->running_transaction) {
1996		cur_trans->state = TRANS_STATE_COMMIT_DOING;
1997		spin_unlock(&fs_info->trans_lock);
1998
1999		/*
2000		 * The thread has already released the lockdep map as reader
2001		 * already in btrfs_commit_transaction().
2002		 */
2003		btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2004		wait_event(cur_trans->writer_wait,
2005			   atomic_read(&cur_trans->num_writers) == 1);
2006
2007		spin_lock(&fs_info->trans_lock);
2008	}
2009
2010	/*
2011	 * Now that we know no one else is still using the transaction we can
2012	 * remove the transaction from the list of transactions. This avoids
2013	 * the transaction kthread from cleaning up the transaction while some
2014	 * other task is still using it, which could result in a use-after-free
2015	 * on things like log trees, as it forces the transaction kthread to
2016	 * wait for this transaction to be cleaned up by us.
2017	 */
2018	list_del_init(&cur_trans->list);
2019
2020	spin_unlock(&fs_info->trans_lock);
2021
2022	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
2023
2024	spin_lock(&fs_info->trans_lock);
2025	if (cur_trans == fs_info->running_transaction)
2026		fs_info->running_transaction = NULL;
2027	spin_unlock(&fs_info->trans_lock);
2028
2029	if (trans->type & __TRANS_FREEZABLE)
2030		sb_end_intwrite(fs_info->sb);
2031	btrfs_put_transaction(cur_trans);
2032	btrfs_put_transaction(cur_trans);
2033
2034	trace_btrfs_transaction_commit(fs_info);
2035
2036	if (current->journal_info == trans)
2037		current->journal_info = NULL;
2038	btrfs_scrub_cancel(fs_info);
2039
2040	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2041}
2042
2043/*
2044 * Release reserved delayed ref space of all pending block groups of the
2045 * transaction and remove them from the list
2046 */
2047static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2048{
2049	struct btrfs_fs_info *fs_info = trans->fs_info;
2050	struct btrfs_block_group *block_group, *tmp;
2051
2052	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2053		btrfs_delayed_refs_rsv_release(fs_info, 1);
2054		list_del_init(&block_group->bg_list);
2055	}
2056}
2057
2058static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2059{
2060	/*
2061	 * We use try_to_writeback_inodes_sb() here because if we used
2062	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2063	 * Currently are holding the fs freeze lock, if we do an async flush
2064	 * we'll do btrfs_join_transaction() and deadlock because we need to
2065	 * wait for the fs freeze lock.  Using the direct flushing we benefit
2066	 * from already being in a transaction and our join_transaction doesn't
2067	 * have to re-take the fs freeze lock.
2068	 *
2069	 * Note that try_to_writeback_inodes_sb() will only trigger writeback
2070	 * if it can read lock sb->s_umount. It will always be able to lock it,
2071	 * except when the filesystem is being unmounted or being frozen, but in
2072	 * those cases sync_filesystem() is called, which results in calling
2073	 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
2074	 * Note that we don't call writeback_inodes_sb() directly, because it
2075	 * will emit a warning if sb->s_umount is not locked.
2076	 */
2077	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2078		try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2079	return 0;
2080}
2081
2082static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2083{
2084	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2085		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2086}
2087
2088/*
2089 * Add the pending snapshot associated with the given transaction handle to
2090 * its transaction's list of pending snapshots. This must be called after the
2091 * transaction commit has started and while holding fs_info->trans_lock.
2092 * This serves to guarantee a caller of btrfs_commit_transaction() that it can
2093 * safely free the pending snapshot pointer in case btrfs_commit_transaction()
2094 * returns an error.
2095 */
2096static void add_pending_snapshot(struct btrfs_trans_handle *trans)
2097{
2098	struct btrfs_transaction *cur_trans = trans->transaction;
2099
2100	if (!trans->pending_snapshot)
2101		return;
2102
2103	lockdep_assert_held(&trans->fs_info->trans_lock);
2104	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
2105
2106	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2107}
2108
2109static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
2110{
2111	fs_info->commit_stats.commit_count++;
2112	fs_info->commit_stats.last_commit_dur = interval;
2113	fs_info->commit_stats.max_commit_dur =
2114			max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
2115	fs_info->commit_stats.total_commit_dur += interval;
2116}
2117
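/*
 * Commit the current transaction.  A rough map of the states the commit
 * below moves through (a summary of this function, not an authoritative
 * reference):
 *
 *	COMMIT_START	- new joins/attaches are blocked, external writers
 *			  are flushed out
 *	COMMIT_DOING	- num_writers == 1, nobody else can modify trees
 *	UNBLOCKED	- commit roots switched, new transactions may start
 *	SUPER_COMMITTED	- superblocks written, log commits may proceed
 *	COMPLETED	- transaction removed from the list and freed
 */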
2118int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2119{
2120	struct btrfs_fs_info *fs_info = trans->fs_info;
2121	struct btrfs_transaction *cur_trans = trans->transaction;
2122	struct btrfs_transaction *prev_trans = NULL;
2123	int ret;
2124	ktime_t start_time;
2125	ktime_t interval;
2126
2127	ASSERT(refcount_read(&trans->use_count) == 1);
2128	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2129
2130	clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
2131
2132	/* Stop the commit early if ->aborted is set */
2133	if (TRANS_ABORTED(cur_trans)) {
2134		ret = cur_trans->aborted;
2135		goto lockdep_trans_commit_start_release;
2136	}
2137
2138	btrfs_trans_release_metadata(trans);
2139	trans->block_rsv = NULL;
2140
2141	/*
2142	 * We only want one transaction commit doing the flushing so we do not
2143	 * waste a bunch of time on lock contention on the extent root node.
2144	 */
2145	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
2146			      &cur_trans->delayed_refs.flags)) {
2147		/*
2148		 * Make a pass through all the delayed refs we have so far.
2149		 * Any running threads may add more while we are here.
2150		 */
2151		ret = btrfs_run_delayed_refs(trans, 0);
2152		if (ret)
2153			goto lockdep_trans_commit_start_release;
2154	}
2155
2156	btrfs_create_pending_block_groups(trans);
2157
2158	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2159		int run_it = 0;
2160
2161		/*
2162		 * This mutex is also taken before trying to set
2163		 * block groups readonly.  We need to make sure
2164		 * that nobody has set a block group readonly
2165		 * after extents from that block group have been
2166		 * allocated for cache files.  btrfs_set_block_group_ro
2167		 * will wait for the transaction to commit if it
2168		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2169		 *
2170		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2171		 * only one process starts all the block group IO.  It wouldn't
2172		 * hurt to have more than one go through, but there's no
2173		 * real advantage to it either.
2174		 */
2174		mutex_lock(&fs_info->ro_block_group_mutex);
2175		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2176				      &cur_trans->flags))
2177			run_it = 1;
2178		mutex_unlock(&fs_info->ro_block_group_mutex);
2179
2180		if (run_it) {
2181			ret = btrfs_start_dirty_block_groups(trans);
2182			if (ret)
2183				goto lockdep_trans_commit_start_release;
2184		}
2185	}
2186
2187	spin_lock(&fs_info->trans_lock);
2188	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2189		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2190
2191		add_pending_snapshot(trans);
2192
2193		spin_unlock(&fs_info->trans_lock);
2194		refcount_inc(&cur_trans->use_count);
2195
2196		if (trans->in_fsync)
2197			want_state = TRANS_STATE_SUPER_COMMITTED;
2198
2199		btrfs_trans_state_lockdep_release(fs_info,
2200						  BTRFS_LOCKDEP_TRANS_COMMIT_START);
2201		ret = btrfs_end_transaction(trans);
2202		wait_for_commit(cur_trans, want_state);
2203
2204		if (TRANS_ABORTED(cur_trans))
2205			ret = cur_trans->aborted;
2206
2207		btrfs_put_transaction(cur_trans);
2208
2209		return ret;
2210	}
2211
2212	cur_trans->state = TRANS_STATE_COMMIT_START;
2213	wake_up(&fs_info->transaction_blocked_wait);
2214	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2215
2216	if (cur_trans->list.prev != &fs_info->trans_list) {
2217		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2218
2219		if (trans->in_fsync)
2220			want_state = TRANS_STATE_SUPER_COMMITTED;
2221
2222		prev_trans = list_entry(cur_trans->list.prev,
2223					struct btrfs_transaction, list);
2224		if (prev_trans->state < want_state) {
2225			refcount_inc(&prev_trans->use_count);
2226			spin_unlock(&fs_info->trans_lock);
2227
2228			wait_for_commit(prev_trans, want_state);
2229
2230			ret = READ_ONCE(prev_trans->aborted);
2231
2232			btrfs_put_transaction(prev_trans);
2233			if (ret)
2234				goto lockdep_release;
2235		} else {
2236			spin_unlock(&fs_info->trans_lock);
2237		}
2238	} else {
2239		spin_unlock(&fs_info->trans_lock);
2240		/*
2241		 * The previous transaction was aborted and was already removed
2242		 * from the list of transactions at fs_info->trans_list. So we
2243		 * abort to prevent writing a new superblock that reflects a
2244		 * corrupt state (pointing to trees with unwritten nodes/leaves).
2245		 */
2246		if (BTRFS_FS_ERROR(fs_info)) {
2247			ret = -EROFS;
2248			goto lockdep_release;
2249		}
2250	}
2251
2252	/*
2253	 * Get the time spent on the work done by the commit thread and not
2254	 * the time spent waiting on a previous commit
2255	 */
2256	start_time = ktime_get_ns();
2257
2258	extwriter_counter_dec(cur_trans, trans->type);
2259
2260	ret = btrfs_start_delalloc_flush(fs_info);
2261	if (ret)
2262		goto lockdep_release;
2263
2264	ret = btrfs_run_delayed_items(trans);
2265	if (ret)
2266		goto lockdep_release;
2267
2268	/*
2269	 * The thread has started/joined the transaction thus it holds the
2270	 * lockdep map as a reader. It has to release it before acquiring the
2271	 * lockdep map as a writer.
2272	 */
2273	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2274	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
2275	wait_event(cur_trans->writer_wait,
2276		   extwriter_counter_read(cur_trans) == 0);
2277
2278	/* Some pending stuff might have been added after the previous flush. */
2279	ret = btrfs_run_delayed_items(trans);
2280	if (ret) {
2281		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2282		goto cleanup_transaction;
2283	}
2284
2285	btrfs_wait_delalloc_flush(fs_info);
2286
2287	/*
2288	 * Wait for all ordered extents started by a fast fsync that joined this
2289	 * transaction. Otherwise if this transaction commits before the ordered
2290	 * extents complete we lose logged data after a power failure.
2291	 */
2292	btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
2293	wait_event(cur_trans->pending_wait,
2294		   atomic_read(&cur_trans->pending_ordered) == 0);
2295
2296	btrfs_scrub_pause(fs_info);
2297	/*
2298	 * Ok now we need to make sure to block out any other joins while we
2299	 * commit the transaction.  We could have started a join before setting
2300	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
2301	 */
2302	spin_lock(&fs_info->trans_lock);
2303	add_pending_snapshot(trans);
2304	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2305	spin_unlock(&fs_info->trans_lock);
2306
2307	/*
2308	 * The thread has started/joined the transaction thus it holds the
2309	 * lockdep map as a reader. It has to release it before acquiring the
2310	 * lockdep map as a writer.
2311	 */
2312	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2313	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2314	wait_event(cur_trans->writer_wait,
2315		   atomic_read(&cur_trans->num_writers) == 1);
2316
2317	/*
2318	 * Make lockdep happy by acquiring the state locks after
2319	 * btrfs_trans_num_writers is released. If we acquired the state locks
2320	 * before releasing the btrfs_trans_num_writers lock then lockdep would
2321	 * complain because we did not follow the reverse order unlocking rule.
2322	 */
2323	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2324	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2325	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2326
2327	/*
2328	 * We've started the commit, clear the flag in case we were triggered to
2329	 * do an async commit but somebody else started before the transaction
2330	 * kthread could do the work.
2331	 */
2332	clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2333
2334	if (TRANS_ABORTED(cur_trans)) {
2335		ret = cur_trans->aborted;
2336		btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2337		goto scrub_continue;
2338	}
2339	/*
2340	 * the reloc mutex makes sure that we stop
2341	 * the balancing code from coming in and moving
2342	 * extents around in the middle of the commit
2343	 */
2344	mutex_lock(&fs_info->reloc_mutex);
2345
2346	/*
2347	 * We needn't worry about the delayed items because we will
2348	 * deal with them in create_pending_snapshot(), which is the
2349	 * core function of the snapshot creation.
2350	 */
2351	ret = create_pending_snapshots(trans);
2352	if (ret)
2353		goto unlock_reloc;
2354
2355	/*
2356	 * We insert the dir indexes of the snapshots and update the inode
2357	 * of the snapshots' parents after the snapshot creation, so there
2358	 * are some delayed items which are not dealt with. Now deal with
2359	 * them.
2360	 *
2361	 * We needn't worry that this operation will corrupt the snapshots,
2362	 * because all the trees which were snapshotted will be forced to COW
2363	 * their nodes and leaves.
2364	 */
2365	ret = btrfs_run_delayed_items(trans);
2366	if (ret)
2367		goto unlock_reloc;
2368
2369	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2370	if (ret)
2371		goto unlock_reloc;
2372
2373	/*
2374	 * make sure none of the code above managed to slip in a
2375	 * delayed item
2376	 */
2377	btrfs_assert_delayed_root_empty(fs_info);
2378
2379	WARN_ON(cur_trans != trans->transaction);
2380
2381	ret = commit_fs_roots(trans);
2382	if (ret)
2383		goto unlock_reloc;
2384
2385	/*
2386	 * commit_fs_roots() gets rid of all the tree log roots, so it is now
2387	 * safe to free the log root tree.
2388	 */
2388	btrfs_free_log_root_tree(trans, fs_info);
2389
2390	/*
2391	 * Since fs roots are all committed, we can get a quite accurate
2392	 * new_roots. So let's do quota accounting.
2393	 */
2394	ret = btrfs_qgroup_account_extents(trans);
2395	if (ret < 0)
2396		goto unlock_reloc;
2397
2398	ret = commit_cowonly_roots(trans);
2399	if (ret)
2400		goto unlock_reloc;
2401
2402	/*
2403	 * The tasks which save the space cache and inode cache may also
2404	 * update ->aborted, check it.
2405	 */
2406	if (TRANS_ABORTED(cur_trans)) {
2407		ret = cur_trans->aborted;
2408		goto unlock_reloc;
2409	}
2410
2411	cur_trans = fs_info->running_transaction;
2412
2413	btrfs_set_root_node(&fs_info->tree_root->root_item,
2414			    fs_info->tree_root->node);
2415	list_add_tail(&fs_info->tree_root->dirty_list,
2416		      &cur_trans->switch_commits);
2417
2418	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2419			    fs_info->chunk_root->node);
2420	list_add_tail(&fs_info->chunk_root->dirty_list,
2421		      &cur_trans->switch_commits);
2422
2423	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2424		btrfs_set_root_node(&fs_info->block_group_root->root_item,
2425				    fs_info->block_group_root->node);
2426		list_add_tail(&fs_info->block_group_root->dirty_list,
2427			      &cur_trans->switch_commits);
2428	}
2429
2430	switch_commit_roots(trans);
2431
2432	ASSERT(list_empty(&cur_trans->dirty_bgs));
2433	ASSERT(list_empty(&cur_trans->io_bgs));
2434	update_super_roots(fs_info);
2435
2436	btrfs_set_super_log_root(fs_info->super_copy, 0);
2437	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2438	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2439	       sizeof(*fs_info->super_copy));
2440
2441	btrfs_commit_device_sizes(cur_trans);
2442
2443	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2444	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2445
2446	btrfs_trans_release_chunk_metadata(trans);
2447
2448	/*
2449	 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
2450	 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
2451	 * make sure that before we commit our superblock, no other task can
2452	 * start a new transaction and commit a log tree before we commit our
2453	 * superblock. Anyone trying to commit a log tree locks this mutex before
2454	 * writing its superblock.
2455	 */
2456	mutex_lock(&fs_info->tree_log_mutex);
2457
2458	spin_lock(&fs_info->trans_lock);
2459	cur_trans->state = TRANS_STATE_UNBLOCKED;
2460	fs_info->running_transaction = NULL;
2461	spin_unlock(&fs_info->trans_lock);
2462	mutex_unlock(&fs_info->reloc_mutex);
2463
2464	wake_up(&fs_info->transaction_wait);
2465	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2466
2467	ret = btrfs_write_and_wait_transaction(trans);
2468	if (ret) {
2469		btrfs_handle_fs_error(fs_info, ret,
2470				      "Error while writing out transaction");
2471		mutex_unlock(&fs_info->tree_log_mutex);
2472		goto scrub_continue;
2473	}
2474
2475	/*
2476	 * At this point, we should have written all the tree blocks allocated
2477	 * in this transaction. So it's now safe to free the redirtied extent
2478	 * buffers.
2479	 */
2480	btrfs_free_redirty_list(cur_trans);
2481
2482	ret = write_all_supers(fs_info, 0);
2483	/*
2484	 * the super is written, we can safely allow the tree-loggers
2485	 * to go about their business
2486	 */
2487	mutex_unlock(&fs_info->tree_log_mutex);
2488	if (ret)
2489		goto scrub_continue;
2490
2491	/*
2492	 * We needn't acquire the lock here because there is no other task
2493	 * which can change it.
2494	 */
2495	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
2496	wake_up(&cur_trans->commit_wait);
2497	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2498
2499	btrfs_finish_extent_commit(trans);
2500
2501	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2502		btrfs_clear_space_info_full(fs_info);
2503
2504	fs_info->last_trans_committed = cur_trans->transid;
2505	/*
2506	 * We needn't acquire the lock here because there is no other task
2507	 * which can change it.
2508	 */
2509	cur_trans->state = TRANS_STATE_COMPLETED;
2510	wake_up(&cur_trans->commit_wait);
2511	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2512
2513	spin_lock(&fs_info->trans_lock);
2514	list_del_init(&cur_trans->list);
2515	spin_unlock(&fs_info->trans_lock);
2516
2517	btrfs_put_transaction(cur_trans);
2518	btrfs_put_transaction(cur_trans);
2519
2520	if (trans->type & __TRANS_FREEZABLE)
2521		sb_end_intwrite(fs_info->sb);
2522
2523	trace_btrfs_transaction_commit(fs_info);
2524
2525	interval = ktime_get_ns() - start_time;
2526
2527	btrfs_scrub_continue(fs_info);
2528
2529	if (current->journal_info == trans)
2530		current->journal_info = NULL;
2531
2532	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2533
2534	update_commit_stats(fs_info, interval);
2535
2536	return ret;
2537
2538unlock_reloc:
2539	mutex_unlock(&fs_info->reloc_mutex);
2540	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2541scrub_continue:
2542	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2543	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2544	btrfs_scrub_continue(fs_info);
2545cleanup_transaction:
2546	btrfs_trans_release_metadata(trans);
2547	btrfs_cleanup_pending_block_groups(trans);
2548	btrfs_trans_release_chunk_metadata(trans);
2549	trans->block_rsv = NULL;
2550	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2551	if (current->journal_info == trans)
2552		current->journal_info = NULL;
2553	cleanup_transaction(trans, ret);
2554
2555	return ret;
2556
2557lockdep_release:
2558	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2559	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2560	goto cleanup_transaction;
2561
2562lockdep_trans_commit_start_release:
2563	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2564	btrfs_end_transaction(trans);
2565	return ret;
2566}
2567
2568/*
2569 * Return < 0 on error,
2570 * 0 if there are no more dead roots at the time of the call,
2571 * 1 if there are more to be processed (call again).
2572 *
2573 * A return value of 1 means there are certainly more snapshots to delete, but
2574 * if a new one arrives during processing, 0 may still be returned. We don't
2575 * mind, because btrfs_commit_super() will poke the cleaner thread and it will
2576 * process it a few seconds later.
2577 */
2578int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
2579{
2580	struct btrfs_root *root;
2581	int ret;
2582
2583	spin_lock(&fs_info->trans_lock);
2584	if (list_empty(&fs_info->dead_roots)) {
2585		spin_unlock(&fs_info->trans_lock);
2586		return 0;
2587	}
2588	root = list_first_entry(&fs_info->dead_roots,
2589			struct btrfs_root, root_list);
2590	list_del_init(&root->root_list);
2591	spin_unlock(&fs_info->trans_lock);
2592
2593	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2594
2595	btrfs_kill_all_delayed_nodes(root);
2596
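	/*
	 * Trees with the old (pre mixed backref) format are dropped without
	 * updating backrefs on shared blocks; newer trees pass update_ref=1
	 * (see btrfs_drop_snapshot() for the semantics of that argument).
	 */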
2597	if (btrfs_header_backref_rev(root->node) <
2598			BTRFS_MIXED_BACKREF_REV)
2599		ret = btrfs_drop_snapshot(root, 0, 0);
2600	else
2601		ret = btrfs_drop_snapshot(root, 1, 0);
2602
2603	btrfs_put_root(root);
2604	return (ret < 0) ? 0 : 1;
2605}
2606
2607int __init btrfs_transaction_init(void)
2608{
2609	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
2610			sizeof(struct btrfs_trans_handle), 0,
2611			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
2612	if (!btrfs_trans_handle_cachep)
2613		return -ENOMEM;
2614	return 0;
2615}
2616
2617void __cold btrfs_transaction_exit(void)
2618{
2619	kmem_cache_destroy(btrfs_trans_handle_cachep);
2620}