fs/jbd2/commit.c, Linux v3.1
   1/*
   2 * linux/fs/jbd2/commit.c
   3 *
   4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
   5 *
   6 * Copyright 1998 Red Hat corp --- All Rights Reserved
   7 *
   8 * This file is part of the Linux kernel and is made available under
   9 * the terms of the GNU General Public License, version 2, or at your
  10 * option, any later version, incorporated herein by reference.
  11 *
  12 * Journal commit routines for the generic filesystem journaling code;
  13 * part of the ext2fs journaling system.
  14 */
  15
  16#include <linux/time.h>
  17#include <linux/fs.h>
  18#include <linux/jbd2.h>
  19#include <linux/errno.h>
  20#include <linux/slab.h>
  21#include <linux/mm.h>
  22#include <linux/pagemap.h>
  23#include <linux/jiffies.h>
  24#include <linux/crc32.h>
  25#include <linux/writeback.h>
  26#include <linux/backing-dev.h>
  27#include <linux/bio.h>
  28#include <linux/blkdev.h>
  29#include <linux/bitops.h>
  30#include <trace/events/jbd2.h>
  31#include <asm/system.h>
  32
  33/*
  34 * Default IO end handler for temporary BJ_IO buffer_heads.
  35 */
  36static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
  37{
  38	BUFFER_TRACE(bh, "");
  39	if (uptodate)
  40		set_buffer_uptodate(bh);
  41	else
  42		clear_buffer_uptodate(bh);
  43	unlock_buffer(bh);
  44}
  45
  46/*
  47 * When an ext4 file is truncated, it is possible that some pages are not
  48 * successfully freed, because they are attached to a committing transaction.
  49 * After the transaction commits, these pages are left on the LRU, with no
  50 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
  51 * by the VM, but their apparent absence upsets the VM accounting, and it makes
  52 * the numbers in /proc/meminfo look odd.
  53 *
  54 * So here, we have a buffer which has just come off the forget list.  Look to
  55 * see if we can strip all buffers from the backing page.
  56 *
  57 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
  58 * caller provided us with a ref against the buffer, and we drop that here.
  59 */
  60static void release_buffer_page(struct buffer_head *bh)
  61{
  62	struct page *page;
  63
  64	if (buffer_dirty(bh))
  65		goto nope;
  66	if (atomic_read(&bh->b_count) != 1)
  67		goto nope;
  68	page = bh->b_page;
  69	if (!page)
  70		goto nope;
  71	if (page->mapping)
  72		goto nope;
  73
  74	/* OK, it's a truncated page */
  75	if (!trylock_page(page))
  76		goto nope;
  77
  78	page_cache_get(page);
  79	__brelse(bh);
  80	try_to_free_buffers(page);
  81	unlock_page(page);
  82	page_cache_release(page);
  83	return;
  84
  85nope:
  86	__brelse(bh);
  87}
  88
  89/*
  90 * Done it all: now submit the commit record.  We should have
  91 * cleaned up our previous buffers by now, so if we are in abort
  92 * mode we can now just skip the rest of the journal write
  93 * entirely.
  94 *
  95 * Returns 1 if the journal needs to be aborted or 0 on success
  96 */
  97static int journal_submit_commit_record(journal_t *journal,
  98					transaction_t *commit_transaction,
  99					struct buffer_head **cbh,
 100					__u32 crc32_sum)
 101{
 102	struct journal_head *descriptor;
 103	struct commit_header *tmp;
 104	struct buffer_head *bh;
 105	int ret;
 106	struct timespec now = current_kernel_time();
 107
 108	*cbh = NULL;
 109
 110	if (is_journal_aborted(journal))
 111		return 0;
 112
 113	descriptor = jbd2_journal_get_descriptor_buffer(journal);
 114	if (!descriptor)
 115		return 1;
 116
 117	bh = jh2bh(descriptor);
 118
 119	tmp = (struct commit_header *)bh->b_data;
 120	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
 121	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
 122	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
 123	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
 124	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
 125
 126	if (JBD2_HAS_COMPAT_FEATURE(journal,
 127				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
 128		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
 129		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 130		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 131	}
 132
 133	JBUFFER_TRACE(descriptor, "submit commit block");
 134	lock_buffer(bh);
 135	clear_buffer_dirty(bh);
 136	set_buffer_uptodate(bh);
 137	bh->b_end_io = journal_end_buffer_io_sync;
 138
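	/*
	 * With barriers enabled and synchronous commit blocks, the commit
	 * record itself carries flush+FUA so that all prior journal writes
	 * are on stable storage before the commit block; async-commit
	 * journals instead rely on the explicit flush issued later in
	 * jbd2_journal_commit_transaction().
	 */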
 139	if (journal->j_flags & JBD2_BARRIER &&
 140	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 141				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
 142		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
 143	else
 144		ret = submit_bh(WRITE_SYNC, bh);
 145
 146	*cbh = bh;
 147	return ret;
 148}
 149
 150/*
  151 * This function, along with journal_submit_commit_record(),
  152 * allows the commit record to be written asynchronously.
 153 */
 154static int journal_wait_on_commit_record(journal_t *journal,
 155					 struct buffer_head *bh)
 156{
 157	int ret = 0;
 158
 159	clear_buffer_dirty(bh);
 160	wait_on_buffer(bh);
 161
 162	if (unlikely(!buffer_uptodate(bh)))
 163		ret = -EIO;
 164	put_bh(bh);            /* One for getblk() */
 165	jbd2_journal_put_journal_head(bh2jh(bh));
 166
 167	return ret;
 168}
 169
 170/*
  171 * Write the filemap data using the writepage() address_space operation.
  172 * We don't do block allocation here even for delalloc. We don't
  173 * use writepages() because with delayed allocation we may be doing
 174 * block allocation in writepages().
 175 */
 176static int journal_submit_inode_data_buffers(struct address_space *mapping)
 177{
 178	int ret;
 179	struct writeback_control wbc = {
 180		.sync_mode =  WB_SYNC_ALL,
 181		.nr_to_write = mapping->nrpages * 2,
 182		.range_start = 0,
 183		.range_end = i_size_read(mapping->host),
 184	};
 185
 186	ret = generic_writepages(mapping, &wbc);
 187	return ret;
 188}
 189
 190/*
 191 * Submit all the data buffers of inode associated with the transaction to
 192 * disk.
 193 *
 194 * We are in a committing transaction. Therefore no new inode can be added to
 195 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 196 * operate on from being released while we write out pages.
 197 */
 198static int journal_submit_data_buffers(journal_t *journal,
 199		transaction_t *commit_transaction)
 200{
 201	struct jbd2_inode *jinode;
 202	int err, ret = 0;
 203	struct address_space *mapping;
 204
 205	spin_lock(&journal->j_list_lock);
 206	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 207		mapping = jinode->i_vfs_inode->i_mapping;
 208		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
 209		spin_unlock(&journal->j_list_lock);
 210		/*
  211		 * Submit the inode data buffers. We use writepage
  212		 * instead of writepages because writepages can do
  213		 * block allocation with delalloc, and we need to write
  214		 * only already-allocated blocks here.
 215		 */
 216		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 217		err = journal_submit_inode_data_buffers(mapping);
 218		if (!ret)
 219			ret = err;
 220		spin_lock(&journal->j_list_lock);
 221		J_ASSERT(jinode->i_transaction == commit_transaction);
 222		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
 223		smp_mb__after_clear_bit();
 224		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 225	}
 226	spin_unlock(&journal->j_list_lock);
 227	return ret;
 228}
 229
 230/*
 231 * Wait for data submitted for writeout, refile inodes to proper
 232 * transaction if needed.
 233 *
 234 */
 235static int journal_finish_inode_data_buffers(journal_t *journal,
 236		transaction_t *commit_transaction)
 237{
 238	struct jbd2_inode *jinode, *next_i;
 239	int err, ret = 0;
 240
 241	/* For locking, see the comment in journal_submit_data_buffers() */
 242	spin_lock(&journal->j_list_lock);
 243	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 244		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
 245		spin_unlock(&journal->j_list_lock);
 246		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
 247		if (err) {
 248			/*
 249			 * Because AS_EIO is cleared by
 250			 * filemap_fdatawait_range(), set it again so
 251			 * that user process can get -EIO from fsync().
 252			 */
 253			set_bit(AS_EIO,
 254				&jinode->i_vfs_inode->i_mapping->flags);
 255
 256			if (!ret)
 257				ret = err;
 258		}
 259		spin_lock(&journal->j_list_lock);
 260		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
 261		smp_mb__after_clear_bit();
 262		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 263	}
 264
 265	/* Now refile inode to proper lists */
 266	list_for_each_entry_safe(jinode, next_i,
 267				 &commit_transaction->t_inode_list, i_list) {
 268		list_del(&jinode->i_list);
 269		if (jinode->i_next_transaction) {
 270			jinode->i_transaction = jinode->i_next_transaction;
 271			jinode->i_next_transaction = NULL;
 272			list_add(&jinode->i_list,
 273				&jinode->i_transaction->t_inode_list);
 274		} else {
 275			jinode->i_transaction = NULL;
 276		}
 277	}
 278	spin_unlock(&journal->j_list_lock);
 279
 280	return ret;
 281}
 282
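/*
 * Fold one buffer's contents into the running transaction checksum.  When
 * the JBD2_FEATURE_COMPAT_CHECKSUM feature is set, every block written to
 * the log is folded in this way and the final crc32 ends up in the commit
 * header via journal_submit_commit_record().
 */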
 283static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 284{
 285	struct page *page = bh->b_page;
 286	char *addr;
 287	__u32 checksum;
 288
 289	addr = kmap_atomic(page, KM_USER0);
 290	checksum = crc32_be(crc32_sum,
 291		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
 292	kunmap_atomic(addr, KM_USER0);
 293
 294	return checksum;
 295}
 296
 297static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
 298				   unsigned long long block)
 299{
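	/*
	 * The low 32 bits of the block number always go in t_blocknr; for
	 * journals using 64-bit tags the high bits go in t_blocknr_high.
	 * ((block >> 31) >> 1 is simply block >> 32.)
	 */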
 300	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
 301	if (tag_bytes > JBD2_TAG_SIZE32)
 302		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 303}
 304
 305/*
 306 * jbd2_journal_commit_transaction
 307 *
 308 * The primary function for committing a transaction to the log.  This
 309 * function is called by the journal thread to begin a complete commit.
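 *
 * In outline: lock down the running transaction and wait for its updates
 * to finish, flush the data buffers and revoke records, copy each metadata
 * buffer into the log behind descriptor blocks, wait for that IO to
 * complete, write the commit record, and finally move buffers onto the
 * checkpoint lists and retire the transaction.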
 310 */
 311void jbd2_journal_commit_transaction(journal_t *journal)
 312{
 313	struct transaction_stats_s stats;
 314	transaction_t *commit_transaction;
 315	struct journal_head *jh, *new_jh, *descriptor;
 316	struct buffer_head **wbuf = journal->j_wbuf;
 317	int bufs;
 318	int flags;
 319	int err;
 320	unsigned long long blocknr;
 321	ktime_t start_time;
 322	u64 commit_time;
 323	char *tagp = NULL;
 324	journal_header_t *header;
 325	journal_block_tag_t *tag = NULL;
 326	int space_left = 0;
 327	int first_tag = 0;
 328	int tag_flag;
 329	int i, to_free = 0;
 330	int tag_bytes = journal_tag_bytes(journal);
 331	struct buffer_head *cbh = NULL; /* For transactional checksums */
 332	__u32 crc32_sum = ~0;
 333	struct blk_plug plug;
 334
 335	/*
 336	 * First job: lock down the current transaction and wait for
 337	 * all outstanding updates to complete.
 338	 */
 339
 340	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
 341	if (journal->j_flags & JBD2_FLUSHED) {
 342		jbd_debug(3, "super block updated\n");
 343		jbd2_journal_update_superblock(journal, 1);
 344	} else {
 345		jbd_debug(3, "superblock not updated\n");
 346	}
 347
 348	J_ASSERT(journal->j_running_transaction != NULL);
 349	J_ASSERT(journal->j_committing_transaction == NULL);
 350
 351	commit_transaction = journal->j_running_transaction;
 352	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 353
 354	trace_jbd2_start_commit(journal, commit_transaction);
 355	jbd_debug(1, "JBD: starting commit of transaction %d\n",
 356			commit_transaction->t_tid);
 357
 358	write_lock(&journal->j_state_lock);
 359	commit_transaction->t_state = T_LOCKED;
 360
 361	trace_jbd2_commit_locking(journal, commit_transaction);
 362	stats.run.rs_wait = commit_transaction->t_max_wait;
 363	stats.run.rs_locked = jiffies;
 364	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
 365					      stats.run.rs_locked);
 366
 367	spin_lock(&commit_transaction->t_handle_lock);
 368	while (atomic_read(&commit_transaction->t_updates)) {
 369		DEFINE_WAIT(wait);
 370
 371		prepare_to_wait(&journal->j_wait_updates, &wait,
 372					TASK_UNINTERRUPTIBLE);
 373		if (atomic_read(&commit_transaction->t_updates)) {
 374			spin_unlock(&commit_transaction->t_handle_lock);
 375			write_unlock(&journal->j_state_lock);
 376			schedule();
 377			write_lock(&journal->j_state_lock);
 378			spin_lock(&commit_transaction->t_handle_lock);
 379		}
 380		finish_wait(&journal->j_wait_updates, &wait);
 381	}
 382	spin_unlock(&commit_transaction->t_handle_lock);
 383
 384	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 385			journal->j_max_transaction_buffers);
 386
 387	/*
 388	 * First thing we are allowed to do is to discard any remaining
 389	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
 390	 * that there are no such buffers: if a large filesystem
 391	 * operation like a truncate needs to split itself over multiple
 392	 * transactions, then it may try to do a jbd2_journal_restart() while
 393	 * there are still BJ_Reserved buffers outstanding.  These must
 394	 * be released cleanly from the current transaction.
 395	 *
 396	 * In this case, the filesystem must still reserve write access
 397	 * again before modifying the buffer in the new transaction, but
 398	 * we do not require it to remember exactly which old buffers it
 399	 * has reserved.  This is consistent with the existing behaviour
 400	 * that multiple jbd2_journal_get_write_access() calls to the same
 401	 * buffer are perfectly permissible.
 402	 */
 403	while (commit_transaction->t_reserved_list) {
 404		jh = commit_transaction->t_reserved_list;
 405		JBUFFER_TRACE(jh, "reserved, unused: refile");
 406		/*
 407		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
 408		 * leave undo-committed data.
 409		 */
 410		if (jh->b_committed_data) {
 411			struct buffer_head *bh = jh2bh(jh);
 412
 413			jbd_lock_bh_state(bh);
 414			jbd2_free(jh->b_committed_data, bh->b_size);
 415			jh->b_committed_data = NULL;
 416			jbd_unlock_bh_state(bh);
 417		}
 418		jbd2_journal_refile_buffer(journal, jh);
 419	}
 420
 421	/*
 422	 * Now try to drop any written-back buffers from the journal's
 423	 * checkpoint lists.  We do this *before* commit because it potentially
 424	 * frees some memory
 425	 */
 426	spin_lock(&journal->j_list_lock);
 427	__jbd2_journal_clean_checkpoint_list(journal);
 428	spin_unlock(&journal->j_list_lock);
 429
 430	jbd_debug (3, "JBD: commit phase 1\n");
 431
 432	/*
 433	 * Switch to a new revoke table.
 434	 */
 435	jbd2_journal_switch_revoke_table(journal);
 436
 437	trace_jbd2_commit_flushing(journal, commit_transaction);
 438	stats.run.rs_flushing = jiffies;
 439	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
 440					     stats.run.rs_flushing);
 441
 442	commit_transaction->t_state = T_FLUSH;
 443	journal->j_committing_transaction = commit_transaction;
 444	journal->j_running_transaction = NULL;
 445	start_time = ktime_get();
 446	commit_transaction->t_log_start = journal->j_head;
 447	wake_up(&journal->j_wait_transaction_locked);
 448	write_unlock(&journal->j_state_lock);
 449
 450	jbd_debug (3, "JBD: commit phase 2\n");
 451
 452	/*
 453	 * Now start flushing things to disk, in the order they appear
 454	 * on the transaction lists.  Data blocks go first.
 455	 */
 456	err = journal_submit_data_buffers(journal, commit_transaction);
 457	if (err)
 458		jbd2_journal_abort(journal, err);
 459
 460	blk_start_plug(&plug);
 461	jbd2_journal_write_revoke_records(journal, commit_transaction,
 462					  WRITE_SYNC);
 463	blk_finish_plug(&plug);
 464
 465	jbd_debug(3, "JBD: commit phase 2\n");
 466
 467	/*
 468	 * Way to go: we have now written out all of the data for a
 469	 * transaction!  Now comes the tricky part: we need to write out
 470	 * metadata.  Loop over the transaction's entire buffer list:
 471	 */
 472	write_lock(&journal->j_state_lock);
 473	commit_transaction->t_state = T_COMMIT;
 474	write_unlock(&journal->j_state_lock);
 475
 476	trace_jbd2_commit_logging(journal, commit_transaction);
 477	stats.run.rs_logging = jiffies;
 478	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
 479					       stats.run.rs_logging);
 480	stats.run.rs_blocks =
 481		atomic_read(&commit_transaction->t_outstanding_credits);
 482	stats.run.rs_blocks_logged = 0;
 483
 484	J_ASSERT(commit_transaction->t_nr_buffers <=
 485		 atomic_read(&commit_transaction->t_outstanding_credits));
 486
 487	err = 0;
 488	descriptor = NULL;
 489	bufs = 0;
 490	blk_start_plug(&plug);
 491	while (commit_transaction->t_buffers) {
 492
 493		/* Find the next buffer to be journaled... */
 494
 495		jh = commit_transaction->t_buffers;
 496
 497		/* If we're in abort mode, we just un-journal the buffer and
 498		   release it. */
 499
 500		if (is_journal_aborted(journal)) {
 501			clear_buffer_jbddirty(jh2bh(jh));
 502			JBUFFER_TRACE(jh, "journal is aborting: refile");
 503			jbd2_buffer_abort_trigger(jh,
 504						  jh->b_frozen_data ?
 505						  jh->b_frozen_triggers :
 506						  jh->b_triggers);
 507			jbd2_journal_refile_buffer(journal, jh);
 508			/* If that was the last one, we need to clean up
 509			 * any descriptor buffers which may have been
 510			 * already allocated, even if we are now
 511			 * aborting. */
 512			if (!commit_transaction->t_buffers)
 513				goto start_journal_io;
 514			continue;
 515		}
 516
 517		/* Make sure we have a descriptor block in which to
 518		   record the metadata buffer. */
 519
 520		if (!descriptor) {
 521			struct buffer_head *bh;
 522
 523			J_ASSERT (bufs == 0);
 524
 525			jbd_debug(4, "JBD: get descriptor\n");
 526
 527			descriptor = jbd2_journal_get_descriptor_buffer(journal);
 528			if (!descriptor) {
 529				jbd2_journal_abort(journal, -EIO);
 530				continue;
 531			}
 532
 533			bh = jh2bh(descriptor);
 534			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
 535				(unsigned long long)bh->b_blocknr, bh->b_data);
 536			header = (journal_header_t *)&bh->b_data[0];
 537			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
 538			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
 539			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
 540
 541			tagp = &bh->b_data[sizeof(journal_header_t)];
 542			space_left = bh->b_size - sizeof(journal_header_t);
 543			first_tag = 1;
 544			set_buffer_jwrite(bh);
 545			set_buffer_dirty(bh);
 546			wbuf[bufs++] = bh;
 547
 548			/* Record it so that we can wait for IO
 549                           completion later */
 550			BUFFER_TRACE(bh, "ph3: file as descriptor");
 551			jbd2_journal_file_buffer(descriptor, commit_transaction,
 552					BJ_LogCtl);
 553		}
 554
 555		/* Where is the buffer to be written? */
 556
 557		err = jbd2_journal_next_log_block(journal, &blocknr);
 558		/* If the block mapping failed, just abandon the buffer
 559		   and repeat this loop: we'll fall into the
 560		   refile-on-abort condition above. */
 561		if (err) {
 562			jbd2_journal_abort(journal, err);
 563			continue;
 564		}
 565
 566		/*
 567		 * start_this_handle() uses t_outstanding_credits to determine
 568		 * the free space in the log, but this counter is changed
 569		 * by jbd2_journal_next_log_block() also.
 570		 */
 571		atomic_dec(&commit_transaction->t_outstanding_credits);
 572
 573		/* Bump b_count to prevent truncate from stumbling over
 574                   the shadowed buffer!  @@@ This can go if we ever get
 575                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
 576		atomic_inc(&jh2bh(jh)->b_count);
 577
 578		/* Make a temporary IO buffer with which to write it out
 579                   (this will requeue both the metadata buffer and the
  580                   temporary IO buffer). new_bh goes on BJ_IO */
 581
 582		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 583		/*
 584		 * akpm: jbd2_journal_write_metadata_buffer() sets
 585		 * new_bh->b_transaction to commit_transaction.
 586		 * We need to clean this up before we release new_bh
 587		 * (which is of type BJ_IO)
 588		 */
 589		JBUFFER_TRACE(jh, "ph3: write metadata");
 590		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
 591						      jh, &new_jh, blocknr);
 592		if (flags < 0) {
 593			jbd2_journal_abort(journal, flags);
 594			continue;
 595		}
 596		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
 597		wbuf[bufs++] = jh2bh(new_jh);
 598
 599		/* Record the new block's tag in the current descriptor
 600                   buffer */
 601
 602		tag_flag = 0;
 603		if (flags & 1)
 604			tag_flag |= JBD2_FLAG_ESCAPE;
 605		if (!first_tag)
 606			tag_flag |= JBD2_FLAG_SAME_UUID;
 607
 608		tag = (journal_block_tag_t *) tagp;
 609		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
 610		tag->t_flags = cpu_to_be32(tag_flag);
 611		tagp += tag_bytes;
 612		space_left -= tag_bytes;
 613
 614		if (first_tag) {
 615			memcpy (tagp, journal->j_uuid, 16);
 616			tagp += 16;
 617			space_left -= 16;
 618			first_tag = 0;
 619		}
 620
 621		/* If there's no more to do, or if the descriptor is full,
 622		   let the IO rip! */
 623
 624		if (bufs == journal->j_wbufsize ||
 625		    commit_transaction->t_buffers == NULL ||
 626		    space_left < tag_bytes + 16) {
 627
 628			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
 629
 630			/* Write an end-of-descriptor marker before
 631                           submitting the IOs.  "tag" still points to
 632                           the last tag we set up. */
 633
 634			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
 635
 636start_journal_io:
 637			for (i = 0; i < bufs; i++) {
 638				struct buffer_head *bh = wbuf[i];
 639				/*
 640				 * Compute checksum.
 641				 */
 642				if (JBD2_HAS_COMPAT_FEATURE(journal,
 643					JBD2_FEATURE_COMPAT_CHECKSUM)) {
 644					crc32_sum =
 645					    jbd2_checksum_data(crc32_sum, bh);
 646				}
 647
 648				lock_buffer(bh);
 649				clear_buffer_dirty(bh);
 650				set_buffer_uptodate(bh);
 651				bh->b_end_io = journal_end_buffer_io_sync;
 652				submit_bh(WRITE_SYNC, bh);
 653			}
 654			cond_resched();
 655			stats.run.rs_blocks_logged += bufs;
 656
 657			/* Force a new descriptor to be generated next
 658                           time round the loop. */
 659			descriptor = NULL;
 660			bufs = 0;
 661		}
 662	}
 663
 664	err = journal_finish_inode_data_buffers(journal, commit_transaction);
 665	if (err) {
 666		printk(KERN_WARNING
 667			"JBD2: Detected IO errors while flushing file data "
 668		       "on %s\n", journal->j_devname);
 669		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
 670			jbd2_journal_abort(journal, err);
 671		err = 0;
 672	}
 673
 674	write_lock(&journal->j_state_lock);
 675	J_ASSERT(commit_transaction->t_state == T_COMMIT);
 676	commit_transaction->t_state = T_COMMIT_DFLUSH;
 677	write_unlock(&journal->j_state_lock);
 678	/* 
 679	 * If the journal is not located on the file system device,
 680	 * then we must flush the file system device before we issue
 681	 * the commit record
 682	 */
 683	if (commit_transaction->t_need_data_flush &&
 684	    (journal->j_fs_dev != journal->j_dev) &&
 685	    (journal->j_flags & JBD2_BARRIER))
 686		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
 687
 688	/* Done it all: now write the commit record asynchronously. */
 689	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
 690				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
 691		err = journal_submit_commit_record(journal, commit_transaction,
 692						 &cbh, crc32_sum);
 693		if (err)
 694			__jbd2_journal_abort_hard(journal);
 695	}
 696
 697	blk_finish_plug(&plug);
 698
 699	/* Lo and behold: we have just managed to send a transaction to
 700           the log.  Before we can commit it, wait for the IO so far to
 701           complete.  Control buffers being written are on the
 702           transaction's t_log_list queue, and metadata buffers are on
 703           the t_iobuf_list queue.
 704
 705	   Wait for the buffers in reverse order.  That way we are
 706	   less likely to be woken up until all IOs have completed, and
 707	   so we incur less scheduling load.
 708	*/
 709
 710	jbd_debug(3, "JBD: commit phase 3\n");
 711
 712	/*
 713	 * akpm: these are BJ_IO, and j_list_lock is not needed.
 714	 * See __journal_try_to_free_buffer.
 715	 */
 716wait_for_iobuf:
 717	while (commit_transaction->t_iobuf_list != NULL) {
 718		struct buffer_head *bh;
 719
 720		jh = commit_transaction->t_iobuf_list->b_tprev;
 721		bh = jh2bh(jh);
 722		if (buffer_locked(bh)) {
 723			wait_on_buffer(bh);
 724			goto wait_for_iobuf;
 725		}
 726		if (cond_resched())
 727			goto wait_for_iobuf;
 728
 729		if (unlikely(!buffer_uptodate(bh)))
 730			err = -EIO;
 731
 732		clear_buffer_jwrite(bh);
 733
 734		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
 735		jbd2_journal_unfile_buffer(journal, jh);
 736
 737		/*
 738		 * ->t_iobuf_list should contain only dummy buffer_heads
 739		 * which were created by jbd2_journal_write_metadata_buffer().
 740		 */
 741		BUFFER_TRACE(bh, "dumping temporary bh");
 742		jbd2_journal_put_journal_head(jh);
 743		__brelse(bh);
 744		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
 745		free_buffer_head(bh);
 746
 747		/* We also have to unlock and free the corresponding
 748                   shadowed buffer */
 749		jh = commit_transaction->t_shadow_list->b_tprev;
 750		bh = jh2bh(jh);
 751		clear_bit(BH_JWrite, &bh->b_state);
 752		J_ASSERT_BH(bh, buffer_jbddirty(bh));
 753
 754		/* The metadata is now released for reuse, but we need
 755                   to remember it against this transaction so that when
 756                   we finally commit, we can do any checkpointing
 757                   required. */
 758		JBUFFER_TRACE(jh, "file as BJ_Forget");
 759		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
 760		/*
 761		 * Wake up any transactions which were waiting for this IO to
 762		 * complete. The barrier must be here so that changes by
 763		 * jbd2_journal_file_buffer() take effect before wake_up_bit()
 764		 * does the waitqueue check.
 765		 */
 766		smp_mb();
 767		wake_up_bit(&bh->b_state, BH_Unshadow);
 768		JBUFFER_TRACE(jh, "brelse shadowed buffer");
 769		__brelse(bh);
 770	}
 771
 772	J_ASSERT (commit_transaction->t_shadow_list == NULL);
 773
 774	jbd_debug(3, "JBD: commit phase 4\n");
 775
 776	/* Here we wait for the revoke record and descriptor record buffers */
 777 wait_for_ctlbuf:
 778	while (commit_transaction->t_log_list != NULL) {
 779		struct buffer_head *bh;
 780
 781		jh = commit_transaction->t_log_list->b_tprev;
 782		bh = jh2bh(jh);
 783		if (buffer_locked(bh)) {
 784			wait_on_buffer(bh);
 785			goto wait_for_ctlbuf;
 786		}
 787		if (cond_resched())
 788			goto wait_for_ctlbuf;
 789
 790		if (unlikely(!buffer_uptodate(bh)))
 791			err = -EIO;
 792
 793		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
 794		clear_buffer_jwrite(bh);
 795		jbd2_journal_unfile_buffer(journal, jh);
 796		jbd2_journal_put_journal_head(jh);
 797		__brelse(bh);		/* One for getblk */
 798		/* AKPM: bforget here */
 799	}
 800
 801	if (err)
 802		jbd2_journal_abort(journal, err);
 803
 804	jbd_debug(3, "JBD: commit phase 5\n");
 805	write_lock(&journal->j_state_lock);
 806	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
 807	commit_transaction->t_state = T_COMMIT_JFLUSH;
 808	write_unlock(&journal->j_state_lock);
 809
 810	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
 811				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
 812		err = journal_submit_commit_record(journal, commit_transaction,
 813						&cbh, crc32_sum);
 814		if (err)
 815			__jbd2_journal_abort_hard(journal);
 816	}
 817	if (cbh)
 818		err = journal_wait_on_commit_record(journal, cbh);
 819	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
 820				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
 821	    journal->j_flags & JBD2_BARRIER) {
 822		blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
 823	}
 824
 825	if (err)
 826		jbd2_journal_abort(journal, err);
 827
 828	/* End of a transaction!  Finally, we can do checkpoint
 829           processing: any buffers committed as a result of this
 830           transaction can be removed from any checkpoint list it was on
 831           before. */
 832
 833	jbd_debug(3, "JBD: commit phase 6\n");
 834
 835	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
 836	J_ASSERT(commit_transaction->t_buffers == NULL);
 837	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
 838	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
 839	J_ASSERT(commit_transaction->t_shadow_list == NULL);
 840	J_ASSERT(commit_transaction->t_log_list == NULL);
 841
 842restart_loop:
 843	/*
 844	 * As there are other places (journal_unmap_buffer()) adding buffers
 845	 * to this list we have to be careful and hold the j_list_lock.
 846	 */
 847	spin_lock(&journal->j_list_lock);
 848	while (commit_transaction->t_forget) {
 849		transaction_t *cp_transaction;
 850		struct buffer_head *bh;
 851		int try_to_free = 0;
 852
 853		jh = commit_transaction->t_forget;
 854		spin_unlock(&journal->j_list_lock);
 855		bh = jh2bh(jh);
 856		/*
 857		 * Get a reference so that bh cannot be freed before we are
 858		 * done with it.
 859		 */
 860		get_bh(bh);
 861		jbd_lock_bh_state(bh);
 862		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);
 863
 864		/*
 865		 * If there is undo-protected committed data against
 866		 * this buffer, then we can remove it now.  If it is a
 867		 * buffer needing such protection, the old frozen_data
 868		 * field now points to a committed version of the
 869		 * buffer, so rotate that field to the new committed
 870		 * data.
 871		 *
 872		 * Otherwise, we can just throw away the frozen data now.
 873		 *
 874		 * We also know that the frozen data has already fired
 875		 * its triggers if they exist, so we can clear that too.
 876		 */
 877		if (jh->b_committed_data) {
 878			jbd2_free(jh->b_committed_data, bh->b_size);
 879			jh->b_committed_data = NULL;
 880			if (jh->b_frozen_data) {
 881				jh->b_committed_data = jh->b_frozen_data;
 882				jh->b_frozen_data = NULL;
 883				jh->b_frozen_triggers = NULL;
 884			}
 885		} else if (jh->b_frozen_data) {
 886			jbd2_free(jh->b_frozen_data, bh->b_size);
 887			jh->b_frozen_data = NULL;
 888			jh->b_frozen_triggers = NULL;
 889		}
 890
 891		spin_lock(&journal->j_list_lock);
 892		cp_transaction = jh->b_cp_transaction;
 893		if (cp_transaction) {
 894			JBUFFER_TRACE(jh, "remove from old cp transaction");
 895			cp_transaction->t_chp_stats.cs_dropped++;
 896			__jbd2_journal_remove_checkpoint(jh);
 897		}
 898
 899		/* Only re-checkpoint the buffer_head if it is marked
 900		 * dirty.  If the buffer was added to the BJ_Forget list
 901		 * by jbd2_journal_forget, it may no longer be dirty and
 902		 * there's no point in keeping a checkpoint record for
 903		 * it. */
 904
 905		/* A buffer which has been freed while still being
 906		 * journaled by a previous transaction may end up still
 907		 * being dirty here, but we want to avoid writing back
 908		 * that buffer in the future after the "add to orphan"
  909		 * operation has been committed.  That's not only a performance
 910		 * gain, it also stops aliasing problems if the buffer is
 911		 * left behind for writeback and gets reallocated for another
 912		 * use in a different page. */
 913		if (buffer_freed(bh) && !jh->b_next_transaction) {
 914			clear_buffer_freed(bh);
 915			clear_buffer_jbddirty(bh);
 916		}
 917
 918		if (buffer_jbddirty(bh)) {
 919			JBUFFER_TRACE(jh, "add to new checkpointing trans");
 920			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
 921			if (is_journal_aborted(journal))
 922				clear_buffer_jbddirty(bh);
 923		} else {
 924			J_ASSERT_BH(bh, !buffer_dirty(bh));
 925			/*
  926			 * A buffer on the BJ_Forget list that is not jbddirty means
 927			 * it has been freed by this transaction and hence it
 928			 * could not have been reallocated until this
 929			 * transaction has committed. *BUT* it could be
 930			 * reallocated once we have written all the data to
 931			 * disk and before we process the buffer on BJ_Forget
 932			 * list.
 933			 */
 934			if (!jh->b_next_transaction)
 935				try_to_free = 1;
 936		}
 937		JBUFFER_TRACE(jh, "refile or unfile buffer");
 938		__jbd2_journal_refile_buffer(jh);
 939		jbd_unlock_bh_state(bh);
 940		if (try_to_free)
 941			release_buffer_page(bh);	/* Drops bh reference */
 942		else
 943			__brelse(bh);
 944		cond_resched_lock(&journal->j_list_lock);
 945	}
 946	spin_unlock(&journal->j_list_lock);
 947	/*
 948	 * This is a bit sleazy.  We use j_list_lock to protect transition
 949	 * of a transaction into T_FINISHED state and calling
 950	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
 951	 * other checkpointing code processing the transaction...
 952	 */
 953	write_lock(&journal->j_state_lock);
 954	spin_lock(&journal->j_list_lock);
 955	/*
 956	 * Now recheck if some buffers did not get attached to the transaction
 957	 * while the lock was dropped...
 958	 */
 959	if (commit_transaction->t_forget) {
 960		spin_unlock(&journal->j_list_lock);
 961		write_unlock(&journal->j_state_lock);
 962		goto restart_loop;
 963	}
 964
 965	/* Done with this transaction! */
 966
 967	jbd_debug(3, "JBD: commit phase 7\n");
 968
 969	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
 970
 971	commit_transaction->t_start = jiffies;
 972	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
 973					      commit_transaction->t_start);
 974
 975	/*
 976	 * File the transaction statistics
 977	 */
 978	stats.ts_tid = commit_transaction->t_tid;
 979	stats.run.rs_handle_count =
 980		atomic_read(&commit_transaction->t_handle_count);
 981	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
 982			     commit_transaction->t_tid, &stats.run);
 983
 984	/*
 985	 * Calculate overall stats
 986	 */
 987	spin_lock(&journal->j_history_lock);
 988	journal->j_stats.ts_tid++;
 989	journal->j_stats.run.rs_wait += stats.run.rs_wait;
 990	journal->j_stats.run.rs_running += stats.run.rs_running;
 991	journal->j_stats.run.rs_locked += stats.run.rs_locked;
 992	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
 993	journal->j_stats.run.rs_logging += stats.run.rs_logging;
 994	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
 995	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
 996	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
 997	spin_unlock(&journal->j_history_lock);
 998
 999	commit_transaction->t_state = T_FINISHED;
1000	J_ASSERT(commit_transaction == journal->j_committing_transaction);
1001	journal->j_commit_sequence = commit_transaction->t_tid;
1002	journal->j_committing_transaction = NULL;
1003	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1004
1005	/*
1006	 * weight the commit time higher than the average time so we don't
1007	 * react too strongly to vast changes in the commit time
1008	 */
1009	if (likely(journal->j_average_commit_time))
1010		journal->j_average_commit_time = (commit_time +
1011				journal->j_average_commit_time*3) / 4;
1012	else
1013		journal->j_average_commit_time = commit_time;
1014	write_unlock(&journal->j_state_lock);
1015
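	/*
	 * If nothing is left to checkpoint we can drop the transaction
	 * right away; otherwise link it into the journal's circular list
	 * of transactions that still have buffers waiting to be
	 * checkpointed.
	 */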
1016	if (commit_transaction->t_checkpoint_list == NULL &&
1017	    commit_transaction->t_checkpoint_io_list == NULL) {
1018		__jbd2_journal_drop_transaction(journal, commit_transaction);
1019		to_free = 1;
1020	} else {
1021		if (journal->j_checkpoint_transactions == NULL) {
1022			journal->j_checkpoint_transactions = commit_transaction;
1023			commit_transaction->t_cpnext = commit_transaction;
1024			commit_transaction->t_cpprev = commit_transaction;
1025		} else {
1026			commit_transaction->t_cpnext =
1027				journal->j_checkpoint_transactions;
1028			commit_transaction->t_cpprev =
1029				commit_transaction->t_cpnext->t_cpprev;
1030			commit_transaction->t_cpnext->t_cpprev =
1031				commit_transaction;
1032			commit_transaction->t_cpprev->t_cpnext =
1033				commit_transaction;
1034		}
1035	}
1036	spin_unlock(&journal->j_list_lock);
1037
1038	if (journal->j_commit_callback)
1039		journal->j_commit_callback(journal, commit_transaction);
1040
1041	trace_jbd2_end_commit(journal, commit_transaction);
1042	jbd_debug(1, "JBD: commit %d complete, head %d\n",
1043		  journal->j_commit_sequence, journal->j_tail_sequence);
1044	if (to_free)
1045		kfree(commit_transaction);
1046
1047	wake_up(&journal->j_wait_done_commit);
1048}
fs/jbd2/commit.c, Linux v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * linux/fs/jbd2/commit.c
   4 *
   5 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
   6 *
   7 * Copyright 1998 Red Hat corp --- All Rights Reserved
   8 *
   9 * Journal commit routines for the generic filesystem journaling code;
  10 * part of the ext2fs journaling system.
  11 */
  12
  13#include <linux/time.h>
  14#include <linux/fs.h>
  15#include <linux/jbd2.h>
  16#include <linux/errno.h>
  17#include <linux/slab.h>
  18#include <linux/mm.h>
  19#include <linux/pagemap.h>
  20#include <linux/jiffies.h>
  21#include <linux/crc32.h>
  22#include <linux/writeback.h>
  23#include <linux/backing-dev.h>
  24#include <linux/bio.h>
  25#include <linux/blkdev.h>
  26#include <linux/bitops.h>
  27#include <trace/events/jbd2.h>
  28
  29/*
  30 * IO end handler for temporary buffer_heads handling writes to the journal.
  31 */
  32static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
  33{
  34	struct buffer_head *orig_bh = bh->b_private;
  35
  36	BUFFER_TRACE(bh, "");
  37	if (uptodate)
  38		set_buffer_uptodate(bh);
  39	else
  40		clear_buffer_uptodate(bh);
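	/*
	 * bh->b_private points at the metadata buffer this temporary bh
	 * shadows (set up by jbd2_journal_write_metadata_buffer()).  Once
	 * the copy has been written to the log, clear BH_Shadow and wake
	 * anyone waiting for the shadow state to end.
	 */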
  41	if (orig_bh) {
  42		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
  43		smp_mb__after_atomic();
  44		wake_up_bit(&orig_bh->b_state, BH_Shadow);
  45	}
  46	unlock_buffer(bh);
  47}
  48
  49/*
  50 * When an ext4 file is truncated, it is possible that some pages are not
  51 * successfully freed, because they are attached to a committing transaction.
  52 * After the transaction commits, these pages are left on the LRU, with no
  53 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
  54 * by the VM, but their apparent absence upsets the VM accounting, and it makes
  55 * the numbers in /proc/meminfo look odd.
  56 *
  57 * So here, we have a buffer which has just come off the forget list.  Look to
  58 * see if we can strip all buffers from the backing page.
  59 *
  60 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
  61 * caller provided us with a ref against the buffer, and we drop that here.
  62 */
  63static void release_buffer_page(struct buffer_head *bh)
  64{
  65	struct folio *folio;
  66
  67	if (buffer_dirty(bh))
  68		goto nope;
  69	if (atomic_read(&bh->b_count) != 1)
  70		goto nope;
  71	folio = bh->b_folio;
  72	if (folio->mapping)
  73		goto nope;
  74
  75	/* OK, it's a truncated page */
  76	if (!folio_trylock(folio))
  77		goto nope;
  78
  79	folio_get(folio);
  80	__brelse(bh);
  81	try_to_free_buffers(folio);
  82	folio_unlock(folio);
  83	folio_put(folio);
  84	return;
  85
  86nope:
  87	__brelse(bh);
  88}
  89
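/*
 * Compute the commit block's checksum for v2/v3 checksum journals: zero
 * the legacy checksum fields, checksum the whole block against the
 * journal's seed, and store the result in h_chksum[0].
 */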
  90static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
  91{
  92	struct commit_header *h;
  93	__u32 csum;
  94
  95	if (!jbd2_journal_has_csum_v2or3(j))
  96		return;
  97
  98	h = (struct commit_header *)(bh->b_data);
  99	h->h_chksum_type = 0;
 100	h->h_chksum_size = 0;
 101	h->h_chksum[0] = 0;
 102	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 103	h->h_chksum[0] = cpu_to_be32(csum);
 104}
 105
 106/*
 107 * Done it all: now submit the commit record.  We should have
 108 * cleaned up our previous buffers by now, so if we are in abort
 109 * mode we can now just skip the rest of the journal write
 110 * entirely.
 111 *
 112 * Returns 1 if the journal needs to be aborted or 0 on success
 113 */
 114static int journal_submit_commit_record(journal_t *journal,
 115					transaction_t *commit_transaction,
 116					struct buffer_head **cbh,
 117					__u32 crc32_sum)
 118{
 119	struct commit_header *tmp;
 120	struct buffer_head *bh;
 121	struct timespec64 now;
 122	blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
 123
 124	*cbh = NULL;
 125
 126	if (is_journal_aborted(journal))
 127		return 0;
 128
 129	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
 130						JBD2_COMMIT_BLOCK);
 131	if (!bh)
 132		return 1;
 133
 134	tmp = (struct commit_header *)bh->b_data;
 135	ktime_get_coarse_real_ts64(&now);
 136	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
 137	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
 138
 139	if (jbd2_has_feature_checksum(journal)) {
 140		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
 141		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 142		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 143	}
 144	jbd2_commit_block_csum_set(journal, bh);
 145
 146	BUFFER_TRACE(bh, "submit commit block");
 147	lock_buffer(bh);
 148	clear_buffer_dirty(bh);
 149	set_buffer_uptodate(bh);
 150	bh->b_end_io = journal_end_buffer_io_sync;
 151
 152	if (journal->j_flags & JBD2_BARRIER &&
 153	    !jbd2_has_feature_async_commit(journal))
 154		write_flags |= REQ_PREFLUSH | REQ_FUA;
 155
 156	submit_bh(write_flags, bh);
 157	*cbh = bh;
 158	return 0;
 159}
 160
 161/*
  162 * This function, along with journal_submit_commit_record(),
  163 * allows the commit record to be written asynchronously.
 164 */
 165static int journal_wait_on_commit_record(journal_t *journal,
 166					 struct buffer_head *bh)
 167{
 168	int ret = 0;
 169
 170	clear_buffer_dirty(bh);
 171	wait_on_buffer(bh);
 172
 173	if (unlikely(!buffer_uptodate(bh)))
 174		ret = -EIO;
 175	put_bh(bh);            /* One for getblk() */
 176
 177	return ret;
 178}
 179
 180/* Send all the data buffers related to an inode */
 181int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode)
 182{
 183	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
 184		return 0;
 185
 186	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 187	return journal->j_submit_inode_data_buffers(jinode);
 188
 189}
 190EXPORT_SYMBOL(jbd2_submit_inode_data);
 191
 192int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
 193{
 194	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
 195		!jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
 196		return 0;
 197	return filemap_fdatawait_range_keep_errors(
 198		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
 199		jinode->i_dirty_end);
 200}
 201EXPORT_SYMBOL(jbd2_wait_inode_data);
 202
 203/*
 204 * Submit all the data buffers of inode associated with the transaction to
 205 * disk.
 206 *
 207 * We are in a committing transaction. Therefore no new inode can be added to
 208 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 209 * operate on from being released while we write out pages.
 210 */
 211static int journal_submit_data_buffers(journal_t *journal,
 212		transaction_t *commit_transaction)
 213{
 214	struct jbd2_inode *jinode;
 215	int err, ret = 0;
 216
 217	spin_lock(&journal->j_list_lock);
 218	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 219		if (!(jinode->i_flags & JI_WRITE_DATA))
 220			continue;
 221		jinode->i_flags |= JI_COMMIT_RUNNING;
 222		spin_unlock(&journal->j_list_lock);
 223		/* submit the inode data buffers. */
 224		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 225		if (journal->j_submit_inode_data_buffers) {
 226			err = journal->j_submit_inode_data_buffers(jinode);
 227			if (!ret)
 228				ret = err;
 229		}
 230		spin_lock(&journal->j_list_lock);
 231		J_ASSERT(jinode->i_transaction == commit_transaction);
 232		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 233		smp_mb();
 234		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 235	}
 236	spin_unlock(&journal->j_list_lock);
 237	return ret;
 238}
 239
 240int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
 241{
 242	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
 243
 244	return filemap_fdatawait_range_keep_errors(mapping,
 245						   jinode->i_dirty_start,
 246						   jinode->i_dirty_end);
 247}
 248
 249/*
 250 * Wait for data submitted for writeout, refile inodes to proper
 251 * transaction if needed.
 252 *
 253 */
 254static int journal_finish_inode_data_buffers(journal_t *journal,
 255		transaction_t *commit_transaction)
 256{
 257	struct jbd2_inode *jinode, *next_i;
 258	int err, ret = 0;
 259
 260	/* For locking, see the comment in journal_submit_data_buffers() */
 261	spin_lock(&journal->j_list_lock);
 262	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 263		if (!(jinode->i_flags & JI_WAIT_DATA))
 264			continue;
 265		jinode->i_flags |= JI_COMMIT_RUNNING;
 266		spin_unlock(&journal->j_list_lock);
 267		/* wait for the inode data buffers writeout. */
 268		if (journal->j_finish_inode_data_buffers) {
 269			err = journal->j_finish_inode_data_buffers(jinode);
 270			if (!ret)
 271				ret = err;
 272		}
 273		cond_resched();
 274		spin_lock(&journal->j_list_lock);
 275		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 276		smp_mb();
 277		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 278	}
 279
 280	/* Now refile inode to proper lists */
 281	list_for_each_entry_safe(jinode, next_i,
 282				 &commit_transaction->t_inode_list, i_list) {
 283		list_del(&jinode->i_list);
 284		if (jinode->i_next_transaction) {
 285			jinode->i_transaction = jinode->i_next_transaction;
 286			jinode->i_next_transaction = NULL;
 287			list_add(&jinode->i_list,
 288				&jinode->i_transaction->t_inode_list);
 289		} else {
 290			jinode->i_transaction = NULL;
 291			jinode->i_dirty_start = 0;
 292			jinode->i_dirty_end = 0;
 293		}
 294	}
 295	spin_unlock(&journal->j_list_lock);
 296
 297	return ret;
 298}
 299
 300static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 301{
 302	char *addr;
 303	__u32 checksum;
 304
 305	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
 306	checksum = crc32_be(crc32_sum, addr, bh->b_size);
 307	kunmap_local(addr);
 308
 309	return checksum;
 310}
 311
 312static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
 313				   unsigned long long block)
 314{
 315	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
 316	if (jbd2_has_feature_64bit(j))
 317		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 318}
 319
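/*
 * For v2/v3 checksum journals, checksum the commit sequence number
 * together with the log block's contents and store the result in the
 * block tag: the full 32 bits for csum3, truncated to 16 bits otherwise.
 */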
 320static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 321				    struct buffer_head *bh, __u32 sequence)
 322{
 323	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
 324	__u8 *addr;
 325	__u32 csum32;
 326	__be32 seq;
 327
 328	if (!jbd2_journal_has_csum_v2or3(j))
 329		return;
 330
 331	seq = cpu_to_be32(sequence);
 332	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
 333	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
 334	csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
 335	kunmap_local(addr);
 336
 337	if (jbd2_has_feature_csum3(j))
 338		tag3->t_checksum = cpu_to_be32(csum32);
 339	else
 340		tag->t_checksum = cpu_to_be16(csum32);
 341}
 342/*
 343 * jbd2_journal_commit_transaction
 344 *
 345 * The primary function for committing a transaction to the log.  This
 346 * function is called by the journal thread to begin a complete commit.
 347 */
 348void jbd2_journal_commit_transaction(journal_t *journal)
 349{
 350	struct transaction_stats_s stats;
 351	transaction_t *commit_transaction;
 352	struct journal_head *jh;
 353	struct buffer_head *descriptor;
 354	struct buffer_head **wbuf = journal->j_wbuf;
 355	int bufs;
 356	int escape;
 357	int err;
 358	unsigned long long blocknr;
 359	ktime_t start_time;
 360	u64 commit_time;
 361	char *tagp = NULL;
 362	journal_block_tag_t *tag = NULL;
 363	int space_left = 0;
 364	int first_tag = 0;
 365	int tag_flag;
 366	int i;
 367	int tag_bytes = journal_tag_bytes(journal);
 368	struct buffer_head *cbh = NULL; /* For transactional checksums */
 369	__u32 crc32_sum = ~0;
 370	struct blk_plug plug;
 371	/* Tail of the journal */
 372	unsigned long first_block;
 373	tid_t first_tid;
 374	int update_tail;
 375	int csum_size = 0;
 376	LIST_HEAD(io_bufs);
 377	LIST_HEAD(log_bufs);
 378
 379	if (jbd2_journal_has_csum_v2or3(journal))
 380		csum_size = sizeof(struct jbd2_journal_block_tail);
 381
 382	/*
 383	 * First job: lock down the current transaction and wait for
 384	 * all outstanding updates to complete.
 385	 */
 386
 387	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
 388	if (journal->j_flags & JBD2_FLUSHED) {
 389		jbd2_debug(3, "super block updated\n");
 390		mutex_lock_io(&journal->j_checkpoint_mutex);
 391		/*
 392		 * We hold j_checkpoint_mutex so tail cannot change under us.
 393		 * We don't need any special data guarantees for writing sb
 394		 * since journal is empty and it is ok for write to be
 395		 * flushed only with transaction commit.
 396		 */
 397		jbd2_journal_update_sb_log_tail(journal,
 398						journal->j_tail_sequence,
 399						journal->j_tail, 0);
 400		mutex_unlock(&journal->j_checkpoint_mutex);
 401	} else {
 402		jbd2_debug(3, "superblock not updated\n");
 403	}
 404
 405	J_ASSERT(journal->j_running_transaction != NULL);
 406	J_ASSERT(journal->j_committing_transaction == NULL);
 407
 408	write_lock(&journal->j_state_lock);
 409	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
 410	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
 411		DEFINE_WAIT(wait);
 412
 413		prepare_to_wait(&journal->j_fc_wait, &wait,
 414				TASK_UNINTERRUPTIBLE);
 415		write_unlock(&journal->j_state_lock);
 416		schedule();
 417		write_lock(&journal->j_state_lock);
 418		finish_wait(&journal->j_fc_wait, &wait);
 419		/*
 420		 * TODO: by blocking fast commits here, we are increasing
 421		 * fsync() latency slightly. Strictly speaking, we don't need
 422		 * to block fast commits until the transaction enters T_FLUSH
 423		 * state. So an optimization is possible where we block new fast
 424		 * commits here and wait for existing ones to complete
 425		 * just before we enter T_FLUSH. That way, the existing fast
  426		 * commits and this full commit can proceed in parallel.
 427		 */
 428	}
 429	write_unlock(&journal->j_state_lock);
 430
 431	commit_transaction = journal->j_running_transaction;
 432
 433	trace_jbd2_start_commit(journal, commit_transaction);
 434	jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
 435			commit_transaction->t_tid);
 436
 437	write_lock(&journal->j_state_lock);
 438	journal->j_fc_off = 0;
 439	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 440	commit_transaction->t_state = T_LOCKED;
 441
 442	trace_jbd2_commit_locking(journal, commit_transaction);
 443	stats.run.rs_wait = commit_transaction->t_max_wait;
 444	stats.run.rs_request_delay = 0;
 445	stats.run.rs_locked = jiffies;
 446	if (commit_transaction->t_requested)
 447		stats.run.rs_request_delay =
 448			jbd2_time_diff(commit_transaction->t_requested,
 449				       stats.run.rs_locked);
 450	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
 451					      stats.run.rs_locked);
 452
 453	// waits for any t_updates to finish
 454	jbd2_journal_wait_updates(journal);
 455
 456	commit_transaction->t_state = T_SWITCH;
 457
 458	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 459			journal->j_max_transaction_buffers);
 460
 461	/*
 462	 * First thing we are allowed to do is to discard any remaining
 463	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
 464	 * that there are no such buffers: if a large filesystem
 465	 * operation like a truncate needs to split itself over multiple
 466	 * transactions, then it may try to do a jbd2_journal_restart() while
 467	 * there are still BJ_Reserved buffers outstanding.  These must
 468	 * be released cleanly from the current transaction.
 469	 *
 470	 * In this case, the filesystem must still reserve write access
 471	 * again before modifying the buffer in the new transaction, but
 472	 * we do not require it to remember exactly which old buffers it
 473	 * has reserved.  This is consistent with the existing behaviour
 474	 * that multiple jbd2_journal_get_write_access() calls to the same
 475	 * buffer are perfectly permissible.
 476	 * We use journal->j_state_lock here to serialize processing of
 477	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
 478	 */
 479	while (commit_transaction->t_reserved_list) {
 480		jh = commit_transaction->t_reserved_list;
 481		JBUFFER_TRACE(jh, "reserved, unused: refile");
 482		/*
 483		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
 484		 * leave undo-committed data.
 485		 */
 486		if (jh->b_committed_data) {
 487			struct buffer_head *bh = jh2bh(jh);
 488
 489			spin_lock(&jh->b_state_lock);
 490			jbd2_free(jh->b_committed_data, bh->b_size);
 491			jh->b_committed_data = NULL;
 492			spin_unlock(&jh->b_state_lock);
 493		}
 494		jbd2_journal_refile_buffer(journal, jh);
 495	}
 496
 497	write_unlock(&journal->j_state_lock);
 498	/*
 499	 * Now try to drop any written-back buffers from the journal's
 500	 * checkpoint lists.  We do this *before* commit because it potentially
 501	 * frees some memory
 502	 */
 503	spin_lock(&journal->j_list_lock);
 504	__jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_BUSY_STOP);
 505	spin_unlock(&journal->j_list_lock);
 506
 507	jbd2_debug(3, "JBD2: commit phase 1\n");
 508
 509	/*
 510	 * Clear revoked flag to reflect there is no revoked buffers
 511	 * in the next transaction which is going to be started.
 512	 */
 513	jbd2_clear_buffer_revoked_flags(journal);
 514
 515	/*
 516	 * Switch to a new revoke table.
 517	 */
 518	jbd2_journal_switch_revoke_table(journal);
 519
 520	write_lock(&journal->j_state_lock);
 521	/*
 522	 * Reserved credits cannot be claimed anymore, free them
 523	 */
 524	atomic_sub(atomic_read(&journal->j_reserved_credits),
 525		   &commit_transaction->t_outstanding_credits);
 526
 527	trace_jbd2_commit_flushing(journal, commit_transaction);
 528	stats.run.rs_flushing = jiffies;
 529	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
 530					     stats.run.rs_flushing);
 531
 532	commit_transaction->t_state = T_FLUSH;
 533	journal->j_committing_transaction = commit_transaction;
 534	journal->j_running_transaction = NULL;
 535	start_time = ktime_get();
 536	commit_transaction->t_log_start = journal->j_head;
 537	wake_up_all(&journal->j_wait_transaction_locked);
 538	write_unlock(&journal->j_state_lock);
 539
 540	jbd2_debug(3, "JBD2: commit phase 2a\n");
 541
 542	/*
 543	 * Now start flushing things to disk, in the order they appear
 544	 * on the transaction lists.  Data blocks go first.
 545	 */
 546	err = journal_submit_data_buffers(journal, commit_transaction);
 547	if (err)
 548		jbd2_journal_abort(journal, err);
 549
 550	blk_start_plug(&plug);
 551	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
 552
 553	jbd2_debug(3, "JBD2: commit phase 2b\n");
 554
 555	/*
 556	 * Way to go: we have now written out all of the data for a
 557	 * transaction!  Now comes the tricky part: we need to write out
 558	 * metadata.  Loop over the transaction's entire buffer list:
 559	 */
 560	write_lock(&journal->j_state_lock);
 561	commit_transaction->t_state = T_COMMIT;
 562	write_unlock(&journal->j_state_lock);
 563
 564	trace_jbd2_commit_logging(journal, commit_transaction);
 565	stats.run.rs_logging = jiffies;
 566	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
 567					       stats.run.rs_logging);
 568	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
 569	stats.run.rs_blocks_logged = 0;
 570
 571	J_ASSERT(commit_transaction->t_nr_buffers <=
 572		 atomic_read(&commit_transaction->t_outstanding_credits));
 573
 574	bufs = 0;
 575	descriptor = NULL;
 576	while (commit_transaction->t_buffers) {
 577
 578		/* Find the next buffer to be journaled... */
 579
 580		jh = commit_transaction->t_buffers;
 581
 582		/* If we're in abort mode, we just un-journal the buffer and
 583		   release it. */
 584
 585		if (is_journal_aborted(journal)) {
 586			clear_buffer_jbddirty(jh2bh(jh));
 587			JBUFFER_TRACE(jh, "journal is aborting: refile");
 588			jbd2_buffer_abort_trigger(jh,
 589						  jh->b_frozen_data ?
 590						  jh->b_frozen_triggers :
 591						  jh->b_triggers);
 592			jbd2_journal_refile_buffer(journal, jh);
 593			/* If that was the last one, we need to clean up
 594			 * any descriptor buffers which may have been
 595			 * already allocated, even if we are now
 596			 * aborting. */
 597			if (!commit_transaction->t_buffers)
 598				goto start_journal_io;
 599			continue;
 600		}
 601
 602		/* Make sure we have a descriptor block in which to
 603		   record the metadata buffer. */
 604
 605		if (!descriptor) {
 606			J_ASSERT (bufs == 0);
 607
 608			jbd2_debug(4, "JBD2: get descriptor\n");
 609
 610			descriptor = jbd2_journal_get_descriptor_buffer(
 611							commit_transaction,
 612							JBD2_DESCRIPTOR_BLOCK);
 613			if (!descriptor) {
 614				jbd2_journal_abort(journal, -EIO);
 615				continue;
 616			}
 617
 618			jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
 619				(unsigned long long)descriptor->b_blocknr,
 620				descriptor->b_data);
 621			tagp = &descriptor->b_data[sizeof(journal_header_t)];
 622			space_left = descriptor->b_size -
 623						sizeof(journal_header_t);
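			/*
			 * Roughly, a descriptor block holds a journal_header_t
			 * followed by one tag per logged block, with the
			 * 16-byte journal UUID inserted after the very first
			 * tag and room reserved at the end for the checksum
			 * tail.
			 */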
 624			first_tag = 1;
 625			set_buffer_jwrite(descriptor);
 626			set_buffer_dirty(descriptor);
 627			wbuf[bufs++] = descriptor;
 628
 629			/* Record it so that we can wait for IO
 630                           completion later */
 631			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
 632			jbd2_file_log_bh(&log_bufs, descriptor);
 633		}
 634
 635		/* Where is the buffer to be written? */
 636
 637		err = jbd2_journal_next_log_block(journal, &blocknr);
 638		/* If the block mapping failed, just abandon the buffer
 639		   and repeat this loop: we'll fall into the
 640		   refile-on-abort condition above. */
 641		if (err) {
 642			jbd2_journal_abort(journal, err);
 643			continue;
 644		}
 645
 646		/*
 647		 * start_this_handle() uses t_outstanding_credits to determine
 648		 * the free space in the log.
 649		 */
 650		atomic_dec(&commit_transaction->t_outstanding_credits);
 651
 652		/* Bump b_count to prevent truncate from stumbling over
 653                   the shadowed buffer!  @@@ This can go if we ever get
 654                   rid of the shadow pairing of buffers. */
 655		atomic_inc(&jh2bh(jh)->b_count);
 656
 657		/*
 658		 * Make a temporary IO buffer with which to write it out
 659		 * (this will requeue the metadata buffer to BJ_Shadow).
 660		 */
 661		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 662		JBUFFER_TRACE(jh, "ph3: write metadata");
 663		escape = jbd2_journal_write_metadata_buffer(commit_transaction,
 664						jh, &wbuf[bufs], blocknr);
 665		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
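		/*
		 * Note: "escape" is set when the block's first four bytes
		 * happen to match the journal magic number; the copy written
		 * to the log has them zeroed, and JBD2_FLAG_ESCAPE below
		 * tells recovery to restore them.
		 */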
 666
 667		/* Record the new block's tag in the current descriptor
 668                   buffer */
 669
 670		tag_flag = 0;
 671		if (escape)
 672			tag_flag |= JBD2_FLAG_ESCAPE;
 673		if (!first_tag)
 674			tag_flag |= JBD2_FLAG_SAME_UUID;
 675
 676		tag = (journal_block_tag_t *) tagp;
 677		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
 678		tag->t_flags = cpu_to_be16(tag_flag);
 679		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
 680					commit_transaction->t_tid);
 681		tagp += tag_bytes;
 682		space_left -= tag_bytes;
 683		bufs++;
 684
 685		if (first_tag) {
 686			memcpy (tagp, journal->j_uuid, 16);
 687			tagp += 16;
 688			space_left -= 16;
 689			first_tag = 0;
 690		}
 691
 692		/* If there's no more to do, or if the descriptor is full,
 693		   let the IO rip! */
 694
 695		if (bufs == journal->j_wbufsize ||
 696		    commit_transaction->t_buffers == NULL ||
 697		    space_left < tag_bytes + 16 + csum_size) {
 698
 699			jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);
 700
 701			/* Write an end-of-descriptor marker before
 702                           submitting the IOs.  "tag" still points to
 703                           the last tag we set up. */
 704
 705			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 706start_journal_io:
 707			if (descriptor)
 708				jbd2_descriptor_block_csum_set(journal,
 709							descriptor);
 710
 711			for (i = 0; i < bufs; i++) {
 712				struct buffer_head *bh = wbuf[i];
 713
 714				/*
 715				 * Compute checksum.
 716				 */
 717				if (jbd2_has_feature_checksum(journal)) {
 718					crc32_sum =
 719					    jbd2_checksum_data(crc32_sum, bh);
 720				}
 721
 722				lock_buffer(bh);
 723				clear_buffer_dirty(bh);
 724				set_buffer_uptodate(bh);
 725				bh->b_end_io = journal_end_buffer_io_sync;
 726				submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
 727					  bh);
 728			}
 729			cond_resched();
 730
 731			/* Force a new descriptor to be generated next
 732                           time round the loop. */
 733			descriptor = NULL;
 734			bufs = 0;
 735		}
 736	}
 737
 738	err = journal_finish_inode_data_buffers(journal, commit_transaction);
 739	if (err) {
 740		printk(KERN_WARNING
 741			"JBD2: Detected IO errors while flushing file data "
 742		       "on %s\n", journal->j_devname);
 743		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
 744			jbd2_journal_abort(journal, err);
 745		err = 0;
 746	}
 747
 748	/*
 749	 * Get the current oldest transaction in the log before we issue a
 750	 * flush to the filesystem device. After the flush we can be sure
 751	 * that blocks of all older transactions are checkpointed to
 752	 * persistent storage, so it is safe to update the journal start in
 753	 * the superblock with the numbers we get here.
 754	 */
 755	update_tail =
 756		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
 757
 758	write_lock(&journal->j_state_lock);
 759	if (update_tail) {
 760		long freed = first_block - journal->j_tail;
 761
 762		if (first_block < journal->j_tail)
 763			freed += journal->j_last - journal->j_first;
 764		/* Update the tail only if we free a significant amount of space */
 765		if (freed < journal->j_max_transaction_buffers)
 766			update_tail = 0;
 767	}
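	/*
	 * The log is circular, so the subtraction above can go negative once
	 * the tail has wrapped; e.g. with j_first=1, j_last=8193, j_tail=8000
	 * and first_block=200, freed = 200 - 8000 + 8192 = 392 blocks.
	 */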
 768	J_ASSERT(commit_transaction->t_state == T_COMMIT);
 769	commit_transaction->t_state = T_COMMIT_DFLUSH;
 770	write_unlock(&journal->j_state_lock);
 771
 772	/*
 773	 * If the journal is not located on the file system device,
 774	 * then we must flush the file system device before we issue
 775	 * the commit record and update the journal tail sequence.
 776	 */
 777	if ((commit_transaction->t_need_data_flush || update_tail) &&
 778	    (journal->j_fs_dev != journal->j_dev) &&
 779	    (journal->j_flags & JBD2_BARRIER))
 780		blkdev_issue_flush(journal->j_fs_dev);
 781
 782	/* Done it all: now write the commit record asynchronously. */
 783	if (jbd2_has_feature_async_commit(journal)) {
 784		err = journal_submit_commit_record(journal, commit_transaction,
 785						 &cbh, crc32_sum);
 786		if (err)
 787			jbd2_journal_abort(journal, err);
 788	}
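	/*
	 * With async commit the commit record goes out without waiting for
	 * the rest of the journal IO; recovery relies on the checksum carried
	 * in it (crc32_sum above) to detect a partially written transaction.
	 */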
 789
 790	blk_finish_plug(&plug);
 791
 792	/*
 793	 * Lo and behold: we have just managed to send a transaction to
 794	 * the log.  Before we can commit it, wait for the IO so far to
 795	 * complete.  Control buffers being written are on the local
 796	 * log_bufs list, and metadata buffers are on the io_bufs list.
 797	 *
 798	 * Wait for the buffers in reverse order.  That way we are less
 799	 * likely to be woken up until all IOs have completed, and so we
 800	 * incur less scheduling load.
 801	 */
 802
 803	jbd2_debug(3, "JBD2: commit phase 3\n");
 804
 805	while (!list_empty(&io_bufs)) {
 806		struct buffer_head *bh = list_entry(io_bufs.prev,
 807						    struct buffer_head,
 808						    b_assoc_buffers);
 809
 810		wait_on_buffer(bh);
 811		cond_resched();
 812
 813		if (unlikely(!buffer_uptodate(bh)))
 814			err = -EIO;
 815		jbd2_unfile_log_bh(bh);
 816		stats.run.rs_blocks_logged++;
 817
 818		/*
 819		 * The list contains temporary buffer heads created by
 820		 * jbd2_journal_write_metadata_buffer().
 821		 */
 822		BUFFER_TRACE(bh, "dumping temporary bh");
 823		__brelse(bh);
 824		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
 825		free_buffer_head(bh);
 826
 827		/* We also have to refile the corresponding shadowed buffer */
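		/*
		 * io_bufs and the shadow list were filed in the same order,
		 * so the temporary bh just waited on should pair up with the
		 * tail of t_shadow_list taken here.
		 */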
 828		jh = commit_transaction->t_shadow_list->b_tprev;
 829		bh = jh2bh(jh);
 830		clear_buffer_jwrite(bh);
 831		J_ASSERT_BH(bh, buffer_jbddirty(bh));
 832		J_ASSERT_BH(bh, !buffer_shadow(bh));
 833
 834		/* The metadata is now released for reuse, but we need
 835                   to remember it against this transaction so that when
 836                   we finally commit, we can do any checkpointing
 837                   required. */
 838		JBUFFER_TRACE(jh, "file as BJ_Forget");
 839		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
 840		JBUFFER_TRACE(jh, "brelse shadowed buffer");
 841		__brelse(bh);
 842	}
 843
 844	J_ASSERT (commit_transaction->t_shadow_list == NULL);
 845
 846	jbd2_debug(3, "JBD2: commit phase 4\n");
 847
 848	/* Here we wait for the revoke record and descriptor record buffers */
 849	while (!list_empty(&log_bufs)) {
 850		struct buffer_head *bh;
 851
 852		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
 853		wait_on_buffer(bh);
 854		cond_resched();
 855
 856		if (unlikely(!buffer_uptodate(bh)))
 857			err = -EIO;
 858
 859		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
 860		clear_buffer_jwrite(bh);
 861		jbd2_unfile_log_bh(bh);
 862		stats.run.rs_blocks_logged++;
 863		__brelse(bh);		/* One for getblk */
 864		/* AKPM: bforget here */
 865	}
 866
 867	if (err)
 868		jbd2_journal_abort(journal, err);
 869
 870	jbd2_debug(3, "JBD2: commit phase 5\n");
 871	write_lock(&journal->j_state_lock);
 872	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
 873	commit_transaction->t_state = T_COMMIT_JFLUSH;
 874	write_unlock(&journal->j_state_lock);
 875
 876	if (!jbd2_has_feature_async_commit(journal)) {
 877		err = journal_submit_commit_record(journal, commit_transaction,
 878						&cbh, crc32_sum);
 879		if (err)
 880			jbd2_journal_abort(journal, err);
 881	}
 882	if (cbh)
 883		err = journal_wait_on_commit_record(journal, cbh);
 884	stats.run.rs_blocks_logged++;
 885	if (jbd2_has_feature_async_commit(journal) &&
 886	    journal->j_flags & JBD2_BARRIER) {
 887		blkdev_issue_flush(journal->j_dev);
 888	}
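	/*
	 * Note: for async commit this flush is what actually makes the
	 * commit record durable on the journal device, since it was not
	 * submitted with any ordering or flush flags of its own.
	 */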
 889
 890	if (err)
 891		jbd2_journal_abort(journal, err);
 892
 893	WARN_ON_ONCE(
 894		atomic_read(&commit_transaction->t_outstanding_credits) < 0);
 895
 896	/*
 897	 * Now the disk caches for the filesystem device are flushed, so it
 898	 * is safe to erase checkpointed transactions from the log by
 899	 * updating the journal superblock.
 900	 */
 901	if (update_tail)
 902		jbd2_update_log_tail(journal, first_tid, first_block);
 903
 904	/* End of a transaction!  Finally, we can do checkpoint
 905           processing: any buffers committed as a result of this
 906           transaction can be removed from any checkpoint list they
 907           were on before. */
 908
 909	jbd2_debug(3, "JBD2: commit phase 6\n");
 910
 911	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
 912	J_ASSERT(commit_transaction->t_buffers == NULL);
 913	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
 914	J_ASSERT(commit_transaction->t_shadow_list == NULL);
 915
 916restart_loop:
 917	/*
 918	 * As there are other places (journal_unmap_buffer()) adding buffers
 919	 * to this list we have to be careful and hold the j_list_lock.
 920	 */
 921	spin_lock(&journal->j_list_lock);
 922	while (commit_transaction->t_forget) {
 923		transaction_t *cp_transaction;
 924		struct buffer_head *bh;
 925		int try_to_free = 0;
 926		bool drop_ref;
 927
 928		jh = commit_transaction->t_forget;
 929		spin_unlock(&journal->j_list_lock);
 930		bh = jh2bh(jh);
 931		/*
 932		 * Get a reference so that bh cannot be freed before we are
 933		 * done with it.
 934		 */
 935		get_bh(bh);
 936		spin_lock(&jh->b_state_lock);
 937		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);
 938
 939		/*
 940		 * If there is undo-protected committed data against
 941		 * this buffer, then we can remove it now.  If it is a
 942		 * buffer needing such protection, the old frozen_data
 943		 * field now points to a committed version of the
 944		 * buffer, so rotate that field to the new committed
 945		 * data.
 946		 *
 947		 * Otherwise, we can just throw away the frozen data now.
 948		 *
 949		 * We also know that the frozen data has already fired
 950		 * its triggers if they exist, so we can clear that too.
 951		 */
 952		if (jh->b_committed_data) {
 953			jbd2_free(jh->b_committed_data, bh->b_size);
 954			jh->b_committed_data = NULL;
 955			if (jh->b_frozen_data) {
 956				jh->b_committed_data = jh->b_frozen_data;
 957				jh->b_frozen_data = NULL;
 958				jh->b_frozen_triggers = NULL;
 959			}
 960		} else if (jh->b_frozen_data) {
 961			jbd2_free(jh->b_frozen_data, bh->b_size);
 962			jh->b_frozen_data = NULL;
 963			jh->b_frozen_triggers = NULL;
 964		}
 965
 966		spin_lock(&journal->j_list_lock);
 967		cp_transaction = jh->b_cp_transaction;
 968		if (cp_transaction) {
 969			JBUFFER_TRACE(jh, "remove from old cp transaction");
 970			cp_transaction->t_chp_stats.cs_dropped++;
 971			__jbd2_journal_remove_checkpoint(jh);
 972		}
 973
 974		/* Only re-checkpoint the buffer_head if it is marked
 975		 * dirty.  If the buffer was added to the BJ_Forget list
 976		 * by jbd2_journal_forget, it may no longer be dirty and
 977		 * there's no point in keeping a checkpoint record for
 978		 * it. */
 979
 980		/*
 981		 * A buffer which has been freed while still being journaled by
 982		 * a previous transaction is refiled to BJ_Forget of the running
 983		 * transaction. If the just-committed transaction contains an
 984		 * "add to orphan" operation, we can completely invalidate the
 985		 * buffer now. We are rather thorough about that, since the
 986		 * buffer may still be accessible when blocksize < pagesize and
 987		 * it is attached to the last partial page.
 988		 */
 989		if (buffer_freed(bh) && !jh->b_next_transaction) {
 990			struct address_space *mapping;
 991
 992			clear_buffer_freed(bh);
 993			clear_buffer_jbddirty(bh);
 994
 995			/*
 996			 * Block device buffers need to stay mapped all the
 997			 * time, so it is enough to clear buffer_jbddirty and
 998			 * buffer_freed bits. For the file mapping buffers (i.e.
 999			 * journalled data) we need to unmap the buffer and
1000			 * clear more bits. We also need to be careful about the
1001			 * check because the data page mapping can get cleared
1002			 * out from under us. Note that if mapping == NULL, we
1003			 * don't need to unmap the buffer because the page is
1004			 * already detached from the mapping and buffers cannot
1005			 * get reused.
1006			 */
1007			mapping = READ_ONCE(bh->b_folio->mapping);
1008			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1009				clear_buffer_mapped(bh);
1010				clear_buffer_new(bh);
1011				clear_buffer_req(bh);
1012				bh->b_bdev = NULL;
1013			}
1014		}
1015
1016		if (buffer_jbddirty(bh)) {
1017			JBUFFER_TRACE(jh, "add to new checkpointing trans");
1018			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
1019			if (is_journal_aborted(journal))
1020				clear_buffer_jbddirty(bh);
1021		} else {
1022			J_ASSERT_BH(bh, !buffer_dirty(bh));
1023			/*
1024			 * A buffer on the BJ_Forget list which is not jbddirty
1025			 * has been freed by this transaction, and hence it
1026			 * could not have been reallocated until this
1027			 * transaction has committed. *BUT* it could be
1028			 * reallocated once we have written all the data to
1029			 * disk and before we process the buffer on the
1030			 * BJ_Forget list.
1031			 */
1032			if (!jh->b_next_transaction)
1033				try_to_free = 1;
1034		}
1035		JBUFFER_TRACE(jh, "refile or unfile buffer");
1036		drop_ref = __jbd2_journal_refile_buffer(jh);
1037		spin_unlock(&jh->b_state_lock);
1038		if (drop_ref)
1039			jbd2_journal_put_journal_head(jh);
1040		if (try_to_free)
1041			release_buffer_page(bh);	/* Drops bh reference */
1042		else
1043			__brelse(bh);
1044		cond_resched_lock(&journal->j_list_lock);
1045	}
1046	spin_unlock(&journal->j_list_lock);
1047	/*
1048	 * This is a bit sleazy.  We use j_list_lock to protect the
1049	 * transition of a transaction into T_FINISHED state and the call to
1050	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1051	 * other checkpointing code processing the transaction...
1052	 */
1053	write_lock(&journal->j_state_lock);
1054	spin_lock(&journal->j_list_lock);
1055	/*
1056	 * Now recheck whether any buffers got attached to the transaction
1057	 * while the lock was dropped...
1058	 */
1059	if (commit_transaction->t_forget) {
1060		spin_unlock(&journal->j_list_lock);
1061		write_unlock(&journal->j_state_lock);
1062		goto restart_loop;
1063	}
1064
1065	/* Add the transaction to the checkpoint list.
1066	 * __jbd2_journal_remove_checkpoint() cannot destroy the transaction
1067	 * under us because it is not marked as T_FINISHED yet. */
1068	if (journal->j_checkpoint_transactions == NULL) {
1069		journal->j_checkpoint_transactions = commit_transaction;
1070		commit_transaction->t_cpnext = commit_transaction;
1071		commit_transaction->t_cpprev = commit_transaction;
1072	} else {
1073		commit_transaction->t_cpnext =
1074			journal->j_checkpoint_transactions;
1075		commit_transaction->t_cpprev =
1076			commit_transaction->t_cpnext->t_cpprev;
1077		commit_transaction->t_cpnext->t_cpprev =
1078			commit_transaction;
1079		commit_transaction->t_cpprev->t_cpnext =
1080				commit_transaction;
1081	}
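	/*
	 * i.e. the new transaction is linked in just before the current list
	 * head, so j_checkpoint_transactions keeps pointing at the oldest
	 * transaction and this one becomes the newest on the circular list.
	 */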
1082	spin_unlock(&journal->j_list_lock);
1083
1084	/* Done with this transaction! */
1085
1086	jbd2_debug(3, "JBD2: commit phase 7\n");
1087
1088	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1089
1090	commit_transaction->t_start = jiffies;
1091	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1092					      commit_transaction->t_start);
1093
1094	/*
1095	 * File the transaction statistics
1096	 */
1097	stats.ts_tid = commit_transaction->t_tid;
1098	stats.run.rs_handle_count =
1099		atomic_read(&commit_transaction->t_handle_count);
1100	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1101			     commit_transaction->t_tid, &stats.run);
1102	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1103
1104	commit_transaction->t_state = T_COMMIT_CALLBACK;
1105	J_ASSERT(commit_transaction == journal->j_committing_transaction);
1106	WRITE_ONCE(journal->j_commit_sequence, commit_transaction->t_tid);
1107	journal->j_committing_transaction = NULL;
1108	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1109
1110	/*
1111	 * Weight the old average more heavily than the latest commit time
1112	 * so we don't react too strongly to vast changes in the commit time.
1113	 */
1114	if (likely(journal->j_average_commit_time))
1115		journal->j_average_commit_time = (commit_time +
1116				journal->j_average_commit_time*3) / 4;
1117	else
1118		journal->j_average_commit_time = commit_time;
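	/*
	 * i.e. an exponentially weighted moving average:
	 *	new_avg = (commit_time + 3 * old_avg) / 4
	 * so a single long commit moves the average by at most a quarter of
	 * the difference.
	 */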
1119
1120	write_unlock(&journal->j_state_lock);
1121
1122	if (journal->j_commit_callback)
1123		journal->j_commit_callback(journal, commit_transaction);
1124	if (journal->j_fc_cleanup_callback)
1125		journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
1126
1127	trace_jbd2_end_commit(journal, commit_transaction);
1128	jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
1129		  journal->j_commit_sequence, journal->j_tail_sequence);
1130
1131	write_lock(&journal->j_state_lock);
1132	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
1133	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
1134	spin_lock(&journal->j_list_lock);
1135	commit_transaction->t_state = T_FINISHED;
1136	/* Check if the transaction can be dropped now that we are finished */
1137	if (commit_transaction->t_checkpoint_list == NULL) {
1138		__jbd2_journal_drop_transaction(journal, commit_transaction);
1139		jbd2_journal_free_transaction(commit_transaction);
1140	}
1141	spin_unlock(&journal->j_list_lock);
1142	write_unlock(&journal->j_state_lock);
1143	wake_up(&journal->j_wait_done_commit);
1144	wake_up(&journal->j_fc_wait);
1145
1146	/*
1147	 * Calculate overall stats
1148	 */
1149	spin_lock(&journal->j_history_lock);
1150	journal->j_stats.ts_tid++;
1151	journal->j_stats.ts_requested += stats.ts_requested;
1152	journal->j_stats.run.rs_wait += stats.run.rs_wait;
1153	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1154	journal->j_stats.run.rs_running += stats.run.rs_running;
1155	journal->j_stats.run.rs_locked += stats.run.rs_locked;
1156	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1157	journal->j_stats.run.rs_logging += stats.run.rs_logging;
1158	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1159	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1160	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1161	spin_unlock(&journal->j_history_lock);
1162}