fs/jbd2/commit.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * linux/fs/jbd2/commit.c
   4 *
   5 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
   6 *
   7 * Copyright 1998 Red Hat corp --- All Rights Reserved
   8 *
   9 * Journal commit routines for the generic filesystem journaling code;
  10 * part of the ext2fs journaling system.
  11 */
  12
  13#include <linux/time.h>
  14#include <linux/fs.h>
  15#include <linux/jbd2.h>
  16#include <linux/errno.h>
  17#include <linux/slab.h>
  18#include <linux/mm.h>
  19#include <linux/pagemap.h>
  20#include <linux/jiffies.h>
  21#include <linux/crc32.h>
  22#include <linux/writeback.h>
  23#include <linux/backing-dev.h>
  24#include <linux/bio.h>
  25#include <linux/blkdev.h>
  26#include <linux/bitops.h>
  27#include <trace/events/jbd2.h>
  28
  29/*
  30 * IO end handler for temporary buffer_heads handling writes to the journal.
  31 */
  32static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
  33{
  34	struct buffer_head *orig_bh = bh->b_private;
  35
  36	BUFFER_TRACE(bh, "");
  37	if (uptodate)
  38		set_buffer_uptodate(bh);
  39	else
  40		clear_buffer_uptodate(bh);
  41	if (orig_bh) {
  42		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
  43		smp_mb__after_atomic();
  44		wake_up_bit(&orig_bh->b_state, BH_Shadow);
  45	}
  46	unlock_buffer(bh);
  47}
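/*
 * Note: the clear_bit_unlock(BH_Shadow) + wake_up_bit() pair above is the
 * completion side of the shadow-buffer handshake.  A task that wants write
 * access to a buffer whose frozen copy is still being written to the journal
 * sleeps until BH_Shadow is cleared here.  A rough sketch of the waiting side
 * (see do_get_write_access() in fs/jbd2/transaction.c; details may differ
 * between kernel versions):
 *
 *	if (buffer_shadow(bh)) {
 *		spin_unlock(&jh->b_state_lock);
 *		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
 *		goto repeat;
 *	}
 */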
  48
  49/*
  50 * When an ext4 file is truncated, it is possible that some pages are not
  51 * successfully freed, because they are attached to a committing transaction.
  52 * After the transaction commits, these pages are left on the LRU, with no
  53 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
  54 * by the VM, but their apparent absence upsets the VM accounting, and it makes
  55 * the numbers in /proc/meminfo look odd.
  56 *
  57 * So here, we have a buffer which has just come off the forget list.  Look to
  58 * see if we can strip all buffers from the backing page.
  59 *
  60 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
  61 * caller provided us with a ref against the buffer, and we drop that here.
  62 */
  63static void release_buffer_page(struct buffer_head *bh)
  64{
  65	struct folio *folio;
  66	struct page *page;
  67
  68	if (buffer_dirty(bh))
  69		goto nope;
  70	if (atomic_read(&bh->b_count) != 1)
  71		goto nope;
  72	page = bh->b_page;
  73	if (!page)
  74		goto nope;
  75	folio = page_folio(page);
  76	if (folio->mapping)
  77		goto nope;
  78
  79	/* OK, it's a truncated page */
  80	if (!folio_trylock(folio))
  81		goto nope;
  82
  83	folio_get(folio);
  84	__brelse(bh);
  85	try_to_free_buffers(folio);
  86	folio_unlock(folio);
  87	folio_put(folio);
  88	return;
  89
  90nope:
  91	__brelse(bh);
  92}
  93
  94static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
  95{
  96	struct commit_header *h;
  97	__u32 csum;
  98
  99	if (!jbd2_journal_has_csum_v2or3(j))
 100		return;
 101
 102	h = (struct commit_header *)(bh->b_data);
 103	h->h_chksum_type = 0;
 104	h->h_chksum_size = 0;
 105	h->h_chksum[0] = 0;
 106	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 107	h->h_chksum[0] = cpu_to_be32(csum);
 108}
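/*
 * Note on verification (informational, not from this file): the checksum
 * above is taken over the whole commit block with h_chksum[0] zeroed first,
 * so a checker must clear that field again before recomputing jbd2_chksum()
 * over bh->b_data and comparing the result.
 */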
 109
 110/*
 111 * Done it all: now submit the commit record.  We should have
 112 * cleaned up our previous buffers by now, so if we are in abort
 113 * mode we can now just skip the rest of the journal write
 114 * entirely.
 115 *
 116 * Returns 1 if the journal needs to be aborted or 0 on success
 117 */
 118static int journal_submit_commit_record(journal_t *journal,
 119					transaction_t *commit_transaction,
 120					struct buffer_head **cbh,
 121					__u32 crc32_sum)
 122{
 123	struct commit_header *tmp;
 124	struct buffer_head *bh;
 125	struct timespec64 now;
 126	blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC;
 127
 128	*cbh = NULL;
 129
 130	if (is_journal_aborted(journal))
 131		return 0;
 132
 133	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
 134						JBD2_COMMIT_BLOCK);
 135	if (!bh)
 136		return 1;
 137
 138	tmp = (struct commit_header *)bh->b_data;
 139	ktime_get_coarse_real_ts64(&now);
 140	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
 141	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
 142
 143	if (jbd2_has_feature_checksum(journal)) {
 144		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
 145		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 146		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 147	}
 148	jbd2_commit_block_csum_set(journal, bh);
 149
 150	BUFFER_TRACE(bh, "submit commit block");
 151	lock_buffer(bh);
 152	clear_buffer_dirty(bh);
 153	set_buffer_uptodate(bh);
 154	bh->b_end_io = journal_end_buffer_io_sync;
 155
 156	if (journal->j_flags & JBD2_BARRIER &&
 157	    !jbd2_has_feature_async_commit(journal))
 158		write_flags |= REQ_PREFLUSH | REQ_FUA;
 159
 160	submit_bh(write_flags, bh);
 161	*cbh = bh;
 162	return 0;
 163}
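/*
 * Ordering note, summarising the logic above: with JBD2_BARRIER set and no
 * async-commit feature, the commit block itself carries REQ_PREFLUSH |
 * REQ_FUA, so it cannot reach stable storage ahead of the transaction's
 * descriptor and metadata blocks.  With the async-commit feature, the commit
 * block is written without those flags and integrity instead relies on the
 * crc32 carried in the commit header plus the explicit blkdev_issue_flush()
 * issued later in jbd2_journal_commit_transaction().
 */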
 164
 165/*
 166 * This function along with journal_submit_commit_record
  167 * allows writing the commit record asynchronously.
 168 */
 169static int journal_wait_on_commit_record(journal_t *journal,
 170					 struct buffer_head *bh)
 171{
 172	int ret = 0;
 173
 174	clear_buffer_dirty(bh);
 175	wait_on_buffer(bh);
 176
 177	if (unlikely(!buffer_uptodate(bh)))
 178		ret = -EIO;
 179	put_bh(bh);            /* One for getblk() */
 180
 181	return ret;
 182}
 183
 184/*
 185 * write the filemap data using writepage() address_space_operations.
 186 * We don't do block allocation here even for delalloc. We don't
 187 * use writepages() because with delayed allocation we may be doing
 188 * block allocation in writepages().
 189 */
 190int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
 191{
 192	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
 193	struct writeback_control wbc = {
 194		.sync_mode =  WB_SYNC_ALL,
 195		.nr_to_write = mapping->nrpages * 2,
 196		.range_start = jinode->i_dirty_start,
 197		.range_end = jinode->i_dirty_end,
 198	};
 199
 200	/*
 201	 * submit the inode data buffers. We use writepage
  202	 * instead of writepages, because writepages can do
 203	 * block allocation with delalloc. We need to write
 204	 * only allocated blocks here.
 205	 */
 206	return generic_writepages(mapping, &wbc);
 207}
 208
 209/* Send all the data buffers related to an inode */
 210int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode)
 211{
 212	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
 213		return 0;
 214
 215	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 216	return journal->j_submit_inode_data_buffers(jinode);
 217
 218}
 219EXPORT_SYMBOL(jbd2_submit_inode_data);
 220
 221int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
 222{
 223	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
 224		!jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
 225		return 0;
 226	return filemap_fdatawait_range_keep_errors(
 227		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
 228		jinode->i_dirty_end);
 229}
 230EXPORT_SYMBOL(jbd2_wait_inode_data);
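/*
 * jbd2_submit_inode_data() and jbd2_wait_inode_data() above are exported
 * because they are also driven from outside this commit path; the ext4
 * fast-commit code is the expected external caller (an assumption based on
 * the exports, not something stated in this file).
 */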
 231
 232/*
 233 * Submit all the data buffers of inode associated with the transaction to
 234 * disk.
 235 *
 236 * We are in a committing transaction. Therefore no new inode can be added to
  237 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
  238 * currently operate on from being released while we write out pages.
 239 */
 240static int journal_submit_data_buffers(journal_t *journal,
 241		transaction_t *commit_transaction)
 242{
 243	struct jbd2_inode *jinode;
 244	int err, ret = 0;
 245
 246	spin_lock(&journal->j_list_lock);
 247	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 248		if (!(jinode->i_flags & JI_WRITE_DATA))
 249			continue;
 250		jinode->i_flags |= JI_COMMIT_RUNNING;
 251		spin_unlock(&journal->j_list_lock);
 252		/* submit the inode data buffers. */
 253		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 254		if (journal->j_submit_inode_data_buffers) {
 255			err = journal->j_submit_inode_data_buffers(jinode);
 256			if (!ret)
 257				ret = err;
 258		}
 259		spin_lock(&journal->j_list_lock);
 260		J_ASSERT(jinode->i_transaction == commit_transaction);
 261		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 262		smp_mb();
 263		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 264	}
 265	spin_unlock(&journal->j_list_lock);
 266	return ret;
 267}
 268
 269int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
 270{
 271	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
 272
 273	return filemap_fdatawait_range_keep_errors(mapping,
 274						   jinode->i_dirty_start,
 275						   jinode->i_dirty_end);
 276}
 277
 278/*
 279 * Wait for data submitted for writeout, refile inodes to proper
 280 * transaction if needed.
 281 *
 282 */
 283static int journal_finish_inode_data_buffers(journal_t *journal,
 284		transaction_t *commit_transaction)
 285{
 286	struct jbd2_inode *jinode, *next_i;
 287	int err, ret = 0;
 288
 289	/* For locking, see the comment in journal_submit_data_buffers() */
 290	spin_lock(&journal->j_list_lock);
 291	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 292		if (!(jinode->i_flags & JI_WAIT_DATA))
 293			continue;
 294		jinode->i_flags |= JI_COMMIT_RUNNING;
 295		spin_unlock(&journal->j_list_lock);
 296		/* wait for the inode data buffers writeout. */
 297		if (journal->j_finish_inode_data_buffers) {
 298			err = journal->j_finish_inode_data_buffers(jinode);
 299			if (!ret)
 300				ret = err;
 301		}
 302		spin_lock(&journal->j_list_lock);
 303		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 304		smp_mb();
 305		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 306	}
 307
 308	/* Now refile inode to proper lists */
 309	list_for_each_entry_safe(jinode, next_i,
 310				 &commit_transaction->t_inode_list, i_list) {
 311		list_del(&jinode->i_list);
 312		if (jinode->i_next_transaction) {
 313			jinode->i_transaction = jinode->i_next_transaction;
 314			jinode->i_next_transaction = NULL;
 315			list_add(&jinode->i_list,
 316				&jinode->i_transaction->t_inode_list);
 317		} else {
 318			jinode->i_transaction = NULL;
 319			jinode->i_dirty_start = 0;
 320			jinode->i_dirty_end = 0;
 321		}
 322	}
 323	spin_unlock(&journal->j_list_lock);
 324
 325	return ret;
 326}
 327
 328static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 329{
 330	struct page *page = bh->b_page;
 331	char *addr;
 332	__u32 checksum;
 333
 334	addr = kmap_atomic(page);
 335	checksum = crc32_be(crc32_sum,
 336		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
 337	kunmap_atomic(addr);
 338
 339	return checksum;
 340}
 341
 342static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
 343				   unsigned long long block)
 344{
 345	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
 346	if (jbd2_has_feature_64bit(j))
 347		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 348}
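/*
 * Worked example (illustrative): on a 64-bit journal, block number
 * 0x123456789 is stored as t_blocknr = 0x23456789 (low 32 bits) and
 * t_blocknr_high = 0x1 (remaining high bits).  The shift is written as
 * (block >> 31) >> 1 rather than (block >> 32), presumably to keep the
 * expression well defined even when the block number type is only 32 bits
 * wide.
 */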
 349
 350static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 351				    struct buffer_head *bh, __u32 sequence)
 352{
 353	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
 354	struct page *page = bh->b_page;
 355	__u8 *addr;
 356	__u32 csum32;
 357	__be32 seq;
 358
 359	if (!jbd2_journal_has_csum_v2or3(j))
 360		return;
 361
 362	seq = cpu_to_be32(sequence);
 363	addr = kmap_atomic(page);
 364	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
 365	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
 366			     bh->b_size);
 367	kunmap_atomic(addr);
 368
 369	if (jbd2_has_feature_csum3(j))
 370		tag3->t_checksum = cpu_to_be32(csum32);
 371	else
 372		tag->t_checksum = cpu_to_be16(csum32);
 373}
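/*
 * Context for the commit path below: transactions are normally filled by
 * filesystem code through the handle API and later written out here by the
 * kjournald2 thread.  A minimal, illustrative caller (not taken from this
 * file, error handling omitted) might look like:
 *
 *	handle_t *handle = jbd2_journal_start(journal, 1);
 *	jbd2_journal_get_write_access(handle, bh);
 *	// ... modify bh ...
 *	jbd2_journal_dirty_metadata(handle, bh);
 *	jbd2_journal_stop(handle);
 *	jbd2_journal_force_commit(journal);	// e.g. on fsync
 *
 * jbd2_journal_stop() only drops the handle; the actual writeout happens in
 * jbd2_journal_commit_transaction() below.
 */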
 374/*
 375 * jbd2_journal_commit_transaction
 376 *
 377 * The primary function for committing a transaction to the log.  This
 378 * function is called by the journal thread to begin a complete commit.
 379 */
 380void jbd2_journal_commit_transaction(journal_t *journal)
 381{
 382	struct transaction_stats_s stats;
 383	transaction_t *commit_transaction;
 384	struct journal_head *jh;
 385	struct buffer_head *descriptor;
 386	struct buffer_head **wbuf = journal->j_wbuf;
 387	int bufs;
 388	int flags;
 389	int err;
 390	unsigned long long blocknr;
 391	ktime_t start_time;
 392	u64 commit_time;
 393	char *tagp = NULL;
 394	journal_block_tag_t *tag = NULL;
 395	int space_left = 0;
 396	int first_tag = 0;
 397	int tag_flag;
 398	int i;
 399	int tag_bytes = journal_tag_bytes(journal);
 400	struct buffer_head *cbh = NULL; /* For transactional checksums */
 401	__u32 crc32_sum = ~0;
 402	struct blk_plug plug;
 403	/* Tail of the journal */
 404	unsigned long first_block;
 405	tid_t first_tid;
 406	int update_tail;
 407	int csum_size = 0;
 408	LIST_HEAD(io_bufs);
 409	LIST_HEAD(log_bufs);
 410
 411	if (jbd2_journal_has_csum_v2or3(journal))
 412		csum_size = sizeof(struct jbd2_journal_block_tail);
 413
 414	/*
 415	 * First job: lock down the current transaction and wait for
 416	 * all outstanding updates to complete.
 417	 */
 418
 419	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
 420	if (journal->j_flags & JBD2_FLUSHED) {
 421		jbd2_debug(3, "super block updated\n");
 422		mutex_lock_io(&journal->j_checkpoint_mutex);
 423		/*
 424		 * We hold j_checkpoint_mutex so tail cannot change under us.
 425		 * We don't need any special data guarantees for writing sb
 426		 * since journal is empty and it is ok for write to be
 427		 * flushed only with transaction commit.
 428		 */
 429		jbd2_journal_update_sb_log_tail(journal,
 430						journal->j_tail_sequence,
 431						journal->j_tail,
 432						REQ_SYNC);
 433		mutex_unlock(&journal->j_checkpoint_mutex);
 434	} else {
 435		jbd2_debug(3, "superblock not updated\n");
 436	}
 437
 438	J_ASSERT(journal->j_running_transaction != NULL);
 439	J_ASSERT(journal->j_committing_transaction == NULL);
 440
 441	write_lock(&journal->j_state_lock);
 442	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
 443	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
 444		DEFINE_WAIT(wait);
 445
 446		prepare_to_wait(&journal->j_fc_wait, &wait,
 447				TASK_UNINTERRUPTIBLE);
 448		write_unlock(&journal->j_state_lock);
 449		schedule();
 450		write_lock(&journal->j_state_lock);
 451		finish_wait(&journal->j_fc_wait, &wait);
 452		/*
 453		 * TODO: by blocking fast commits here, we are increasing
 454		 * fsync() latency slightly. Strictly speaking, we don't need
 455		 * to block fast commits until the transaction enters T_FLUSH
 456		 * state. So an optimization is possible where we block new fast
 457		 * commits here and wait for existing ones to complete
 458		 * just before we enter T_FLUSH. That way, the existing fast
  459		 * commits and this full commit can proceed in parallel.
 460		 */
 461	}
 462	write_unlock(&journal->j_state_lock);
 463
 464	commit_transaction = journal->j_running_transaction;
 465
 466	trace_jbd2_start_commit(journal, commit_transaction);
 467	jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
 468			commit_transaction->t_tid);
 469
 470	write_lock(&journal->j_state_lock);
 471	journal->j_fc_off = 0;
 472	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 473	commit_transaction->t_state = T_LOCKED;
 474
 475	trace_jbd2_commit_locking(journal, commit_transaction);
 476	stats.run.rs_wait = commit_transaction->t_max_wait;
 477	stats.run.rs_request_delay = 0;
 478	stats.run.rs_locked = jiffies;
 479	if (commit_transaction->t_requested)
 480		stats.run.rs_request_delay =
 481			jbd2_time_diff(commit_transaction->t_requested,
 482				       stats.run.rs_locked);
 483	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
 484					      stats.run.rs_locked);
 485
 486	// waits for any t_updates to finish
 487	jbd2_journal_wait_updates(journal);
 488
 489	commit_transaction->t_state = T_SWITCH;
 490
 491	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 492			journal->j_max_transaction_buffers);
 493
 494	/*
 495	 * First thing we are allowed to do is to discard any remaining
 496	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
 497	 * that there are no such buffers: if a large filesystem
 498	 * operation like a truncate needs to split itself over multiple
 499	 * transactions, then it may try to do a jbd2_journal_restart() while
 500	 * there are still BJ_Reserved buffers outstanding.  These must
 501	 * be released cleanly from the current transaction.
 502	 *
 503	 * In this case, the filesystem must still reserve write access
 504	 * again before modifying the buffer in the new transaction, but
 505	 * we do not require it to remember exactly which old buffers it
 506	 * has reserved.  This is consistent with the existing behaviour
 507	 * that multiple jbd2_journal_get_write_access() calls to the same
 508	 * buffer are perfectly permissible.
 509	 * We use journal->j_state_lock here to serialize processing of
 510	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
 511	 */
 512	while (commit_transaction->t_reserved_list) {
 513		jh = commit_transaction->t_reserved_list;
 514		JBUFFER_TRACE(jh, "reserved, unused: refile");
 515		/*
 516		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
 517		 * leave undo-committed data.
 518		 */
 519		if (jh->b_committed_data) {
 520			struct buffer_head *bh = jh2bh(jh);
 521
 522			spin_lock(&jh->b_state_lock);
 523			jbd2_free(jh->b_committed_data, bh->b_size);
 524			jh->b_committed_data = NULL;
 525			spin_unlock(&jh->b_state_lock);
 526		}
 527		jbd2_journal_refile_buffer(journal, jh);
 528	}
 529
 530	write_unlock(&journal->j_state_lock);
 531	/*
 532	 * Now try to drop any written-back buffers from the journal's
 533	 * checkpoint lists.  We do this *before* commit because it potentially
 534	 * frees some memory
 535	 */
 536	spin_lock(&journal->j_list_lock);
 537	__jbd2_journal_clean_checkpoint_list(journal, false);
 538	spin_unlock(&journal->j_list_lock);
 539
 540	jbd2_debug(3, "JBD2: commit phase 1\n");
 541
 542	/*
  543	 * Clear the revoked flag to reflect that there are no revoked buffers
 544	 * in the next transaction which is going to be started.
 545	 */
 546	jbd2_clear_buffer_revoked_flags(journal);
 547
 548	/*
 549	 * Switch to a new revoke table.
 550	 */
 551	jbd2_journal_switch_revoke_table(journal);
 552
 553	write_lock(&journal->j_state_lock);
 554	/*
 555	 * Reserved credits cannot be claimed anymore, free them
 556	 */
 557	atomic_sub(atomic_read(&journal->j_reserved_credits),
 558		   &commit_transaction->t_outstanding_credits);
 559
 560	trace_jbd2_commit_flushing(journal, commit_transaction);
 561	stats.run.rs_flushing = jiffies;
 562	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
 563					     stats.run.rs_flushing);
 564
 565	commit_transaction->t_state = T_FLUSH;
 566	journal->j_committing_transaction = commit_transaction;
 567	journal->j_running_transaction = NULL;
 568	start_time = ktime_get();
 569	commit_transaction->t_log_start = journal->j_head;
 570	wake_up_all(&journal->j_wait_transaction_locked);
 571	write_unlock(&journal->j_state_lock);
 572
 573	jbd2_debug(3, "JBD2: commit phase 2a\n");
 574
 575	/*
 576	 * Now start flushing things to disk, in the order they appear
 577	 * on the transaction lists.  Data blocks go first.
 578	 */
 579	err = journal_submit_data_buffers(journal, commit_transaction);
 580	if (err)
 581		jbd2_journal_abort(journal, err);
 582
 583	blk_start_plug(&plug);
 584	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
 585
 586	jbd2_debug(3, "JBD2: commit phase 2b\n");
 587
 588	/*
 589	 * Way to go: we have now written out all of the data for a
 590	 * transaction!  Now comes the tricky part: we need to write out
 591	 * metadata.  Loop over the transaction's entire buffer list:
 592	 */
 593	write_lock(&journal->j_state_lock);
 594	commit_transaction->t_state = T_COMMIT;
 595	write_unlock(&journal->j_state_lock);
 596
 597	trace_jbd2_commit_logging(journal, commit_transaction);
 598	stats.run.rs_logging = jiffies;
 599	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
 600					       stats.run.rs_logging);
 601	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
 602	stats.run.rs_blocks_logged = 0;
 603
 604	J_ASSERT(commit_transaction->t_nr_buffers <=
 605		 atomic_read(&commit_transaction->t_outstanding_credits));
 606
 607	err = 0;
 608	bufs = 0;
 609	descriptor = NULL;
 610	while (commit_transaction->t_buffers) {
 611
 612		/* Find the next buffer to be journaled... */
 613
 614		jh = commit_transaction->t_buffers;
 615
 616		/* If we're in abort mode, we just un-journal the buffer and
 617		   release it. */
 618
 619		if (is_journal_aborted(journal)) {
 620			clear_buffer_jbddirty(jh2bh(jh));
 621			JBUFFER_TRACE(jh, "journal is aborting: refile");
 622			jbd2_buffer_abort_trigger(jh,
 623						  jh->b_frozen_data ?
 624						  jh->b_frozen_triggers :
 625						  jh->b_triggers);
 626			jbd2_journal_refile_buffer(journal, jh);
 627			/* If that was the last one, we need to clean up
 628			 * any descriptor buffers which may have been
 629			 * already allocated, even if we are now
 630			 * aborting. */
 631			if (!commit_transaction->t_buffers)
 632				goto start_journal_io;
 633			continue;
 634		}
 635
 636		/* Make sure we have a descriptor block in which to
 637		   record the metadata buffer. */
 638
 639		if (!descriptor) {
 640			J_ASSERT (bufs == 0);
 641
 642			jbd2_debug(4, "JBD2: get descriptor\n");
 643
 644			descriptor = jbd2_journal_get_descriptor_buffer(
 645							commit_transaction,
 646							JBD2_DESCRIPTOR_BLOCK);
 647			if (!descriptor) {
 648				jbd2_journal_abort(journal, -EIO);
 649				continue;
 650			}
 651
 652			jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
 653				(unsigned long long)descriptor->b_blocknr,
 654				descriptor->b_data);
 655			tagp = &descriptor->b_data[sizeof(journal_header_t)];
 656			space_left = descriptor->b_size -
 657						sizeof(journal_header_t);
 658			first_tag = 1;
 659			set_buffer_jwrite(descriptor);
 660			set_buffer_dirty(descriptor);
 661			wbuf[bufs++] = descriptor;
 662
 663			/* Record it so that we can wait for IO
 664                           completion later */
 665			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
 666			jbd2_file_log_bh(&log_bufs, descriptor);
 667		}
 668
 669		/* Where is the buffer to be written? */
 670
 671		err = jbd2_journal_next_log_block(journal, &blocknr);
 672		/* If the block mapping failed, just abandon the buffer
 673		   and repeat this loop: we'll fall into the
 674		   refile-on-abort condition above. */
 675		if (err) {
 676			jbd2_journal_abort(journal, err);
 677			continue;
 678		}
 679
 680		/*
 681		 * start_this_handle() uses t_outstanding_credits to determine
 682		 * the free space in the log.
 683		 */
 684		atomic_dec(&commit_transaction->t_outstanding_credits);
 685
 686		/* Bump b_count to prevent truncate from stumbling over
 687                   the shadowed buffer!  @@@ This can go if we ever get
 688                   rid of the shadow pairing of buffers. */
 689		atomic_inc(&jh2bh(jh)->b_count);
 690
 691		/*
 692		 * Make a temporary IO buffer with which to write it out
 693		 * (this will requeue the metadata buffer to BJ_Shadow).
 694		 */
 695		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 696		JBUFFER_TRACE(jh, "ph3: write metadata");
 697		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
 698						jh, &wbuf[bufs], blocknr);
 699		if (flags < 0) {
 700			jbd2_journal_abort(journal, flags);
 701			continue;
 702		}
 703		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
 704
 705		/* Record the new block's tag in the current descriptor
 706                   buffer */
 707
 708		tag_flag = 0;
 709		if (flags & 1)
 710			tag_flag |= JBD2_FLAG_ESCAPE;
 711		if (!first_tag)
 712			tag_flag |= JBD2_FLAG_SAME_UUID;
 713
 714		tag = (journal_block_tag_t *) tagp;
 715		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
 716		tag->t_flags = cpu_to_be16(tag_flag);
 717		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
 718					commit_transaction->t_tid);
 719		tagp += tag_bytes;
 720		space_left -= tag_bytes;
 721		bufs++;
 722
 723		if (first_tag) {
 724			memcpy (tagp, journal->j_uuid, 16);
 725			tagp += 16;
 726			space_left -= 16;
 727			first_tag = 0;
 728		}
 729
 730		/* If there's no more to do, or if the descriptor is full,
 731		   let the IO rip! */
 732
 733		if (bufs == journal->j_wbufsize ||
 734		    commit_transaction->t_buffers == NULL ||
 735		    space_left < tag_bytes + 16 + csum_size) {
 736
 737			jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);
 738
 739			/* Write an end-of-descriptor marker before
 740                           submitting the IOs.  "tag" still points to
 741                           the last tag we set up. */
 742
 743			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 744start_journal_io:
 745			if (descriptor)
 746				jbd2_descriptor_block_csum_set(journal,
 747							descriptor);
 748
 749			for (i = 0; i < bufs; i++) {
 750				struct buffer_head *bh = wbuf[i];
 751				/*
 752				 * Compute checksum.
 753				 */
 754				if (jbd2_has_feature_checksum(journal)) {
 755					crc32_sum =
 756					    jbd2_checksum_data(crc32_sum, bh);
 757				}
 758
 759				lock_buffer(bh);
 760				clear_buffer_dirty(bh);
 761				set_buffer_uptodate(bh);
 762				bh->b_end_io = journal_end_buffer_io_sync;
 763				submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 764			}
 765			cond_resched();
 766
 767			/* Force a new descriptor to be generated next
 768                           time round the loop. */
 769			descriptor = NULL;
 770			bufs = 0;
 771		}
 772	}
 773
 774	err = journal_finish_inode_data_buffers(journal, commit_transaction);
 775	if (err) {
 776		printk(KERN_WARNING
 777			"JBD2: Detected IO errors while flushing file data "
 778		       "on %s\n", journal->j_devname);
 779		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
 780			jbd2_journal_abort(journal, err);
 781		err = 0;
 782	}
 783
 784	/*
 785	 * Get current oldest transaction in the log before we issue flush
 786	 * to the filesystem device. After the flush we can be sure that
 787	 * blocks of all older transactions are checkpointed to persistent
 788	 * storage and we will be safe to update journal start in the
 789	 * superblock with the numbers we get here.
 790	 */
 791	update_tail =
 792		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
 793
 794	write_lock(&journal->j_state_lock);
 795	if (update_tail) {
 796		long freed = first_block - journal->j_tail;
 797
 798		if (first_block < journal->j_tail)
 799			freed += journal->j_last - journal->j_first;
  800		/* Update tail only if we free a significant amount of space */
 801		if (freed < jbd2_journal_get_max_txn_bufs(journal))
 802			update_tail = 0;
 803	}
 804	J_ASSERT(commit_transaction->t_state == T_COMMIT);
 805	commit_transaction->t_state = T_COMMIT_DFLUSH;
 806	write_unlock(&journal->j_state_lock);
 807
 808	/*
 809	 * If the journal is not located on the file system device,
 810	 * then we must flush the file system device before we issue
 811	 * the commit record
 812	 */
 813	if (commit_transaction->t_need_data_flush &&
 814	    (journal->j_fs_dev != journal->j_dev) &&
 815	    (journal->j_flags & JBD2_BARRIER))
 816		blkdev_issue_flush(journal->j_fs_dev);
 817
 818	/* Done it all: now write the commit record asynchronously. */
 819	if (jbd2_has_feature_async_commit(journal)) {
 820		err = journal_submit_commit_record(journal, commit_transaction,
 821						 &cbh, crc32_sum);
 822		if (err)
 823			jbd2_journal_abort(journal, err);
 824	}
 825
 826	blk_finish_plug(&plug);
 827
 828	/* Lo and behold: we have just managed to send a transaction to
 829           the log.  Before we can commit it, wait for the IO so far to
 830           complete.  Control buffers being written are on the
 831           transaction's t_log_list queue, and metadata buffers are on
 832           the io_bufs list.
 833
 834	   Wait for the buffers in reverse order.  That way we are
 835	   less likely to be woken up until all IOs have completed, and
 836	   so we incur less scheduling load.
 837	*/
 838
 839	jbd2_debug(3, "JBD2: commit phase 3\n");
 840
 841	while (!list_empty(&io_bufs)) {
 842		struct buffer_head *bh = list_entry(io_bufs.prev,
 843						    struct buffer_head,
 844						    b_assoc_buffers);
 845
 846		wait_on_buffer(bh);
 847		cond_resched();
 848
 849		if (unlikely(!buffer_uptodate(bh)))
 850			err = -EIO;
 851		jbd2_unfile_log_bh(bh);
 852		stats.run.rs_blocks_logged++;
 853
 854		/*
 855		 * The list contains temporary buffer heads created by
 856		 * jbd2_journal_write_metadata_buffer().
 857		 */
 858		BUFFER_TRACE(bh, "dumping temporary bh");
 859		__brelse(bh);
 860		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
 861		free_buffer_head(bh);
 862
 863		/* We also have to refile the corresponding shadowed buffer */
 864		jh = commit_transaction->t_shadow_list->b_tprev;
 865		bh = jh2bh(jh);
 866		clear_buffer_jwrite(bh);
 867		J_ASSERT_BH(bh, buffer_jbddirty(bh));
 868		J_ASSERT_BH(bh, !buffer_shadow(bh));
 869
 870		/* The metadata is now released for reuse, but we need
 871                   to remember it against this transaction so that when
 872                   we finally commit, we can do any checkpointing
 873                   required. */
 874		JBUFFER_TRACE(jh, "file as BJ_Forget");
 875		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
 876		JBUFFER_TRACE(jh, "brelse shadowed buffer");
 877		__brelse(bh);
 878	}
 879
 880	J_ASSERT (commit_transaction->t_shadow_list == NULL);
 881
 882	jbd2_debug(3, "JBD2: commit phase 4\n");
 883
 884	/* Here we wait for the revoke record and descriptor record buffers */
 885	while (!list_empty(&log_bufs)) {
 886		struct buffer_head *bh;
 887
 888		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
 889		wait_on_buffer(bh);
 890		cond_resched();
 891
 892		if (unlikely(!buffer_uptodate(bh)))
 893			err = -EIO;
 894
 895		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
 896		clear_buffer_jwrite(bh);
 897		jbd2_unfile_log_bh(bh);
 898		stats.run.rs_blocks_logged++;
 899		__brelse(bh);		/* One for getblk */
 900		/* AKPM: bforget here */
 901	}
 902
 903	if (err)
 904		jbd2_journal_abort(journal, err);
 905
 906	jbd2_debug(3, "JBD2: commit phase 5\n");
 907	write_lock(&journal->j_state_lock);
 908	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
 909	commit_transaction->t_state = T_COMMIT_JFLUSH;
 910	write_unlock(&journal->j_state_lock);
 911
 912	if (!jbd2_has_feature_async_commit(journal)) {
 913		err = journal_submit_commit_record(journal, commit_transaction,
 914						&cbh, crc32_sum);
 915		if (err)
 916			jbd2_journal_abort(journal, err);
 917	}
 918	if (cbh)
 919		err = journal_wait_on_commit_record(journal, cbh);
 920	stats.run.rs_blocks_logged++;
 921	if (jbd2_has_feature_async_commit(journal) &&
 922	    journal->j_flags & JBD2_BARRIER) {
 923		blkdev_issue_flush(journal->j_dev);
 924	}
 925
 926	if (err)
 927		jbd2_journal_abort(journal, err);
 928
 929	WARN_ON_ONCE(
 930		atomic_read(&commit_transaction->t_outstanding_credits) < 0);
 931
 932	/*
 933	 * Now disk caches for filesystem device are flushed so we are safe to
 934	 * erase checkpointed transactions from the log by updating journal
 935	 * superblock.
 936	 */
 937	if (update_tail)
 938		jbd2_update_log_tail(journal, first_tid, first_block);
 939
 940	/* End of a transaction!  Finally, we can do checkpoint
 941           processing: any buffers committed as a result of this
 942           transaction can be removed from any checkpoint list it was on
 943           before. */
 944
 945	jbd2_debug(3, "JBD2: commit phase 6\n");
 946
 947	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
 948	J_ASSERT(commit_transaction->t_buffers == NULL);
 949	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
 950	J_ASSERT(commit_transaction->t_shadow_list == NULL);
 951
 952restart_loop:
 953	/*
 954	 * As there are other places (journal_unmap_buffer()) adding buffers
 955	 * to this list we have to be careful and hold the j_list_lock.
 956	 */
 957	spin_lock(&journal->j_list_lock);
 958	while (commit_transaction->t_forget) {
 959		transaction_t *cp_transaction;
 960		struct buffer_head *bh;
 961		int try_to_free = 0;
 962		bool drop_ref;
 963
 964		jh = commit_transaction->t_forget;
 965		spin_unlock(&journal->j_list_lock);
 966		bh = jh2bh(jh);
 967		/*
 968		 * Get a reference so that bh cannot be freed before we are
 969		 * done with it.
 970		 */
 971		get_bh(bh);
 972		spin_lock(&jh->b_state_lock);
 973		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);
 974
 975		/*
 976		 * If there is undo-protected committed data against
 977		 * this buffer, then we can remove it now.  If it is a
 978		 * buffer needing such protection, the old frozen_data
 979		 * field now points to a committed version of the
 980		 * buffer, so rotate that field to the new committed
 981		 * data.
 982		 *
 983		 * Otherwise, we can just throw away the frozen data now.
 984		 *
 985		 * We also know that the frozen data has already fired
 986		 * its triggers if they exist, so we can clear that too.
 987		 */
 988		if (jh->b_committed_data) {
 989			jbd2_free(jh->b_committed_data, bh->b_size);
 990			jh->b_committed_data = NULL;
 991			if (jh->b_frozen_data) {
 992				jh->b_committed_data = jh->b_frozen_data;
 993				jh->b_frozen_data = NULL;
 994				jh->b_frozen_triggers = NULL;
 995			}
 996		} else if (jh->b_frozen_data) {
 997			jbd2_free(jh->b_frozen_data, bh->b_size);
 998			jh->b_frozen_data = NULL;
 999			jh->b_frozen_triggers = NULL;
1000		}
1001
1002		spin_lock(&journal->j_list_lock);
1003		cp_transaction = jh->b_cp_transaction;
1004		if (cp_transaction) {
1005			JBUFFER_TRACE(jh, "remove from old cp transaction");
1006			cp_transaction->t_chp_stats.cs_dropped++;
1007			__jbd2_journal_remove_checkpoint(jh);
1008		}
1009
1010		/* Only re-checkpoint the buffer_head if it is marked
1011		 * dirty.  If the buffer was added to the BJ_Forget list
1012		 * by jbd2_journal_forget, it may no longer be dirty and
1013		 * there's no point in keeping a checkpoint record for
1014		 * it. */
1015
1016		/*
 1017		 * For a buffer which has been freed while still being journaled
 1018		 * by a previous transaction, refile the buffer to BJ_Forget of
 1019		 * the running transaction. If the just committed transaction
 1020		 * contains an "add to orphan" operation, we can completely
 1021		 * invalidate the buffer now. We are rather thorough in that,
 1022		 * since the buffer may still be accessible when blocksize <
 1023		 * pagesize and it is attached to the last partial page.
1024		 */
1025		if (buffer_freed(bh) && !jh->b_next_transaction) {
1026			struct address_space *mapping;
1027
1028			clear_buffer_freed(bh);
1029			clear_buffer_jbddirty(bh);
1030
1031			/*
1032			 * Block device buffers need to stay mapped all the
1033			 * time, so it is enough to clear buffer_jbddirty and
1034			 * buffer_freed bits. For the file mapping buffers (i.e.
1035			 * journalled data) we need to unmap buffer and clear
1036			 * more bits. We also need to be careful about the check
1037			 * because the data page mapping can get cleared under
1038			 * our hands. Note that if mapping == NULL, we don't
1039			 * need to make buffer unmapped because the page is
1040			 * already detached from the mapping and buffers cannot
1041			 * get reused.
1042			 */
1043			mapping = READ_ONCE(bh->b_page->mapping);
1044			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1045				clear_buffer_mapped(bh);
1046				clear_buffer_new(bh);
1047				clear_buffer_req(bh);
1048				bh->b_bdev = NULL;
1049			}
1050		}
1051
1052		if (buffer_jbddirty(bh)) {
1053			JBUFFER_TRACE(jh, "add to new checkpointing trans");
1054			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
1055			if (is_journal_aborted(journal))
1056				clear_buffer_jbddirty(bh);
1057		} else {
1058			J_ASSERT_BH(bh, !buffer_dirty(bh));
1059			/*
1060			 * The buffer on BJ_Forget list and not jbddirty means
1061			 * it has been freed by this transaction and hence it
1062			 * could not have been reallocated until this
1063			 * transaction has committed. *BUT* it could be
1064			 * reallocated once we have written all the data to
1065			 * disk and before we process the buffer on BJ_Forget
1066			 * list.
1067			 */
1068			if (!jh->b_next_transaction)
1069				try_to_free = 1;
1070		}
1071		JBUFFER_TRACE(jh, "refile or unfile buffer");
1072		drop_ref = __jbd2_journal_refile_buffer(jh);
1073		spin_unlock(&jh->b_state_lock);
1074		if (drop_ref)
1075			jbd2_journal_put_journal_head(jh);
1076		if (try_to_free)
1077			release_buffer_page(bh);	/* Drops bh reference */
1078		else
1079			__brelse(bh);
1080		cond_resched_lock(&journal->j_list_lock);
1081	}
1082	spin_unlock(&journal->j_list_lock);
1083	/*
1084	 * This is a bit sleazy.  We use j_list_lock to protect transition
1085	 * of a transaction into T_FINISHED state and calling
1086	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1087	 * other checkpointing code processing the transaction...
1088	 */
1089	write_lock(&journal->j_state_lock);
1090	spin_lock(&journal->j_list_lock);
1091	/*
1092	 * Now recheck if some buffers did not get attached to the transaction
1093	 * while the lock was dropped...
1094	 */
1095	if (commit_transaction->t_forget) {
1096		spin_unlock(&journal->j_list_lock);
1097		write_unlock(&journal->j_state_lock);
1098		goto restart_loop;
1099	}
1100
1101	/* Add the transaction to the checkpoint list
1102	 * __journal_remove_checkpoint() can not destroy transaction
1103	 * under us because it is not marked as T_FINISHED yet */
1104	if (journal->j_checkpoint_transactions == NULL) {
1105		journal->j_checkpoint_transactions = commit_transaction;
1106		commit_transaction->t_cpnext = commit_transaction;
1107		commit_transaction->t_cpprev = commit_transaction;
1108	} else {
1109		commit_transaction->t_cpnext =
1110			journal->j_checkpoint_transactions;
1111		commit_transaction->t_cpprev =
1112			commit_transaction->t_cpnext->t_cpprev;
1113		commit_transaction->t_cpnext->t_cpprev =
1114			commit_transaction;
1115		commit_transaction->t_cpprev->t_cpnext =
1116				commit_transaction;
1117	}
1118	spin_unlock(&journal->j_list_lock);
1119
1120	/* Done with this transaction! */
1121
1122	jbd2_debug(3, "JBD2: commit phase 7\n");
1123
1124	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1125
1126	commit_transaction->t_start = jiffies;
1127	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1128					      commit_transaction->t_start);
1129
1130	/*
1131	 * File the transaction statistics
1132	 */
1133	stats.ts_tid = commit_transaction->t_tid;
1134	stats.run.rs_handle_count =
1135		atomic_read(&commit_transaction->t_handle_count);
1136	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1137			     commit_transaction->t_tid, &stats.run);
1138	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1139
1140	commit_transaction->t_state = T_COMMIT_CALLBACK;
1141	J_ASSERT(commit_transaction == journal->j_committing_transaction);
1142	journal->j_commit_sequence = commit_transaction->t_tid;
1143	journal->j_committing_transaction = NULL;
1144	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1145
1146	/*
1147	 * weight the commit time higher than the average time so we don't
1148	 * react too strongly to vast changes in the commit time
1149	 */
1150	if (likely(journal->j_average_commit_time))
1151		journal->j_average_commit_time = (commit_time +
1152				journal->j_average_commit_time*3) / 4;
1153	else
1154		journal->j_average_commit_time = commit_time;
1155
1156	write_unlock(&journal->j_state_lock);
1157
1158	if (journal->j_commit_callback)
1159		journal->j_commit_callback(journal, commit_transaction);
1160	if (journal->j_fc_cleanup_callback)
1161		journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
1162
1163	trace_jbd2_end_commit(journal, commit_transaction);
1164	jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
1165		  journal->j_commit_sequence, journal->j_tail_sequence);
1166
1167	write_lock(&journal->j_state_lock);
1168	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
1169	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
1170	spin_lock(&journal->j_list_lock);
1171	commit_transaction->t_state = T_FINISHED;
1172	/* Check if the transaction can be dropped now that we are finished */
1173	if (commit_transaction->t_checkpoint_list == NULL &&
1174	    commit_transaction->t_checkpoint_io_list == NULL) {
1175		__jbd2_journal_drop_transaction(journal, commit_transaction);
1176		jbd2_journal_free_transaction(commit_transaction);
1177	}
1178	spin_unlock(&journal->j_list_lock);
1179	write_unlock(&journal->j_state_lock);
1180	wake_up(&journal->j_wait_done_commit);
1181	wake_up(&journal->j_fc_wait);
1182
1183	/*
1184	 * Calculate overall stats
1185	 */
1186	spin_lock(&journal->j_history_lock);
1187	journal->j_stats.ts_tid++;
1188	journal->j_stats.ts_requested += stats.ts_requested;
1189	journal->j_stats.run.rs_wait += stats.run.rs_wait;
1190	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1191	journal->j_stats.run.rs_running += stats.run.rs_running;
1192	journal->j_stats.run.rs_locked += stats.run.rs_locked;
1193	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1194	journal->j_stats.run.rs_logging += stats.run.rs_logging;
1195	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1196	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1197	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1198	spin_unlock(&journal->j_history_lock);
1199}
fs/jbd2/commit.c (v4.10.11)
 
   1/*
   2 * linux/fs/jbd2/commit.c
   3 *
   4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
   5 *
   6 * Copyright 1998 Red Hat corp --- All Rights Reserved
   7 *
   8 * This file is part of the Linux kernel and is made available under
   9 * the terms of the GNU General Public License, version 2, or at your
  10 * option, any later version, incorporated herein by reference.
  11 *
  12 * Journal commit routines for the generic filesystem journaling code;
  13 * part of the ext2fs journaling system.
  14 */
  15
  16#include <linux/time.h>
  17#include <linux/fs.h>
  18#include <linux/jbd2.h>
  19#include <linux/errno.h>
  20#include <linux/slab.h>
  21#include <linux/mm.h>
  22#include <linux/pagemap.h>
  23#include <linux/jiffies.h>
  24#include <linux/crc32.h>
  25#include <linux/writeback.h>
  26#include <linux/backing-dev.h>
  27#include <linux/bio.h>
  28#include <linux/blkdev.h>
  29#include <linux/bitops.h>
  30#include <trace/events/jbd2.h>
  31
  32/*
  33 * IO end handler for temporary buffer_heads handling writes to the journal.
  34 */
  35static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
  36{
  37	struct buffer_head *orig_bh = bh->b_private;
  38
  39	BUFFER_TRACE(bh, "");
  40	if (uptodate)
  41		set_buffer_uptodate(bh);
  42	else
  43		clear_buffer_uptodate(bh);
  44	if (orig_bh) {
  45		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
  46		smp_mb__after_atomic();
  47		wake_up_bit(&orig_bh->b_state, BH_Shadow);
  48	}
  49	unlock_buffer(bh);
  50}
  51
  52/*
  53 * When an ext4 file is truncated, it is possible that some pages are not
  54 * successfully freed, because they are attached to a committing transaction.
  55 * After the transaction commits, these pages are left on the LRU, with no
  56 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
  57 * by the VM, but their apparent absence upsets the VM accounting, and it makes
  58 * the numbers in /proc/meminfo look odd.
  59 *
  60 * So here, we have a buffer which has just come off the forget list.  Look to
  61 * see if we can strip all buffers from the backing page.
  62 *
  63 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
  64 * caller provided us with a ref against the buffer, and we drop that here.
  65 */
  66static void release_buffer_page(struct buffer_head *bh)
  67{
  68	struct page *page;
  69
  70	if (buffer_dirty(bh))
  71		goto nope;
  72	if (atomic_read(&bh->b_count) != 1)
  73		goto nope;
  74	page = bh->b_page;
  75	if (!page)
  76		goto nope;
  77	if (page->mapping)
  78		goto nope;
  79
  80	/* OK, it's a truncated page */
  81	if (!trylock_page(page))
  82		goto nope;
  83
  84	get_page(page);
  85	__brelse(bh);
  86	try_to_free_buffers(page);
  87	unlock_page(page);
  88	put_page(page);
  89	return;
  90
  91nope:
  92	__brelse(bh);
  93}
  94
  95static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
  96{
  97	struct commit_header *h;
  98	__u32 csum;
  99
 100	if (!jbd2_journal_has_csum_v2or3(j))
 101		return;
 102
 103	h = (struct commit_header *)(bh->b_data);
 104	h->h_chksum_type = 0;
 105	h->h_chksum_size = 0;
 106	h->h_chksum[0] = 0;
 107	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 108	h->h_chksum[0] = cpu_to_be32(csum);
 109}
 110
 111/*
 112 * Done it all: now submit the commit record.  We should have
 113 * cleaned up our previous buffers by now, so if we are in abort
 114 * mode we can now just skip the rest of the journal write
 115 * entirely.
 116 *
 117 * Returns 1 if the journal needs to be aborted or 0 on success
 118 */
 119static int journal_submit_commit_record(journal_t *journal,
 120					transaction_t *commit_transaction,
 121					struct buffer_head **cbh,
 122					__u32 crc32_sum)
 123{
 124	struct commit_header *tmp;
 125	struct buffer_head *bh;
 126	int ret;
 127	struct timespec64 now = current_kernel_time64();
 128
 129	*cbh = NULL;
 130
 131	if (is_journal_aborted(journal))
 132		return 0;
 133
 134	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
 135						JBD2_COMMIT_BLOCK);
 136	if (!bh)
 137		return 1;
 138
 139	tmp = (struct commit_header *)bh->b_data;
 140	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
 141	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
 142
 143	if (jbd2_has_feature_checksum(journal)) {
 144		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
 145		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 146		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 147	}
 148	jbd2_commit_block_csum_set(journal, bh);
 149
 150	BUFFER_TRACE(bh, "submit commit block");
 151	lock_buffer(bh);
 152	clear_buffer_dirty(bh);
 153	set_buffer_uptodate(bh);
 154	bh->b_end_io = journal_end_buffer_io_sync;
 155
 156	if (journal->j_flags & JBD2_BARRIER &&
 157	    !jbd2_has_feature_async_commit(journal))
 158		ret = submit_bh(REQ_OP_WRITE,
 159			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
 160	else
 161		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 162
 163	*cbh = bh;
 164	return ret;
 165}
 166
 167/*
 168 * This function along with journal_submit_commit_record
  169 * allows writing the commit record asynchronously.
 170 */
 171static int journal_wait_on_commit_record(journal_t *journal,
 172					 struct buffer_head *bh)
 173{
 174	int ret = 0;
 175
 176	clear_buffer_dirty(bh);
 177	wait_on_buffer(bh);
 178
 179	if (unlikely(!buffer_uptodate(bh)))
 180		ret = -EIO;
 181	put_bh(bh);            /* One for getblk() */
 182
 183	return ret;
 184}
 185
 186/*
 187 * write the filemap data using writepage() address_space_operations.
 188 * We don't do block allocation here even for delalloc. We don't
  189 * use writepages() because with delayed allocation we may be doing
 190 * block allocation in writepages().
 191 */
 192static int journal_submit_inode_data_buffers(struct address_space *mapping)
 193{
 194	int ret;
 195	struct writeback_control wbc = {
 196		.sync_mode =  WB_SYNC_ALL,
 197		.nr_to_write = mapping->nrpages * 2,
 198		.range_start = 0,
 199		.range_end = i_size_read(mapping->host),
 200	};
 201
 202	ret = generic_writepages(mapping, &wbc);
 203	return ret;
 204}
 205
 206/*
 207 * Submit all the data buffers of inode associated with the transaction to
 208 * disk.
 209 *
 210 * We are in a committing transaction. Therefore no new inode can be added to
  211 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
  212 * currently operate on from being released while we write out pages.
 213 */
 214static int journal_submit_data_buffers(journal_t *journal,
 215		transaction_t *commit_transaction)
 216{
 217	struct jbd2_inode *jinode;
 218	int err, ret = 0;
 219	struct address_space *mapping;
 220
 221	spin_lock(&journal->j_list_lock);
 222	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 223		if (!(jinode->i_flags & JI_WRITE_DATA))
 224			continue;
 225		mapping = jinode->i_vfs_inode->i_mapping;
 226		jinode->i_flags |= JI_COMMIT_RUNNING;
 227		spin_unlock(&journal->j_list_lock);
 228		/*
 229		 * submit the inode data buffers. We use writepage
  230		 * instead of writepages, because writepages can do
  231		 * block allocation with delalloc. We need to write
 232		 * only allocated blocks here.
 233		 */
 234		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
 235		err = journal_submit_inode_data_buffers(mapping);
 236		if (!ret)
 237			ret = err;
 238		spin_lock(&journal->j_list_lock);
 239		J_ASSERT(jinode->i_transaction == commit_transaction);
 240		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 241		smp_mb();
 242		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 243	}
 244	spin_unlock(&journal->j_list_lock);
 245	return ret;
 246}
 247
 248/*
 249 * Wait for data submitted for writeout, refile inodes to proper
 250 * transaction if needed.
 251 *
 252 */
 253static int journal_finish_inode_data_buffers(journal_t *journal,
 254		transaction_t *commit_transaction)
 255{
 256	struct jbd2_inode *jinode, *next_i;
 257	int err, ret = 0;
 258
 259	/* For locking, see the comment in journal_submit_data_buffers() */
 260	spin_lock(&journal->j_list_lock);
 261	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
 262		if (!(jinode->i_flags & JI_WAIT_DATA))
 263			continue;
 264		jinode->i_flags |= JI_COMMIT_RUNNING;
 265		spin_unlock(&journal->j_list_lock);
 266		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
 267		if (err) {
 268			/*
 269			 * Because AS_EIO is cleared by
 270			 * filemap_fdatawait_range(), set it again so
 271			 * that user process can get -EIO from fsync().
 272			 */
 273			mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);
 274
 275			if (!ret)
 276				ret = err;
 277		}
 278		spin_lock(&journal->j_list_lock);
 279		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 280		smp_mb();
 281		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 282	}
 283
 284	/* Now refile inode to proper lists */
 285	list_for_each_entry_safe(jinode, next_i,
 286				 &commit_transaction->t_inode_list, i_list) {
 287		list_del(&jinode->i_list);
 288		if (jinode->i_next_transaction) {
 289			jinode->i_transaction = jinode->i_next_transaction;
 290			jinode->i_next_transaction = NULL;
 291			list_add(&jinode->i_list,
 292				&jinode->i_transaction->t_inode_list);
 293		} else {
 294			jinode->i_transaction = NULL;
 295		}
 296	}
 297	spin_unlock(&journal->j_list_lock);
 298
 299	return ret;
 300}
 301
 302static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 303{
 304	struct page *page = bh->b_page;
 305	char *addr;
 306	__u32 checksum;
 307
 308	addr = kmap_atomic(page);
 309	checksum = crc32_be(crc32_sum,
 310		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
 311	kunmap_atomic(addr);
 312
 313	return checksum;
 314}
 315
 316static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
 317				   unsigned long long block)
 318{
 319	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
 320	if (jbd2_has_feature_64bit(j))
 321		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 322}
 323
 324static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 325				    struct buffer_head *bh, __u32 sequence)
 326{
 327	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
 328	struct page *page = bh->b_page;
 329	__u8 *addr;
 330	__u32 csum32;
 331	__be32 seq;
 332
 333	if (!jbd2_journal_has_csum_v2or3(j))
 334		return;
 335
 336	seq = cpu_to_be32(sequence);
 337	addr = kmap_atomic(page);
 338	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
 339	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
 340			     bh->b_size);
 341	kunmap_atomic(addr);
 342
 343	if (jbd2_has_feature_csum3(j))
 344		tag3->t_checksum = cpu_to_be32(csum32);
 345	else
 346		tag->t_checksum = cpu_to_be16(csum32);
 347}
 348/*
 349 * jbd2_journal_commit_transaction
 350 *
 351 * The primary function for committing a transaction to the log.  This
 352 * function is called by the journal thread to begin a complete commit.
 353 */
 354void jbd2_journal_commit_transaction(journal_t *journal)
 355{
 356	struct transaction_stats_s stats;
 357	transaction_t *commit_transaction;
 358	struct journal_head *jh;
 359	struct buffer_head *descriptor;
 360	struct buffer_head **wbuf = journal->j_wbuf;
 361	int bufs;
 362	int flags;
 363	int err;
 364	unsigned long long blocknr;
 365	ktime_t start_time;
 366	u64 commit_time;
 367	char *tagp = NULL;
 368	journal_block_tag_t *tag = NULL;
 369	int space_left = 0;
 370	int first_tag = 0;
 371	int tag_flag;
 372	int i;
 373	int tag_bytes = journal_tag_bytes(journal);
 374	struct buffer_head *cbh = NULL; /* For transactional checksums */
 375	__u32 crc32_sum = ~0;
 376	struct blk_plug plug;
 377	/* Tail of the journal */
 378	unsigned long first_block;
 379	tid_t first_tid;
 380	int update_tail;
 381	int csum_size = 0;
 382	LIST_HEAD(io_bufs);
 383	LIST_HEAD(log_bufs);
 384
 385	if (jbd2_journal_has_csum_v2or3(journal))
 386		csum_size = sizeof(struct jbd2_journal_block_tail);
 387
 388	/*
 389	 * First job: lock down the current transaction and wait for
 390	 * all outstanding updates to complete.
 391	 */
 392
 393	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
 394	if (journal->j_flags & JBD2_FLUSHED) {
 395		jbd_debug(3, "super block updated\n");
 396		mutex_lock(&journal->j_checkpoint_mutex);
 397		/*
 398		 * We hold j_checkpoint_mutex so tail cannot change under us.
 399		 * We don't need any special data guarantees for writing sb
 400		 * since journal is empty and it is ok for write to be
 401		 * flushed only with transaction commit.
 402		 */
 403		jbd2_journal_update_sb_log_tail(journal,
 404						journal->j_tail_sequence,
 405						journal->j_tail,
 406						REQ_SYNC);
 407		mutex_unlock(&journal->j_checkpoint_mutex);
 408	} else {
 409		jbd_debug(3, "superblock not updated\n");
 410	}
 411
 412	J_ASSERT(journal->j_running_transaction != NULL);
 413	J_ASSERT(journal->j_committing_transaction == NULL);
 414
 415	commit_transaction = journal->j_running_transaction;
 416
 417	trace_jbd2_start_commit(journal, commit_transaction);
 418	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
 419			commit_transaction->t_tid);
 420
 421	write_lock(&journal->j_state_lock);
 422	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 423	commit_transaction->t_state = T_LOCKED;
 424
 425	trace_jbd2_commit_locking(journal, commit_transaction);
 426	stats.run.rs_wait = commit_transaction->t_max_wait;
 427	stats.run.rs_request_delay = 0;
 428	stats.run.rs_locked = jiffies;
 429	if (commit_transaction->t_requested)
 430		stats.run.rs_request_delay =
 431			jbd2_time_diff(commit_transaction->t_requested,
 432				       stats.run.rs_locked);
 433	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
 434					      stats.run.rs_locked);
 435
 436	spin_lock(&commit_transaction->t_handle_lock);
 437	while (atomic_read(&commit_transaction->t_updates)) {
 438		DEFINE_WAIT(wait);
 439
 440		prepare_to_wait(&journal->j_wait_updates, &wait,
 441					TASK_UNINTERRUPTIBLE);
 442		if (atomic_read(&commit_transaction->t_updates)) {
 443			spin_unlock(&commit_transaction->t_handle_lock);
 444			write_unlock(&journal->j_state_lock);
 445			schedule();
 446			write_lock(&journal->j_state_lock);
 447			spin_lock(&commit_transaction->t_handle_lock);
 448		}
 449		finish_wait(&journal->j_wait_updates, &wait);
 450	}
 451	spin_unlock(&commit_transaction->t_handle_lock);
 452
 453	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 454			journal->j_max_transaction_buffers);
 455
 456	/*
 457	 * First thing we are allowed to do is to discard any remaining
 458	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
 459	 * that there are no such buffers: if a large filesystem
 460	 * operation like a truncate needs to split itself over multiple
 461	 * transactions, then it may try to do a jbd2_journal_restart() while
 462	 * there are still BJ_Reserved buffers outstanding.  These must
 463	 * be released cleanly from the current transaction.
 464	 *
 465	 * In this case, the filesystem must still reserve write access
 466	 * again before modifying the buffer in the new transaction, but
 467	 * we do not require it to remember exactly which old buffers it
 468	 * has reserved.  This is consistent with the existing behaviour
 469	 * that multiple jbd2_journal_get_write_access() calls to the same
 470	 * buffer are perfectly permissible.
 471	 */
 472	while (commit_transaction->t_reserved_list) {
 473		jh = commit_transaction->t_reserved_list;
 474		JBUFFER_TRACE(jh, "reserved, unused: refile");
 475		/*
 476		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
 477		 * leave undo-committed data.
 478		 */
 479		if (jh->b_committed_data) {
 480			struct buffer_head *bh = jh2bh(jh);
 481
 482			jbd_lock_bh_state(bh);
 483			jbd2_free(jh->b_committed_data, bh->b_size);
 484			jh->b_committed_data = NULL;
 485			jbd_unlock_bh_state(bh);
 486		}
 487		jbd2_journal_refile_buffer(journal, jh);
 488	}
 489
 490	/*
 491	 * Now try to drop any written-back buffers from the journal's
 492	 * checkpoint lists.  We do this *before* commit because it potentially
  493	 * frees some memory.
 494	 */
 495	spin_lock(&journal->j_list_lock);
 496	__jbd2_journal_clean_checkpoint_list(journal, false);
 497	spin_unlock(&journal->j_list_lock);
 498
 499	jbd_debug(3, "JBD2: commit phase 1\n");
 500
 501	/*
  502	 * Clear the revoked flag to reflect that there are no revoked
  503	 * buffers in the next transaction, which is about to be started.
 504	 */
 505	jbd2_clear_buffer_revoked_flags(journal);
 506
 507	/*
 508	 * Switch to a new revoke table.
 509	 */
 510	jbd2_journal_switch_revoke_table(journal);
 511
 512	/*
 513	 * Reserved credits cannot be claimed anymore, free them
 514	 */
 515	atomic_sub(atomic_read(&journal->j_reserved_credits),
 516		   &commit_transaction->t_outstanding_credits);
 517
 518	trace_jbd2_commit_flushing(journal, commit_transaction);
 519	stats.run.rs_flushing = jiffies;
 520	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
 521					     stats.run.rs_flushing);
 522
 523	commit_transaction->t_state = T_FLUSH;
 524	journal->j_committing_transaction = commit_transaction;
 525	journal->j_running_transaction = NULL;
 526	start_time = ktime_get();
 527	commit_transaction->t_log_start = journal->j_head;
 528	wake_up(&journal->j_wait_transaction_locked);
 529	write_unlock(&journal->j_state_lock);
 530
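	/*
	 * At this point the transaction has moved T_RUNNING -> T_LOCKED ->
	 * T_FLUSH. The rest of this function walks it through T_COMMIT,
	 * T_COMMIT_DFLUSH, T_COMMIT_JFLUSH and T_COMMIT_CALLBACK before it
	 * is finally marked T_FINISHED and handed over to checkpointing.
	 */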
 531	jbd_debug(3, "JBD2: commit phase 2a\n");
 532
 533	/*
 534	 * Now start flushing things to disk, in the order they appear
 535	 * on the transaction lists.  Data blocks go first.
 536	 */
 537	err = journal_submit_data_buffers(journal, commit_transaction);
 538	if (err)
 539		jbd2_journal_abort(journal, err);
 540
 541	blk_start_plug(&plug);
 542	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
 543
 544	jbd_debug(3, "JBD2: commit phase 2b\n");
 545
 546	/*
 547	 * Way to go: we have now written out all of the data for a
 548	 * transaction!  Now comes the tricky part: we need to write out
 549	 * metadata.  Loop over the transaction's entire buffer list:
 550	 */
 551	write_lock(&journal->j_state_lock);
 552	commit_transaction->t_state = T_COMMIT;
 553	write_unlock(&journal->j_state_lock);
 554
 555	trace_jbd2_commit_logging(journal, commit_transaction);
 556	stats.run.rs_logging = jiffies;
 557	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
 558					       stats.run.rs_logging);
 559	stats.run.rs_blocks =
 560		atomic_read(&commit_transaction->t_outstanding_credits);
 561	stats.run.rs_blocks_logged = 0;
 562
 563	J_ASSERT(commit_transaction->t_nr_buffers <=
 564		 atomic_read(&commit_transaction->t_outstanding_credits));
 565
 566	err = 0;
 567	bufs = 0;
 568	descriptor = NULL;
 569	while (commit_transaction->t_buffers) {
 570
 571		/* Find the next buffer to be journaled... */
 572
 573		jh = commit_transaction->t_buffers;
 574
 575		/* If we're in abort mode, we just un-journal the buffer and
 576		   release it. */
 577
 578		if (is_journal_aborted(journal)) {
 579			clear_buffer_jbddirty(jh2bh(jh));
 580			JBUFFER_TRACE(jh, "journal is aborting: refile");
 581			jbd2_buffer_abort_trigger(jh,
 582						  jh->b_frozen_data ?
 583						  jh->b_frozen_triggers :
 584						  jh->b_triggers);
 585			jbd2_journal_refile_buffer(journal, jh);
 586			/* If that was the last one, we need to clean up
 587			 * any descriptor buffers which may have been
 588			 * already allocated, even if we are now
 589			 * aborting. */
 590			if (!commit_transaction->t_buffers)
 591				goto start_journal_io;
 592			continue;
 593		}
 594
 595		/* Make sure we have a descriptor block in which to
 596		   record the metadata buffer. */
 597
 598		if (!descriptor) {
 599			J_ASSERT (bufs == 0);
 600
 601			jbd_debug(4, "JBD2: get descriptor\n");
 602
 603			descriptor = jbd2_journal_get_descriptor_buffer(
 604							commit_transaction,
 605							JBD2_DESCRIPTOR_BLOCK);
 606			if (!descriptor) {
 607				jbd2_journal_abort(journal, -EIO);
 608				continue;
 609			}
 610
 611			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
 612				(unsigned long long)descriptor->b_blocknr,
 613				descriptor->b_data);
 614			tagp = &descriptor->b_data[sizeof(journal_header_t)];
 615			space_left = descriptor->b_size -
 616						sizeof(journal_header_t);
 617			first_tag = 1;
 618			set_buffer_jwrite(descriptor);
 619			set_buffer_dirty(descriptor);
 620			wbuf[bufs++] = descriptor;
 621
 622			/* Record it so that we can wait for IO
 623                           completion later */
 624			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
 625			jbd2_file_log_bh(&log_bufs, descriptor);
 626		}
 627
 628		/* Where is the buffer to be written? */
 629
 630		err = jbd2_journal_next_log_block(journal, &blocknr);
 631		/* If the block mapping failed, just abandon the buffer
 632		   and repeat this loop: we'll fall into the
 633		   refile-on-abort condition above. */
 634		if (err) {
 635			jbd2_journal_abort(journal, err);
 636			continue;
 637		}
 638
 639		/*
 640		 * start_this_handle() uses t_outstanding_credits to determine
  641		 * the free space in the log, but this counter is also
  642		 * changed by jbd2_journal_next_log_block().
 643		 */
 644		atomic_dec(&commit_transaction->t_outstanding_credits);
 645
 646		/* Bump b_count to prevent truncate from stumbling over
 647                   the shadowed buffer!  @@@ This can go if we ever get
 648                   rid of the shadow pairing of buffers. */
 649		atomic_inc(&jh2bh(jh)->b_count);
 650
 651		/*
 652		 * Make a temporary IO buffer with which to write it out
 653		 * (this will requeue the metadata buffer to BJ_Shadow).
 654		 */
 655		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 656		JBUFFER_TRACE(jh, "ph3: write metadata");
 657		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
 658						jh, &wbuf[bufs], blocknr);
 659		if (flags < 0) {
 660			jbd2_journal_abort(journal, flags);
 661			continue;
 662		}
 663		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
 664
 665		/* Record the new block's tag in the current descriptor
 666                   buffer */
 667
 668		tag_flag = 0;
 669		if (flags & 1)
 670			tag_flag |= JBD2_FLAG_ESCAPE;
 671		if (!first_tag)
 672			tag_flag |= JBD2_FLAG_SAME_UUID;
 673
 674		tag = (journal_block_tag_t *) tagp;
 675		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
 676		tag->t_flags = cpu_to_be16(tag_flag);
 677		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
 678					commit_transaction->t_tid);
 679		tagp += tag_bytes;
 680		space_left -= tag_bytes;
 681		bufs++;
 682
 683		if (first_tag) {
 684			memcpy (tagp, journal->j_uuid, 16);
 685			tagp += 16;
 686			space_left -= 16;
 687			first_tag = 0;
 688		}
 689
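		/*
		 * A rough sketch of how the descriptor block fills up: after
		 * the journal_header_t come fixed-size tags, the first tag is
		 * followed by the 16-byte journal UUID, and every later tag
		 * carries JBD2_FLAG_SAME_UUID instead of repeating it:
		 *
		 *	journal_header_t
		 *	tag[0]                     + 16-byte UUID
		 *	tag[1]  (SAME_UUID)
		 *	...
		 *	tag[n]  (SAME_UUID | LAST_TAG)
		 *	[ jbd2_journal_block_tail with csum v2/v3 ]
		 */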
 690		/* If there's no more to do, or if the descriptor is full,
 691		   let the IO rip! */
 692
 693		if (bufs == journal->j_wbufsize ||
 694		    commit_transaction->t_buffers == NULL ||
 695		    space_left < tag_bytes + 16 + csum_size) {
 696
 697			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
 698
 699			/* Write an end-of-descriptor marker before
 700                           submitting the IOs.  "tag" still points to
 701                           the last tag we set up. */
 702
 703			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 704
 705			jbd2_descriptor_block_csum_set(journal, descriptor);
 706start_journal_io:
 707			for (i = 0; i < bufs; i++) {
 708				struct buffer_head *bh = wbuf[i];
 709				/*
 710				 * Compute checksum.
 711				 */
 712				if (jbd2_has_feature_checksum(journal)) {
 713					crc32_sum =
 714					    jbd2_checksum_data(crc32_sum, bh);
 715				}
 716
 717				lock_buffer(bh);
 718				clear_buffer_dirty(bh);
 719				set_buffer_uptodate(bh);
 720				bh->b_end_io = journal_end_buffer_io_sync;
 721				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 722			}
 723			cond_resched();
 724			stats.run.rs_blocks_logged += bufs;
 725
 726			/* Force a new descriptor to be generated next
 727                           time round the loop. */
 728			descriptor = NULL;
 729			bufs = 0;
 730		}
 731	}
 732
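	/*
	 * When the compat checksum feature is set, crc32_sum has by now been
	 * folded over every block submitted to the log above; it is the value
	 * journal_submit_commit_record() places in the commit block so that
	 * recovery can check the transaction as a whole.
	 */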
 733	err = journal_finish_inode_data_buffers(journal, commit_transaction);
 734	if (err) {
 735		printk(KERN_WARNING
 736			"JBD2: Detected IO errors while flushing file data "
 737		       "on %s\n", journal->j_devname);
 738		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
 739			jbd2_journal_abort(journal, err);
 740		err = 0;
 741	}
 742
 743	/*
 744	 * Get current oldest transaction in the log before we issue flush
 745	 * to the filesystem device. After the flush we can be sure that
 746	 * blocks of all older transactions are checkpointed to persistent
 747	 * storage and we will be safe to update journal start in the
 748	 * superblock with the numbers we get here.
 749	 */
 750	update_tail =
 751		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
 752
 753	write_lock(&journal->j_state_lock);
 754	if (update_tail) {
 755		long freed = first_block - journal->j_tail;
 756
 757		if (first_block < journal->j_tail)
 758			freed += journal->j_last - journal->j_first;
  759		/* Update tail only if we free a significant amount of space */
 760		if (freed < journal->j_maxlen / 4)
 761			update_tail = 0;
 762	}
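	/*
	 * The "freed" computation above handles log wrap-around. As a purely
	 * illustrative example (hypothetical numbers): with j_first = 1,
	 * j_last = 8193, j_tail = 8000 and first_block = 200, freed becomes
	 * 200 - 8000 + (8193 - 1) = 392 blocks; if j_maxlen / 4 is 2048,
	 * that is not worth a superblock write and update_tail is cleared.
	 */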
 763	J_ASSERT(commit_transaction->t_state == T_COMMIT);
 764	commit_transaction->t_state = T_COMMIT_DFLUSH;
 765	write_unlock(&journal->j_state_lock);
 766
 767	/* 
 768	 * If the journal is not located on the file system device,
 769	 * then we must flush the file system device before we issue
 770	 * the commit record
 771	 */
 772	if (commit_transaction->t_need_data_flush &&
 773	    (journal->j_fs_dev != journal->j_dev) &&
 774	    (journal->j_flags & JBD2_BARRIER))
 775		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
 776
 777	/* Done it all: now write the commit record asynchronously. */
 778	if (jbd2_has_feature_async_commit(journal)) {
 779		err = journal_submit_commit_record(journal, commit_transaction,
 780						 &cbh, crc32_sum);
 781		if (err)
 782			__jbd2_journal_abort_hard(journal);
 783	}
 784
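	/*
	 * With the async commit feature the commit record goes out here,
	 * before we have waited for the metadata writes above to complete;
	 * the checksum carried in the commit block is what lets recovery
	 * decide whether the transaction reached disk in its entirety.
	 */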
 785	blk_finish_plug(&plug);
 786
 787	/* Lo and behold: we have just managed to send a transaction to
 788           the log.  Before we can commit it, wait for the IO so far to
 789           complete.  Control buffers being written are on the
 790           transaction's t_log_list queue, and metadata buffers are on
 791           the io_bufs list.
 792
 793	   Wait for the buffers in reverse order.  That way we are
 794	   less likely to be woken up until all IOs have completed, and
 795	   so we incur less scheduling load.
 796	*/
 797
 798	jbd_debug(3, "JBD2: commit phase 3\n");
 799
 800	while (!list_empty(&io_bufs)) {
 801		struct buffer_head *bh = list_entry(io_bufs.prev,
 802						    struct buffer_head,
 803						    b_assoc_buffers);
 804
 805		wait_on_buffer(bh);
 806		cond_resched();
 807
 808		if (unlikely(!buffer_uptodate(bh)))
 809			err = -EIO;
 810		jbd2_unfile_log_bh(bh);
 811
 812		/*
 813		 * The list contains temporary buffer heads created by
 814		 * jbd2_journal_write_metadata_buffer().
 815		 */
 816		BUFFER_TRACE(bh, "dumping temporary bh");
 817		__brelse(bh);
 818		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
 819		free_buffer_head(bh);
 820
 821		/* We also have to refile the corresponding shadowed buffer */
 822		jh = commit_transaction->t_shadow_list->b_tprev;
 823		bh = jh2bh(jh);
 824		clear_buffer_jwrite(bh);
 825		J_ASSERT_BH(bh, buffer_jbddirty(bh));
 826		J_ASSERT_BH(bh, !buffer_shadow(bh));
 827
 828		/* The metadata is now released for reuse, but we need
 829                   to remember it against this transaction so that when
 830                   we finally commit, we can do any checkpointing
 831                   required. */
 832		JBUFFER_TRACE(jh, "file as BJ_Forget");
 833		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
 834		JBUFFER_TRACE(jh, "brelse shadowed buffer");
 835		__brelse(bh);
 836	}
 837
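	/*
	 * io_bufs held the temporary buffer_heads written to the log and
	 * t_shadow_list the originals they shadow; both were filled in the
	 * same order in the tag loop above, which is why popping the tail of
	 * each list in the loop just finished always pairs a temporary bh
	 * with its shadowed metadata buffer.
	 */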
 838	J_ASSERT (commit_transaction->t_shadow_list == NULL);
 839
 840	jbd_debug(3, "JBD2: commit phase 4\n");
 841
 842	/* Here we wait for the revoke record and descriptor record buffers */
 843	while (!list_empty(&log_bufs)) {
 844		struct buffer_head *bh;
 845
 846		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
 847		wait_on_buffer(bh);
 848		cond_resched();
 849
 850		if (unlikely(!buffer_uptodate(bh)))
 851			err = -EIO;
 852
 853		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
 854		clear_buffer_jwrite(bh);
 855		jbd2_unfile_log_bh(bh);
 856		__brelse(bh);		/* One for getblk */
 857		/* AKPM: bforget here */
 858	}
 859
 860	if (err)
 861		jbd2_journal_abort(journal, err);
 862
 863	jbd_debug(3, "JBD2: commit phase 5\n");
 864	write_lock(&journal->j_state_lock);
 865	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
 866	commit_transaction->t_state = T_COMMIT_JFLUSH;
 867	write_unlock(&journal->j_state_lock);
 868
 869	if (!jbd2_has_feature_async_commit(journal)) {
 870		err = journal_submit_commit_record(journal, commit_transaction,
 871						&cbh, crc32_sum);
 872		if (err)
 873			__jbd2_journal_abort_hard(journal);
 874	}
 875	if (cbh)
 876		err = journal_wait_on_commit_record(journal, cbh);
 877	if (jbd2_has_feature_async_commit(journal) &&
 878	    journal->j_flags & JBD2_BARRIER) {
 879		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
 880	}
 881
 882	if (err)
 883		jbd2_journal_abort(journal, err);
 884
 885	/*
  886	 * Now the disk caches for the filesystem device are flushed, so it is
  887	 * safe to erase checkpointed transactions from the log by updating the
  888	 * journal superblock.
 889	 */
 890	if (update_tail)
 891		jbd2_update_log_tail(journal, first_tid, first_block);
 892
 893	/* End of a transaction!  Finally, we can do checkpoint
 894           processing: any buffers committed as a result of this
 895           transaction can be removed from any checkpoint list it was on
 896           before. */
 897
 898	jbd_debug(3, "JBD2: commit phase 6\n");
 899
 900	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
 901	J_ASSERT(commit_transaction->t_buffers == NULL);
 902	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
 903	J_ASSERT(commit_transaction->t_shadow_list == NULL);
 904
 905restart_loop:
 906	/*
 907	 * As there are other places (journal_unmap_buffer()) adding buffers
 908	 * to this list we have to be careful and hold the j_list_lock.
 909	 */
 910	spin_lock(&journal->j_list_lock);
 911	while (commit_transaction->t_forget) {
 912		transaction_t *cp_transaction;
 913		struct buffer_head *bh;
 914		int try_to_free = 0;
 915
 916		jh = commit_transaction->t_forget;
 917		spin_unlock(&journal->j_list_lock);
 918		bh = jh2bh(jh);
 919		/*
 920		 * Get a reference so that bh cannot be freed before we are
 921		 * done with it.
 922		 */
 923		get_bh(bh);
 924		jbd_lock_bh_state(bh);
 925		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);
 926
 927		/*
 928		 * If there is undo-protected committed data against
 929		 * this buffer, then we can remove it now.  If it is a
 930		 * buffer needing such protection, the old frozen_data
 931		 * field now points to a committed version of the
 932		 * buffer, so rotate that field to the new committed
 933		 * data.
 934		 *
 935		 * Otherwise, we can just throw away the frozen data now.
 936		 *
 937		 * We also know that the frozen data has already fired
 938		 * its triggers if they exist, so we can clear that too.
 939		 */
 940		if (jh->b_committed_data) {
 941			jbd2_free(jh->b_committed_data, bh->b_size);
 942			jh->b_committed_data = NULL;
 943			if (jh->b_frozen_data) {
 944				jh->b_committed_data = jh->b_frozen_data;
 945				jh->b_frozen_data = NULL;
 946				jh->b_frozen_triggers = NULL;
 947			}
 948		} else if (jh->b_frozen_data) {
 949			jbd2_free(jh->b_frozen_data, bh->b_size);
 950			jh->b_frozen_data = NULL;
 951			jh->b_frozen_triggers = NULL;
 952		}
 953
 954		spin_lock(&journal->j_list_lock);
 955		cp_transaction = jh->b_cp_transaction;
 956		if (cp_transaction) {
 957			JBUFFER_TRACE(jh, "remove from old cp transaction");
 958			cp_transaction->t_chp_stats.cs_dropped++;
 959			__jbd2_journal_remove_checkpoint(jh);
 960		}
 961
 962		/* Only re-checkpoint the buffer_head if it is marked
 963		 * dirty.  If the buffer was added to the BJ_Forget list
 964		 * by jbd2_journal_forget, it may no longer be dirty and
 965		 * there's no point in keeping a checkpoint record for
 966		 * it. */
 967
 968		/*
  969		 * A buffer which has been freed while still being journaled by
  970		 * a previous transaction.
  971		 */
 972		if (buffer_freed(bh)) {
 973			/*
 974			 * If the running transaction is the one containing
 975			 * "add to orphan" operation (b_next_transaction !=
 976			 * NULL), we have to wait for that transaction to
 977			 * commit before we can really get rid of the buffer.
 978			 * So just clear b_modified to not confuse transaction
 979			 * credit accounting and refile the buffer to
 980			 * BJ_Forget of the running transaction. If the just
 981			 * committed transaction contains "add to orphan"
 982			 * operation, we can completely invalidate the buffer
  983			 * now. We are rather thorough in that, since the
  984			 * buffer may still be accessible when blocksize <
 985			 * pagesize and it is attached to the last partial
 986			 * page.
 987			 */
 988			jh->b_modified = 0;
 989			if (!jh->b_next_transaction) {
 990				clear_buffer_freed(bh);
 991				clear_buffer_jbddirty(bh);
 992				clear_buffer_mapped(bh);
 993				clear_buffer_new(bh);
 994				clear_buffer_req(bh);
 995				bh->b_bdev = NULL;
 996			}
 997		}
 998
 999		if (buffer_jbddirty(bh)) {
1000			JBUFFER_TRACE(jh, "add to new checkpointing trans");
1001			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
1002			if (is_journal_aborted(journal))
1003				clear_buffer_jbddirty(bh);
1004		} else {
1005			J_ASSERT_BH(bh, !buffer_dirty(bh));
1006			/*
1007			 * The buffer on BJ_Forget list and not jbddirty means
1008			 * it has been freed by this transaction and hence it
1009			 * could not have been reallocated until this
1010			 * transaction has committed. *BUT* it could be
1011			 * reallocated once we have written all the data to
1012			 * disk and before we process the buffer on BJ_Forget
1013			 * list.
1014			 */
1015			if (!jh->b_next_transaction)
1016				try_to_free = 1;
1017		}
1018		JBUFFER_TRACE(jh, "refile or unfile buffer");
1019		__jbd2_journal_refile_buffer(jh);
1020		jbd_unlock_bh_state(bh);
1021		if (try_to_free)
1022			release_buffer_page(bh);	/* Drops bh reference */
1023		else
1024			__brelse(bh);
1025		cond_resched_lock(&journal->j_list_lock);
1026	}
1027	spin_unlock(&journal->j_list_lock);
1028	/*
1029	 * This is a bit sleazy.  We use j_list_lock to protect transition
1030	 * of a transaction into T_FINISHED state and calling
1031	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1032	 * other checkpointing code processing the transaction...
1033	 */
1034	write_lock(&journal->j_state_lock);
1035	spin_lock(&journal->j_list_lock);
1036	/*
1037	 * Now recheck if some buffers did not get attached to the transaction
1038	 * while the lock was dropped...
1039	 */
1040	if (commit_transaction->t_forget) {
1041		spin_unlock(&journal->j_list_lock);
1042		write_unlock(&journal->j_state_lock);
1043		goto restart_loop;
1044	}
1045
1046	/* Add the transaction to the checkpoint list
1047	 * __journal_remove_checkpoint() can not destroy transaction
1048	 * under us because it is not marked as T_FINISHED yet */
1049	if (journal->j_checkpoint_transactions == NULL) {
1050		journal->j_checkpoint_transactions = commit_transaction;
1051		commit_transaction->t_cpnext = commit_transaction;
1052		commit_transaction->t_cpprev = commit_transaction;
1053	} else {
1054		commit_transaction->t_cpnext =
1055			journal->j_checkpoint_transactions;
1056		commit_transaction->t_cpprev =
1057			commit_transaction->t_cpnext->t_cpprev;
1058		commit_transaction->t_cpnext->t_cpprev =
1059			commit_transaction;
1060		commit_transaction->t_cpprev->t_cpnext =
1061				commit_transaction;
1062	}
1063	spin_unlock(&journal->j_list_lock);
1064
1065	/* Done with this transaction! */
1066
1067	jbd_debug(3, "JBD2: commit phase 7\n");
1068
1069	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1070
1071	commit_transaction->t_start = jiffies;
1072	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1073					      commit_transaction->t_start);
1074
1075	/*
1076	 * File the transaction statistics
1077	 */
1078	stats.ts_tid = commit_transaction->t_tid;
1079	stats.run.rs_handle_count =
1080		atomic_read(&commit_transaction->t_handle_count);
1081	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1082			     commit_transaction->t_tid, &stats.run);
1083	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1084
1085	commit_transaction->t_state = T_COMMIT_CALLBACK;
1086	J_ASSERT(commit_transaction == journal->j_committing_transaction);
1087	journal->j_commit_sequence = commit_transaction->t_tid;
1088	journal->j_committing_transaction = NULL;
1089	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1090
1091	/*
1092	 * weight the commit time higher than the average time so we don't
1093	 * react too strongly to vast changes in the commit time
1094	 */
1095	if (likely(journal->j_average_commit_time))
1096		journal->j_average_commit_time = (commit_time +
1097				journal->j_average_commit_time*3) / 4;
1098	else
1099		journal->j_average_commit_time = commit_time;
1100
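	/*
	 * The average above is a 3:1 weighted moving average, i.e.
	 * new_avg = (commit_time + 3 * old_avg) / 4. With hypothetical
	 * values of old_avg = 8ms and commit_time = 20ms the new average
	 * becomes 11ms, so a single slow commit only nudges the estimate.
	 */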
1101	write_unlock(&journal->j_state_lock);
1102
1103	if (journal->j_commit_callback)
1104		journal->j_commit_callback(journal, commit_transaction);
1105
1106	trace_jbd2_end_commit(journal, commit_transaction);
1107	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
1108		  journal->j_commit_sequence, journal->j_tail_sequence);
1109
1110	write_lock(&journal->j_state_lock);
1111	spin_lock(&journal->j_list_lock);
1112	commit_transaction->t_state = T_FINISHED;
1113	/* Check if the transaction can be dropped now that we are finished */
1114	if (commit_transaction->t_checkpoint_list == NULL &&
1115	    commit_transaction->t_checkpoint_io_list == NULL) {
1116		__jbd2_journal_drop_transaction(journal, commit_transaction);
1117		jbd2_journal_free_transaction(commit_transaction);
1118	}
1119	spin_unlock(&journal->j_list_lock);
1120	write_unlock(&journal->j_state_lock);
1121	wake_up(&journal->j_wait_done_commit);
1122
1123	/*
1124	 * Calculate overall stats
1125	 */
1126	spin_lock(&journal->j_history_lock);
1127	journal->j_stats.ts_tid++;
1128	journal->j_stats.ts_requested += stats.ts_requested;
1129	journal->j_stats.run.rs_wait += stats.run.rs_wait;
1130	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1131	journal->j_stats.run.rs_running += stats.run.rs_running;
1132	journal->j_stats.run.rs_locked += stats.run.rs_locked;
1133	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1134	journal->j_stats.run.rs_logging += stats.run.rs_logging;
1135	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1136	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1137	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1138	spin_unlock(&journal->j_history_lock);
1139}