/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

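/*
 * Note: page_cache_get()/page_cache_release() are this era's page cache
 * wrappers around get_page()/put_page(); the extra reference above keeps
 * the page pinned across try_to_free_buffers() and unlock_page().
 */
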
/*
 * Decrement reference counter for data buffer. If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
	if (buffer_freed(bh)) {
		clear_buffer_freed(bh);
		release_buffer_page(bh);
	} else
		put_bh(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
	if (!jbd_trylock_bh_state(bh)) {
		spin_unlock(&journal->j_list_lock);
		schedule();
		return 0;
	}
	return 1;
}

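/*
 * The canonical lock ordering is jbd_lock_bh_state() before j_list_lock.
 * Callers of inverted_lock() already hold j_list_lock, hence the trylock:
 * on failure they must drop j_list_lock, reschedule, and retake both
 * locks in the correct order.
 */
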
/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	journal_header_t *header;
	int ret;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	header = (journal_header_t *)(bh->b_data);
	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);

	if (journal->j_flags & JFS_BARRIER)
		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
	else
		ret = sync_dirty_buffer(bh);

	put_bh(bh);		/* One for getblk() */
	journal_put_journal_head(descriptor);

	return (ret == -EIO);
}

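/*
 * Note on barriers: WRITE_FLUSH_FUA issues a preceding cache flush and
 * writes the commit block with Forced Unit Access, so the commit record
 * only counts as durable once all earlier journal blocks have reached
 * stable storage.  Without JFS_BARRIER we fall back to a plain
 * synchronous write and give up that ordering guarantee.
 */
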
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
				   int write_op)
{
	int i;

	for (i = 0; i < bufs; i++) {
		wbuf[i]->b_end_io = end_buffer_write_sync;
		/* We use-up our safety reference in submit_bh() */
		submit_bh(write_op, wbuf[i]);
	}
}

/*
 *  Submit all the data buffers to disk
 */
static int journal_submit_data_buffers(journal_t *journal,
				       transaction_t *commit_transaction,
				       int write_op)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	int locked;
	int bufs = 0;
	struct buffer_head **wbuf = journal->j_wbuf;
	int err = 0;

	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 *
	 * Cleanup any flushed data buffers from the data list.  Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

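	/*
	 * Each buffer on t_sync_datalist falls into one of three cases
	 * below: dirty buffers are batched into wbuf[] and submitted;
	 * buffers already under IO are filed on BJ_Locked so we can wait
	 * for them later; clean, unlocked buffers are unfiled and
	 * released immediately.
	 */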
	while (commit_transaction->t_sync_datalist) {
		jh = commit_transaction->t_sync_datalist;
		bh = jh2bh(jh);
		locked = 0;

		/* Get reference just to make sure buffer does not disappear
		 * when we are forced to drop various locks */
		get_bh(bh);
		/* If the buffer is dirty, we need to submit IO and hence
		 * we need the buffer lock. We try to lock the buffer without
		 * blocking. If we fail, we need to drop j_list_lock and do
		 * blocking lock_buffer().
		 */
		if (buffer_dirty(bh)) {
			if (!trylock_buffer(bh)) {
				BUFFER_TRACE(bh, "needs blocking lock");
				spin_unlock(&journal->j_list_lock);
				trace_jbd_do_submit_data(journal,
						     commit_transaction);
				/* Write out all data to prevent deadlocks */
				journal_do_submit_data(wbuf, bufs, write_op);
				bufs = 0;
				lock_buffer(bh);
				spin_lock(&journal->j_list_lock);
			}
			locked = 1;
		}
		/* We have to get bh_state lock. Again out of order, sigh. */
		if (!inverted_lock(journal, bh)) {
			jbd_lock_bh_state(bh);
			spin_lock(&journal->j_list_lock);
		}
		/* Someone already cleaned up the buffer? */
		if (!buffer_jbd(bh) || bh2jh(bh) != jh
			|| jh->b_transaction != commit_transaction
			|| jh->b_jlist != BJ_SyncData) {
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			BUFFER_TRACE(bh, "already cleaned up");
			release_data_buffer(bh);
			continue;
		}
		if (locked && test_clear_buffer_dirty(bh)) {
			BUFFER_TRACE(bh, "needs writeout, adding to array");
			wbuf[bufs++] = bh;
			__journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (bufs == journal->j_wbufsize) {
				spin_unlock(&journal->j_list_lock);
				trace_jbd_do_submit_data(journal,
						     commit_transaction);
				journal_do_submit_data(wbuf, bufs, write_op);
				bufs = 0;
				goto write_out_data;
			}
		} else if (!locked && buffer_locked(bh)) {
			__journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			put_bh(bh);
		} else {
			BUFFER_TRACE(bh, "writeout complete: unfile");
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			release_data_buffer(bh);
		}

		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
			spin_unlock(&journal->j_list_lock);
			goto write_out_data;
		}
	}
	spin_unlock(&journal->j_list_lock);
	trace_jbd_do_submit_data(journal, commit_transaction);
	journal_do_submit_data(wbuf, bufs, write_op);

	return err;
}

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned int blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd_commit_locking(journal, commit_transaction);
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

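	/*
	 * The prepare_to_wait()/schedule() pattern above re-tests
	 * t_updates after queueing on j_wait_updates, so a wakeup from a
	 * completing handle cannot be lost between the test and the
	 * sleep.
	 */
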
	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	trace_jbd_commit_flushing(journal, commit_transaction);
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	blk_start_plug(&plug);
	err = journal_submit_data_buffers(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

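	/*
	 * The plug batched the submit_bh() calls above on a per-task
	 * list; blk_finish_plug() pushes them to the device queue in one
	 * go, giving the IO scheduler a chance to merge adjacent data
	 * blocks.
	 */
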
	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			spin_lock(&journal->j_list_lock);
		}
		if (unlikely(!buffer_uptodate(bh))) {
			if (!trylock_page(bh->b_page)) {
				spin_unlock(&journal->j_list_lock);
				lock_page(bh->b_page);
				spin_lock(&journal->j_list_lock);
			}
			if (bh->b_page->mapping)
				set_bit(AS_EIO, &bh->b_page->mapping->flags);

			unlock_page(bh->b_page);
			SetPageError(bh->b_page);
			err = -EIO;
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
		    jh->b_transaction == commit_transaction &&
		    jh->b_jlist == BJ_Locked)
			__journal_unfile_buffer(jh);
		jbd_unlock_bh_state(bh);
		release_data_buffer(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

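	/*
	 * Any IO failure above was recorded two ways: err carries -EIO
	 * for the warning below, and AS_EIO on the page's mapping makes
	 * the error visible to a later fsync() on the file itself.
	 */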
	if (err) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: Detected IO errors while flushing file data "
			"on %s\n", bdevname(journal->j_fs_dev, b));
		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
			journal_abort(journal, err);
		err = 0;
	}

	blk_start_plug(&plug);

	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	trace_jbd_commit_logging(journal, commit_transaction);
	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		get_bh(jh2bh(jh));

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

		set_buffer_jwrite(jh2bh(jh));
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_buffer_jwrite(jh2bh(new_jh));
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

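		/*
		 * Descriptor layout so far: a journal_header_t, then one
		 * 8-byte tag (big-endian block number + flags) per logged
		 * buffer.  Only the first tag carries the 16-byte UUID;
		 * later tags set JFS_FLAG_SAME_UUID instead, which is why
		 * the space check below reserves tag size + 16 bytes.
		 */
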
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

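	/*
	 * Note: the JFS_FLAG_ESCAPE handling above covers blocks whose
	 * first four bytes happen to equal JFS_MAGIC_NUMBER.
	 * journal_write_metadata_buffer() zeroes those bytes in the
	 * on-disk copy so recovery cannot mistake the block for a
	 * journal header, and the escape flag tells replay to restore
	 * them.
	 */
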
	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
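	/*
	 * t_iobuf_list and t_shadow_list are consumed in lockstep: every
	 * temporary BJ_IO buffer written by journal_write_metadata_buffer()
	 * has a BJ_Shadow twin wrapping the real metadata buffer, so each
	 * iteration below frees one dummy bh and refiles its shadow to
	 * BJ_Forget.
	 */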
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this
		 * IO to complete. The barrier must be here so that changes
		 * by journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 6\n");

	/* All metadata is written, now write commit record and do cleanup */
	spin_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_RECORD;
	spin_unlock(&journal->j_state_lock);

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a
		 * performance gain; it also stops aliasing problems if
		 * the buffer is left behind for writeback and gets
		 * reallocated for another use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * A buffer on the BJ_Forget list which is not
			 * jbddirty was freed by this transaction, and
			 * hence could not have been reallocated before
			 * this transaction committed.  *BUT* it could be
			 * reallocated once we have written all the data
			 * to disk and before we process the buffer on
			 * the BJ_Forget list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile freed buffer");
		__journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in commit time
	 */
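	/* i.e. an exponential moving average: avg = (3*new + old) / 4 */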
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time*3 +
				journal->j_average_commit_time) / 4;
	else
		journal->j_average_commit_time = commit_time;

	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	trace_jbd_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}