1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * linux/fs/jbd2/commit.c
4 *
5 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 *
7 * Copyright 1998 Red Hat corp --- All Rights Reserved
8 *
9 * Journal commit routines for the generic filesystem journaling code;
10 * part of the ext2fs journaling system.
11 */
12
13#include <linux/time.h>
14#include <linux/fs.h>
15#include <linux/jbd2.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/mm.h>
19#include <linux/pagemap.h>
20#include <linux/jiffies.h>
21#include <linux/crc32.h>
22#include <linux/writeback.h>
23#include <linux/backing-dev.h>
24#include <linux/bio.h>
25#include <linux/blkdev.h>
26#include <linux/bitops.h>
27#include <trace/events/jbd2.h>
28
29/*
30 * IO end handler for temporary buffer_heads handling writes to the journal.
31 */
32static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
33{
34 struct buffer_head *orig_bh = bh->b_private;
35
36 BUFFER_TRACE(bh, "");
37 if (uptodate)
38 set_buffer_uptodate(bh);
39 else
40 clear_buffer_uptodate(bh);
41 if (orig_bh) {
42 clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
43 smp_mb__after_atomic();
44 wake_up_bit(&orig_bh->b_state, BH_Shadow);
45 }
46 unlock_buffer(bh);
47}
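/*
 * A note on the shadow pairing assumed above: the buffer head written to the
 * journal is a temporary one created by jbd2_journal_write_metadata_buffer(),
 * with b_private pointing back at the original metadata buffer.  Clearing
 * BH_Shadow here is what lets a waiter in do_get_write_access(), which sleeps
 * on that bit, start modifying the buffer again once its image has safely
 * reached the log.
 */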
48
49/*
50 * When an ext4 file is truncated, it is possible that some pages are not
51 * successfully freed, because they are attached to a committing transaction.
52 * After the transaction commits, these pages are left on the LRU, with no
53 * ->mapping, and with attached buffers. These pages are trivially reclaimable
54 * by the VM, but their apparent absence upsets the VM accounting, and it makes
55 * the numbers in /proc/meminfo look odd.
56 *
57 * So here, we have a buffer which has just come off the forget list. Look to
58 * see if we can strip all buffers from the backing page.
59 *
60 * Called under lock_journal(), and possibly under journal_datalist_lock. The
61 * caller provided us with a ref against the buffer, and we drop that here.
62 */
63static void release_buffer_page(struct buffer_head *bh)
64{
65 struct page *page;
66
67 if (buffer_dirty(bh))
68 goto nope;
69 if (atomic_read(&bh->b_count) != 1)
70 goto nope;
71 page = bh->b_page;
72 if (!page)
73 goto nope;
74 if (page->mapping)
75 goto nope;
76
77 /* OK, it's a truncated page */
78 if (!trylock_page(page))
79 goto nope;
80
81 get_page(page);
82 __brelse(bh);
83 try_to_free_buffers(page);
84 unlock_page(page);
85 put_page(page);
86 return;
87
88nope:
89 __brelse(bh);
90}
91
92static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
93{
94 struct commit_header *h;
95 __u32 csum;
96
97 if (!jbd2_journal_has_csum_v2or3(j))
98 return;
99
100 h = (struct commit_header *)(bh->b_data);
101 h->h_chksum_type = 0;
102 h->h_chksum_size = 0;
103 h->h_chksum[0] = 0;
104 csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
105 h->h_chksum[0] = cpu_to_be32(csum);
106}
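/*
 * Roughly, the commit block checksum covers the whole journal block with the
 * checksum field itself zeroed, seeded with j_csum_seed (derived from the
 * journal UUID).  Recovery recomputes the same value and rejects the commit
 * block on mismatch, so a torn or corrupted commit record is treated as
 * "transaction not committed".
 */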
107
108/*
109 * Done it all: now submit the commit record. We should have
110 * cleaned up our previous buffers by now, so if we are in abort
111 * mode we can now just skip the rest of the journal write
112 * entirely.
113 *
114 * Returns 1 if the journal needs to be aborted or 0 on success
115 */
116static int journal_submit_commit_record(journal_t *journal,
117 transaction_t *commit_transaction,
118 struct buffer_head **cbh,
119 __u32 crc32_sum)
120{
121 struct commit_header *tmp;
122 struct buffer_head *bh;
123 int ret;
124 struct timespec64 now;
125
126 *cbh = NULL;
127
128 if (is_journal_aborted(journal))
129 return 0;
130
131 bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
132 JBD2_COMMIT_BLOCK);
133 if (!bh)
134 return 1;
135
136 tmp = (struct commit_header *)bh->b_data;
137 ktime_get_coarse_real_ts64(&now);
138 tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
139 tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
140
141 if (jbd2_has_feature_checksum(journal)) {
142 tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
143 tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
144 tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
145 }
146 jbd2_commit_block_csum_set(journal, bh);
147
148 BUFFER_TRACE(bh, "submit commit block");
149 lock_buffer(bh);
150 clear_buffer_dirty(bh);
151 set_buffer_uptodate(bh);
152 bh->b_end_io = journal_end_buffer_io_sync;
153
154 if (journal->j_flags & JBD2_BARRIER &&
155 !jbd2_has_feature_async_commit(journal))
156 ret = submit_bh(REQ_OP_WRITE,
157 REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
158 else
159 ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
160
161 *cbh = bh;
162 return ret;
163}
164
165/*
166 * This function, along with journal_submit_commit_record(),
167 * allows the commit record to be written asynchronously.
168 */
169static int journal_wait_on_commit_record(journal_t *journal,
170 struct buffer_head *bh)
171{
172 int ret = 0;
173
174 clear_buffer_dirty(bh);
175 wait_on_buffer(bh);
176
177 if (unlikely(!buffer_uptodate(bh)))
178 ret = -EIO;
179 put_bh(bh); /* One for getblk() */
180
181 return ret;
182}
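/*
 * Ordering of the commit record, in outline: without the async_commit
 * feature the record is submitted only after all other journal IO has
 * completed, and with REQ_PREFLUSH | REQ_FUA when barriers are enabled, so
 * its presence on disk implies the rest of the transaction is stable.  With
 * async_commit the record may be submitted early; recovery then relies on
 * the per-transaction crc32_sum over the blocks written to the log to detect
 * a partially written transaction.
 */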
183
184/*
185 * Write the filemap data using the writepage() address_space_operation.
186 * We don't do block allocation here, even for delalloc: writepages()
187 * could allocate blocks under delayed allocation, and this path must
188 * write only blocks that are already allocated.
189 */
190static int journal_submit_inode_data_buffers(struct address_space *mapping,
191 loff_t dirty_start, loff_t dirty_end)
192{
193 int ret;
194 struct writeback_control wbc = {
195 .sync_mode = WB_SYNC_ALL,
196 .nr_to_write = mapping->nrpages * 2,
197 .range_start = dirty_start,
198 .range_end = dirty_end,
199 };
200
201 ret = generic_writepages(mapping, &wbc);
202 return ret;
203}
204
205/*
206 * Submit all the data buffers of inode associated with the transaction to
207 * disk.
208 *
209 * We are in a committing transaction. Therefore no new inode can be added to
210 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
211 * operate on from being released while we write out pages.
212 */
213static int journal_submit_data_buffers(journal_t *journal,
214 transaction_t *commit_transaction)
215{
216 struct jbd2_inode *jinode;
217 int err, ret = 0;
218 struct address_space *mapping;
219
220 spin_lock(&journal->j_list_lock);
221 list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
222 loff_t dirty_start = jinode->i_dirty_start;
223 loff_t dirty_end = jinode->i_dirty_end;
224
225 if (!(jinode->i_flags & JI_WRITE_DATA))
226 continue;
227 mapping = jinode->i_vfs_inode->i_mapping;
228 jinode->i_flags |= JI_COMMIT_RUNNING;
229 spin_unlock(&journal->j_list_lock);
230 /*
231		 * Submit the inode data buffers. We use writepage
232		 * instead of writepages because writepages can do
233		 * block allocation with delalloc, and we need to write
234		 * only already-allocated blocks here.
235 */
236 trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
237 err = journal_submit_inode_data_buffers(mapping, dirty_start,
238 dirty_end);
239 if (!ret)
240 ret = err;
241 spin_lock(&journal->j_list_lock);
242 J_ASSERT(jinode->i_transaction == commit_transaction);
243 jinode->i_flags &= ~JI_COMMIT_RUNNING;
244 smp_mb();
245 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
246 }
247 spin_unlock(&journal->j_list_lock);
248 return ret;
249}
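/*
 * JI_COMMIT_RUNNING is what keeps the jbd2_inode alive while j_list_lock is
 * dropped above: jbd2_journal_release_jbd_inode() waits on the
 * __JI_COMMIT_RUNNING bit before tearing the structure down, so the inode
 * cannot disappear under us while its pages are being written out.
 */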
250
251/*
252 * Wait for data submitted for writeout, refile inodes to proper
253 * transaction if needed.
254 *
255 */
256static int journal_finish_inode_data_buffers(journal_t *journal,
257 transaction_t *commit_transaction)
258{
259 struct jbd2_inode *jinode, *next_i;
260 int err, ret = 0;
261
262 /* For locking, see the comment in journal_submit_data_buffers() */
263 spin_lock(&journal->j_list_lock);
264 list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
265 loff_t dirty_start = jinode->i_dirty_start;
266 loff_t dirty_end = jinode->i_dirty_end;
267
268 if (!(jinode->i_flags & JI_WAIT_DATA))
269 continue;
270 jinode->i_flags |= JI_COMMIT_RUNNING;
271 spin_unlock(&journal->j_list_lock);
272 err = filemap_fdatawait_range_keep_errors(
273 jinode->i_vfs_inode->i_mapping, dirty_start,
274 dirty_end);
275 if (!ret)
276 ret = err;
277 spin_lock(&journal->j_list_lock);
278 jinode->i_flags &= ~JI_COMMIT_RUNNING;
279 smp_mb();
280 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
281 }
282
283 /* Now refile inode to proper lists */
284 list_for_each_entry_safe(jinode, next_i,
285 &commit_transaction->t_inode_list, i_list) {
286 list_del(&jinode->i_list);
287 if (jinode->i_next_transaction) {
288 jinode->i_transaction = jinode->i_next_transaction;
289 jinode->i_next_transaction = NULL;
290 list_add(&jinode->i_list,
291 &jinode->i_transaction->t_inode_list);
292 } else {
293 jinode->i_transaction = NULL;
294 jinode->i_dirty_start = 0;
295 jinode->i_dirty_end = 0;
296 }
297 }
298 spin_unlock(&journal->j_list_lock);
299
300 return ret;
301}
302
303static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
304{
305 struct page *page = bh->b_page;
306 char *addr;
307 __u32 checksum;
308
309 addr = kmap_atomic(page);
310 checksum = crc32_be(crc32_sum,
311 (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
312 kunmap_atomic(addr);
313
314 return checksum;
315}
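/*
 * crc32_sum is accumulated across every block written to the log for this
 * transaction and, when the JBD2 "checksum" compat feature is set, ends up
 * in the commit record (see journal_submit_commit_record()), where recovery
 * can use it to validate an async-committed transaction.
 */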
316
317static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
318 unsigned long long block)
319{
320 tag->t_blocknr = cpu_to_be32(block & (u32)~0);
321 if (jbd2_has_feature_64bit(j))
322 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
323}
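/*
 * Worked example: with jbd2_has_feature_64bit(), block 0x123456789 is stored
 * as t_blocknr = 0x23456789 and t_blocknr_high = 0x1 (the two-step
 * ">> 31 >> 1" is simply a shift by 32 written so it stays safe even if the
 * operand were only 32 bits wide).  Without the 64-bit feature only the low
 * 32 bits are recorded.
 */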
324
325static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
326 struct buffer_head *bh, __u32 sequence)
327{
328 journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
329 struct page *page = bh->b_page;
330 __u8 *addr;
331 __u32 csum32;
332 __be32 seq;
333
334 if (!jbd2_journal_has_csum_v2or3(j))
335 return;
336
337 seq = cpu_to_be32(sequence);
338 addr = kmap_atomic(page);
339 csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
340 csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
341 bh->b_size);
342 kunmap_atomic(addr);
343
344 if (jbd2_has_feature_csum3(j))
345 tag3->t_checksum = cpu_to_be32(csum32);
346 else
347 tag->t_checksum = cpu_to_be16(csum32);
348}
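/*
 * The tag checksum width depends on the checksum feature: with csum3 the
 * full 32-bit value fits in journal_block_tag3_t, while the older csum2
 * format only has a 16-bit t_checksum field, so the value is truncated by
 * the cpu_to_be16() above.
 */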
349/*
350 * jbd2_journal_commit_transaction
351 *
352 * The primary function for committing a transaction to the log. This
353 * function is called by the journal thread to begin a complete commit.
354 */
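/*
 * In outline, the commit proceeds through the phases traced by the
 * jbd_debug() calls below: lock the running transaction and drain updates,
 * discard unused reserved buffers (phase 1), write dirty data and revoke
 * records (2a/2b), write the metadata blocks through temporary shadow
 * buffers, wait for that IO (3) and for the descriptor/revoke blocks (4),
 * write and flush the commit record (5), process the forget list and move
 * buffers to the checkpoint lists (6), then update statistics and mark the
 * transaction T_FINISHED (7).
 */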
355void jbd2_journal_commit_transaction(journal_t *journal)
356{
357 struct transaction_stats_s stats;
358 transaction_t *commit_transaction;
359 struct journal_head *jh;
360 struct buffer_head *descriptor;
361 struct buffer_head **wbuf = journal->j_wbuf;
362 int bufs;
363 int flags;
364 int err;
365 unsigned long long blocknr;
366 ktime_t start_time;
367 u64 commit_time;
368 char *tagp = NULL;
369 journal_block_tag_t *tag = NULL;
370 int space_left = 0;
371 int first_tag = 0;
372 int tag_flag;
373 int i;
374 int tag_bytes = journal_tag_bytes(journal);
375 struct buffer_head *cbh = NULL; /* For transactional checksums */
376 __u32 crc32_sum = ~0;
377 struct blk_plug plug;
378 /* Tail of the journal */
379 unsigned long first_block;
380 tid_t first_tid;
381 int update_tail;
382 int csum_size = 0;
383 LIST_HEAD(io_bufs);
384 LIST_HEAD(log_bufs);
385
386 if (jbd2_journal_has_csum_v2or3(journal))
387 csum_size = sizeof(struct jbd2_journal_block_tail);
388
389 /*
390 * First job: lock down the current transaction and wait for
391 * all outstanding updates to complete.
392 */
393
394 /* Do we need to erase the effects of a prior jbd2_journal_flush? */
395 if (journal->j_flags & JBD2_FLUSHED) {
396 jbd_debug(3, "super block updated\n");
397 mutex_lock_io(&journal->j_checkpoint_mutex);
398 /*
399 * We hold j_checkpoint_mutex so tail cannot change under us.
400 * We don't need any special data guarantees for writing sb
401 * since journal is empty and it is ok for write to be
402 * flushed only with transaction commit.
403 */
404 jbd2_journal_update_sb_log_tail(journal,
405 journal->j_tail_sequence,
406 journal->j_tail,
407 REQ_SYNC);
408 mutex_unlock(&journal->j_checkpoint_mutex);
409 } else {
410 jbd_debug(3, "superblock not updated\n");
411 }
412
413 J_ASSERT(journal->j_running_transaction != NULL);
414 J_ASSERT(journal->j_committing_transaction == NULL);
415
416 commit_transaction = journal->j_running_transaction;
417
418 trace_jbd2_start_commit(journal, commit_transaction);
419 jbd_debug(1, "JBD2: starting commit of transaction %d\n",
420 commit_transaction->t_tid);
421
422 write_lock(&journal->j_state_lock);
423 J_ASSERT(commit_transaction->t_state == T_RUNNING);
424 commit_transaction->t_state = T_LOCKED;
425
426 trace_jbd2_commit_locking(journal, commit_transaction);
427 stats.run.rs_wait = commit_transaction->t_max_wait;
428 stats.run.rs_request_delay = 0;
429 stats.run.rs_locked = jiffies;
430 if (commit_transaction->t_requested)
431 stats.run.rs_request_delay =
432 jbd2_time_diff(commit_transaction->t_requested,
433 stats.run.rs_locked);
434 stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
435 stats.run.rs_locked);
436
437 spin_lock(&commit_transaction->t_handle_lock);
438 while (atomic_read(&commit_transaction->t_updates)) {
439 DEFINE_WAIT(wait);
440
441 prepare_to_wait(&journal->j_wait_updates, &wait,
442 TASK_UNINTERRUPTIBLE);
443 if (atomic_read(&commit_transaction->t_updates)) {
444 spin_unlock(&commit_transaction->t_handle_lock);
445 write_unlock(&journal->j_state_lock);
446 schedule();
447 write_lock(&journal->j_state_lock);
448 spin_lock(&commit_transaction->t_handle_lock);
449 }
450 finish_wait(&journal->j_wait_updates, &wait);
451 }
452 spin_unlock(&commit_transaction->t_handle_lock);
453 commit_transaction->t_state = T_SWITCH;
454 write_unlock(&journal->j_state_lock);
455
456 J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
457 journal->j_max_transaction_buffers);
458
459 /*
460 * First thing we are allowed to do is to discard any remaining
461 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
462 * that there are no such buffers: if a large filesystem
463 * operation like a truncate needs to split itself over multiple
464 * transactions, then it may try to do a jbd2_journal_restart() while
465 * there are still BJ_Reserved buffers outstanding. These must
466 * be released cleanly from the current transaction.
467 *
468 * In this case, the filesystem must still reserve write access
469 * again before modifying the buffer in the new transaction, but
470 * we do not require it to remember exactly which old buffers it
471 * has reserved. This is consistent with the existing behaviour
472 * that multiple jbd2_journal_get_write_access() calls to the same
473 * buffer are perfectly permissible.
474 */
475 while (commit_transaction->t_reserved_list) {
476 jh = commit_transaction->t_reserved_list;
477 JBUFFER_TRACE(jh, "reserved, unused: refile");
478 /*
479 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
480 * leave undo-committed data.
481 */
482 if (jh->b_committed_data) {
483 struct buffer_head *bh = jh2bh(jh);
484
485 spin_lock(&jh->b_state_lock);
486 jbd2_free(jh->b_committed_data, bh->b_size);
487 jh->b_committed_data = NULL;
488 spin_unlock(&jh->b_state_lock);
489 }
490 jbd2_journal_refile_buffer(journal, jh);
491 }
492
493 /*
494 * Now try to drop any written-back buffers from the journal's
495 * checkpoint lists. We do this *before* commit because it potentially
496 * frees some memory
497 */
498 spin_lock(&journal->j_list_lock);
499 __jbd2_journal_clean_checkpoint_list(journal, false);
500 spin_unlock(&journal->j_list_lock);
501
502 jbd_debug(3, "JBD2: commit phase 1\n");
503
504 /*
505	 * Clear the revoked flag to reflect that there are no revoked
506	 * buffers in the next transaction that is about to be started.
507 */
508 jbd2_clear_buffer_revoked_flags(journal);
509
510 /*
511 * Switch to a new revoke table.
512 */
513 jbd2_journal_switch_revoke_table(journal);
514
515 /*
516	 * Reserved credits cannot be claimed anymore; free them.
517 */
518 atomic_sub(atomic_read(&journal->j_reserved_credits),
519 &commit_transaction->t_outstanding_credits);
520
521 write_lock(&journal->j_state_lock);
522 trace_jbd2_commit_flushing(journal, commit_transaction);
523 stats.run.rs_flushing = jiffies;
524 stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
525 stats.run.rs_flushing);
526
527 commit_transaction->t_state = T_FLUSH;
528 journal->j_committing_transaction = commit_transaction;
529 journal->j_running_transaction = NULL;
530 start_time = ktime_get();
531 commit_transaction->t_log_start = journal->j_head;
532 wake_up(&journal->j_wait_transaction_locked);
533 write_unlock(&journal->j_state_lock);
534
535 jbd_debug(3, "JBD2: commit phase 2a\n");
536
537 /*
538 * Now start flushing things to disk, in the order they appear
539 * on the transaction lists. Data blocks go first.
540 */
541 err = journal_submit_data_buffers(journal, commit_transaction);
542 if (err)
543 jbd2_journal_abort(journal, err);
544
545 blk_start_plug(&plug);
546 jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
547
548 jbd_debug(3, "JBD2: commit phase 2b\n");
549
550 /*
551 * Way to go: we have now written out all of the data for a
552 * transaction! Now comes the tricky part: we need to write out
553 * metadata. Loop over the transaction's entire buffer list:
554 */
555 write_lock(&journal->j_state_lock);
556 commit_transaction->t_state = T_COMMIT;
557 write_unlock(&journal->j_state_lock);
558
559 trace_jbd2_commit_logging(journal, commit_transaction);
560 stats.run.rs_logging = jiffies;
561 stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
562 stats.run.rs_logging);
563 stats.run.rs_blocks = commit_transaction->t_nr_buffers;
564 stats.run.rs_blocks_logged = 0;
565
566 J_ASSERT(commit_transaction->t_nr_buffers <=
567 atomic_read(&commit_transaction->t_outstanding_credits));
568
569 err = 0;
570 bufs = 0;
571 descriptor = NULL;
572 while (commit_transaction->t_buffers) {
573
574 /* Find the next buffer to be journaled... */
575
576 jh = commit_transaction->t_buffers;
577
578 /* If we're in abort mode, we just un-journal the buffer and
579 release it. */
580
581 if (is_journal_aborted(journal)) {
582 clear_buffer_jbddirty(jh2bh(jh));
583 JBUFFER_TRACE(jh, "journal is aborting: refile");
584 jbd2_buffer_abort_trigger(jh,
585 jh->b_frozen_data ?
586 jh->b_frozen_triggers :
587 jh->b_triggers);
588 jbd2_journal_refile_buffer(journal, jh);
589 /* If that was the last one, we need to clean up
590 * any descriptor buffers which may have been
591 * already allocated, even if we are now
592 * aborting. */
593 if (!commit_transaction->t_buffers)
594 goto start_journal_io;
595 continue;
596 }
597
598 /* Make sure we have a descriptor block in which to
599 record the metadata buffer. */
600
601 if (!descriptor) {
602 J_ASSERT (bufs == 0);
603
604 jbd_debug(4, "JBD2: get descriptor\n");
605
606 descriptor = jbd2_journal_get_descriptor_buffer(
607 commit_transaction,
608 JBD2_DESCRIPTOR_BLOCK);
609 if (!descriptor) {
610 jbd2_journal_abort(journal, -EIO);
611 continue;
612 }
613
614 jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
615 (unsigned long long)descriptor->b_blocknr,
616 descriptor->b_data);
617 tagp = &descriptor->b_data[sizeof(journal_header_t)];
618 space_left = descriptor->b_size -
619 sizeof(journal_header_t);
620 first_tag = 1;
621 set_buffer_jwrite(descriptor);
622 set_buffer_dirty(descriptor);
623 wbuf[bufs++] = descriptor;
624
625 /* Record it so that we can wait for IO
626 completion later */
627 BUFFER_TRACE(descriptor, "ph3: file as descriptor");
628 jbd2_file_log_bh(&log_bufs, descriptor);
629 }
630
631 /* Where is the buffer to be written? */
632
633 err = jbd2_journal_next_log_block(journal, &blocknr);
634 /* If the block mapping failed, just abandon the buffer
635 and repeat this loop: we'll fall into the
636 refile-on-abort condition above. */
637 if (err) {
638 jbd2_journal_abort(journal, err);
639 continue;
640 }
641
642 /*
643 * start_this_handle() uses t_outstanding_credits to determine
644 * the free space in the log.
645 */
646 atomic_dec(&commit_transaction->t_outstanding_credits);
647
648 /* Bump b_count to prevent truncate from stumbling over
649 the shadowed buffer! @@@ This can go if we ever get
650 rid of the shadow pairing of buffers. */
651 atomic_inc(&jh2bh(jh)->b_count);
652
653 /*
654 * Make a temporary IO buffer with which to write it out
655 * (this will requeue the metadata buffer to BJ_Shadow).
656 */
657 set_bit(BH_JWrite, &jh2bh(jh)->b_state);
658 JBUFFER_TRACE(jh, "ph3: write metadata");
659 flags = jbd2_journal_write_metadata_buffer(commit_transaction,
660 jh, &wbuf[bufs], blocknr);
661 if (flags < 0) {
662 jbd2_journal_abort(journal, flags);
663 continue;
664 }
665 jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
666
667 /* Record the new block's tag in the current descriptor
668 buffer */
669
670 tag_flag = 0;
671 if (flags & 1)
672 tag_flag |= JBD2_FLAG_ESCAPE;
673 if (!first_tag)
674 tag_flag |= JBD2_FLAG_SAME_UUID;
675
676 tag = (journal_block_tag_t *) tagp;
677 write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
678 tag->t_flags = cpu_to_be16(tag_flag);
679 jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
680 commit_transaction->t_tid);
681 tagp += tag_bytes;
682 space_left -= tag_bytes;
683 bufs++;
684
685 if (first_tag) {
686 memcpy (tagp, journal->j_uuid, 16);
687 tagp += 16;
688 space_left -= 16;
689 first_tag = 0;
690 }
691
692 /* If there's no more to do, or if the descriptor is full,
693 let the IO rip! */
694
695 if (bufs == journal->j_wbufsize ||
696 commit_transaction->t_buffers == NULL ||
697 space_left < tag_bytes + 16 + csum_size) {
698
699 jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
700
701 /* Write an end-of-descriptor marker before
702 submitting the IOs. "tag" still points to
703 the last tag we set up. */
704
705 tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
706start_journal_io:
707 if (descriptor)
708 jbd2_descriptor_block_csum_set(journal,
709 descriptor);
710
711 for (i = 0; i < bufs; i++) {
712 struct buffer_head *bh = wbuf[i];
713 /*
714 * Compute checksum.
715 */
716 if (jbd2_has_feature_checksum(journal)) {
717 crc32_sum =
718 jbd2_checksum_data(crc32_sum, bh);
719 }
720
721 lock_buffer(bh);
722 clear_buffer_dirty(bh);
723 set_buffer_uptodate(bh);
724 bh->b_end_io = journal_end_buffer_io_sync;
725 submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
726 }
727 cond_resched();
728
729 /* Force a new descriptor to be generated next
730 time round the loop. */
731 descriptor = NULL;
732 bufs = 0;
733 }
734 }
735
736 err = journal_finish_inode_data_buffers(journal, commit_transaction);
737 if (err) {
738 printk(KERN_WARNING
739 "JBD2: Detected IO errors while flushing file data "
740 "on %s\n", journal->j_devname);
741 if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
742 jbd2_journal_abort(journal, err);
743 err = 0;
744 }
745
746 /*
747 * Get current oldest transaction in the log before we issue flush
748 * to the filesystem device. After the flush we can be sure that
749 * blocks of all older transactions are checkpointed to persistent
750 * storage and we will be safe to update journal start in the
751 * superblock with the numbers we get here.
752 */
753 update_tail =
754 jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
755
756 write_lock(&journal->j_state_lock);
757 if (update_tail) {
758 long freed = first_block - journal->j_tail;
759
760 if (first_block < journal->j_tail)
761 freed += journal->j_last - journal->j_first;
762		/* Update tail only if we free a significant amount of space */
763 if (freed < journal->j_maxlen / 4)
764 update_tail = 0;
765 }
766 J_ASSERT(commit_transaction->t_state == T_COMMIT);
767 commit_transaction->t_state = T_COMMIT_DFLUSH;
768 write_unlock(&journal->j_state_lock);
769
770 /*
771 * If the journal is not located on the file system device,
772 * then we must flush the file system device before we issue
773 * the commit record
774 */
775 if (commit_transaction->t_need_data_flush &&
776 (journal->j_fs_dev != journal->j_dev) &&
777 (journal->j_flags & JBD2_BARRIER))
778 blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
779
780 /* Done it all: now write the commit record asynchronously. */
781 if (jbd2_has_feature_async_commit(journal)) {
782 err = journal_submit_commit_record(journal, commit_transaction,
783 &cbh, crc32_sum);
784 if (err)
785 jbd2_journal_abort(journal, err);
786 }
787
788 blk_finish_plug(&plug);
789
790 /* Lo and behold: we have just managed to send a transaction to
791 the log. Before we can commit it, wait for the IO so far to
792 complete. Control buffers being written are on the
793 transaction's t_log_list queue, and metadata buffers are on
794 the io_bufs list.
795
796 Wait for the buffers in reverse order. That way we are
797 less likely to be woken up until all IOs have completed, and
798 so we incur less scheduling load.
799 */
800
801 jbd_debug(3, "JBD2: commit phase 3\n");
802
803 while (!list_empty(&io_bufs)) {
804 struct buffer_head *bh = list_entry(io_bufs.prev,
805 struct buffer_head,
806 b_assoc_buffers);
807
808 wait_on_buffer(bh);
809 cond_resched();
810
811 if (unlikely(!buffer_uptodate(bh)))
812 err = -EIO;
813 jbd2_unfile_log_bh(bh);
814 stats.run.rs_blocks_logged++;
815
816 /*
817 * The list contains temporary buffer heads created by
818 * jbd2_journal_write_metadata_buffer().
819 */
820 BUFFER_TRACE(bh, "dumping temporary bh");
821 __brelse(bh);
822 J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
823 free_buffer_head(bh);
824
825 /* We also have to refile the corresponding shadowed buffer */
826 jh = commit_transaction->t_shadow_list->b_tprev;
827 bh = jh2bh(jh);
828 clear_buffer_jwrite(bh);
829 J_ASSERT_BH(bh, buffer_jbddirty(bh));
830 J_ASSERT_BH(bh, !buffer_shadow(bh));
831
832 /* The metadata is now released for reuse, but we need
833 to remember it against this transaction so that when
834 we finally commit, we can do any checkpointing
835 required. */
836 JBUFFER_TRACE(jh, "file as BJ_Forget");
837 jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
838 JBUFFER_TRACE(jh, "brelse shadowed buffer");
839 __brelse(bh);
840 }
841
842 J_ASSERT (commit_transaction->t_shadow_list == NULL);
843
844 jbd_debug(3, "JBD2: commit phase 4\n");
845
846 /* Here we wait for the revoke record and descriptor record buffers */
847 while (!list_empty(&log_bufs)) {
848 struct buffer_head *bh;
849
850 bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
851 wait_on_buffer(bh);
852 cond_resched();
853
854 if (unlikely(!buffer_uptodate(bh)))
855 err = -EIO;
856
857 BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
858 clear_buffer_jwrite(bh);
859 jbd2_unfile_log_bh(bh);
860 stats.run.rs_blocks_logged++;
861 __brelse(bh); /* One for getblk */
862 /* AKPM: bforget here */
863 }
864
865 if (err)
866 jbd2_journal_abort(journal, err);
867
868 jbd_debug(3, "JBD2: commit phase 5\n");
869 write_lock(&journal->j_state_lock);
870 J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
871 commit_transaction->t_state = T_COMMIT_JFLUSH;
872 write_unlock(&journal->j_state_lock);
873
874 if (!jbd2_has_feature_async_commit(journal)) {
875 err = journal_submit_commit_record(journal, commit_transaction,
876 &cbh, crc32_sum);
877 if (err)
878 jbd2_journal_abort(journal, err);
879 }
880 if (cbh)
881 err = journal_wait_on_commit_record(journal, cbh);
882 stats.run.rs_blocks_logged++;
883 if (jbd2_has_feature_async_commit(journal) &&
884 journal->j_flags & JBD2_BARRIER) {
885 blkdev_issue_flush(journal->j_dev, GFP_NOFS);
886 }
887
888 if (err)
889 jbd2_journal_abort(journal, err);
890
891 WARN_ON_ONCE(
892 atomic_read(&commit_transaction->t_outstanding_credits) < 0);
893
894 /*
895 * Now disk caches for filesystem device are flushed so we are safe to
896 * erase checkpointed transactions from the log by updating journal
897 * superblock.
898 */
899 if (update_tail)
900 jbd2_update_log_tail(journal, first_tid, first_block);
901
902 /* End of a transaction! Finally, we can do checkpoint
903 processing: any buffers committed as a result of this
904 transaction can be removed from any checkpoint list it was on
905 before. */
906
907 jbd_debug(3, "JBD2: commit phase 6\n");
908
909 J_ASSERT(list_empty(&commit_transaction->t_inode_list));
910 J_ASSERT(commit_transaction->t_buffers == NULL);
911 J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
912 J_ASSERT(commit_transaction->t_shadow_list == NULL);
913
914restart_loop:
915 /*
916 * As there are other places (journal_unmap_buffer()) adding buffers
917 * to this list, we have to be careful and hold the j_list_lock.
918 */
919 spin_lock(&journal->j_list_lock);
920 while (commit_transaction->t_forget) {
921 transaction_t *cp_transaction;
922 struct buffer_head *bh;
923 int try_to_free = 0;
924 bool drop_ref;
925
926 jh = commit_transaction->t_forget;
927 spin_unlock(&journal->j_list_lock);
928 bh = jh2bh(jh);
929 /*
930 * Get a reference so that bh cannot be freed before we are
931 * done with it.
932 */
933 get_bh(bh);
934 spin_lock(&jh->b_state_lock);
935 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
936
937 /*
938 * If there is undo-protected committed data against
939 * this buffer, then we can remove it now. If it is a
940 * buffer needing such protection, the old frozen_data
941 * field now points to a committed version of the
942 * buffer, so rotate that field to the new committed
943 * data.
944 *
945 * Otherwise, we can just throw away the frozen data now.
946 *
947 * We also know that the frozen data has already fired
948 * its triggers if they exist, so we can clear that too.
949 */
950 if (jh->b_committed_data) {
951 jbd2_free(jh->b_committed_data, bh->b_size);
952 jh->b_committed_data = NULL;
953 if (jh->b_frozen_data) {
954 jh->b_committed_data = jh->b_frozen_data;
955 jh->b_frozen_data = NULL;
956 jh->b_frozen_triggers = NULL;
957 }
958 } else if (jh->b_frozen_data) {
959 jbd2_free(jh->b_frozen_data, bh->b_size);
960 jh->b_frozen_data = NULL;
961 jh->b_frozen_triggers = NULL;
962 }
963
964 spin_lock(&journal->j_list_lock);
965 cp_transaction = jh->b_cp_transaction;
966 if (cp_transaction) {
967 JBUFFER_TRACE(jh, "remove from old cp transaction");
968 cp_transaction->t_chp_stats.cs_dropped++;
969 __jbd2_journal_remove_checkpoint(jh);
970 }
971
972 /* Only re-checkpoint the buffer_head if it is marked
973 * dirty. If the buffer was added to the BJ_Forget list
974 * by jbd2_journal_forget, it may no longer be dirty and
975 * there's no point in keeping a checkpoint record for
976 * it. */
977
978 /*
979		 * If a buffer has been freed while still being journaled by a
980		 * previous transaction, refile the buffer to BJ_Forget of the
981		 * running transaction. If the just-committed transaction
982		 * contains an "add to orphan" operation, we can completely
983		 * invalidate the buffer now. We are rather thorough about that,
984		 * since the buffer may still be accessible when blocksize <
985		 * pagesize and it is attached to the last partial page.
986 */
987 if (buffer_freed(bh) && !jh->b_next_transaction) {
988 struct address_space *mapping;
989
990 clear_buffer_freed(bh);
991 clear_buffer_jbddirty(bh);
992
993 /*
994 * Block device buffers need to stay mapped all the
995 * time, so it is enough to clear buffer_jbddirty and
996 * buffer_freed bits. For the file mapping buffers (i.e.
997 * journalled data) we need to unmap buffer and clear
998 * more bits. We also need to be careful about the check
999 * because the data page mapping can get cleared under
1000 * our hands. Note that if mapping == NULL, we don't
1001 * need to make buffer unmapped because the page is
1002 * already detached from the mapping and buffers cannot
1003 * get reused.
1004 */
1005 mapping = READ_ONCE(bh->b_page->mapping);
1006 if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1007 clear_buffer_mapped(bh);
1008 clear_buffer_new(bh);
1009 clear_buffer_req(bh);
1010 bh->b_bdev = NULL;
1011 }
1012 }
1013
1014 if (buffer_jbddirty(bh)) {
1015 JBUFFER_TRACE(jh, "add to new checkpointing trans");
1016 __jbd2_journal_insert_checkpoint(jh, commit_transaction);
1017 if (is_journal_aborted(journal))
1018 clear_buffer_jbddirty(bh);
1019 } else {
1020 J_ASSERT_BH(bh, !buffer_dirty(bh));
1021 /*
1022		 * A buffer on the BJ_Forget list that is not jbddirty means
1023		 * it has been freed by this transaction, and hence it
1024 * could not have been reallocated until this
1025 * transaction has committed. *BUT* it could be
1026 * reallocated once we have written all the data to
1027 * disk and before we process the buffer on BJ_Forget
1028 * list.
1029 */
1030 if (!jh->b_next_transaction)
1031 try_to_free = 1;
1032 }
1033 JBUFFER_TRACE(jh, "refile or unfile buffer");
1034 drop_ref = __jbd2_journal_refile_buffer(jh);
1035 spin_unlock(&jh->b_state_lock);
1036 if (drop_ref)
1037 jbd2_journal_put_journal_head(jh);
1038 if (try_to_free)
1039 release_buffer_page(bh); /* Drops bh reference */
1040 else
1041 __brelse(bh);
1042 cond_resched_lock(&journal->j_list_lock);
1043 }
1044 spin_unlock(&journal->j_list_lock);
1045 /*
1046 * This is a bit sleazy. We use j_list_lock to protect transition
1047 * of a transaction into T_FINISHED state and calling
1048 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1049 * other checkpointing code processing the transaction...
1050 */
1051 write_lock(&journal->j_state_lock);
1052 spin_lock(&journal->j_list_lock);
1053 /*
1054 * Now recheck if some buffers did not get attached to the transaction
1055 * while the lock was dropped...
1056 */
1057 if (commit_transaction->t_forget) {
1058 spin_unlock(&journal->j_list_lock);
1059 write_unlock(&journal->j_state_lock);
1060 goto restart_loop;
1061 }
1062
1063 /* Add the transaction to the checkpoint list
1064 * __journal_remove_checkpoint() can not destroy transaction
1065 * under us because it is not marked as T_FINISHED yet */
1066 if (journal->j_checkpoint_transactions == NULL) {
1067 journal->j_checkpoint_transactions = commit_transaction;
1068 commit_transaction->t_cpnext = commit_transaction;
1069 commit_transaction->t_cpprev = commit_transaction;
1070 } else {
1071 commit_transaction->t_cpnext =
1072 journal->j_checkpoint_transactions;
1073 commit_transaction->t_cpprev =
1074 commit_transaction->t_cpnext->t_cpprev;
1075 commit_transaction->t_cpnext->t_cpprev =
1076 commit_transaction;
1077 commit_transaction->t_cpprev->t_cpnext =
1078 commit_transaction;
1079 }
1080 spin_unlock(&journal->j_list_lock);
1081
1082 /* Done with this transaction! */
1083
1084 jbd_debug(3, "JBD2: commit phase 7\n");
1085
1086 J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1087
1088 commit_transaction->t_start = jiffies;
1089 stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1090 commit_transaction->t_start);
1091
1092 /*
1093 * File the transaction statistics
1094 */
1095 stats.ts_tid = commit_transaction->t_tid;
1096 stats.run.rs_handle_count =
1097 atomic_read(&commit_transaction->t_handle_count);
1098 trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1099 commit_transaction->t_tid, &stats.run);
1100 stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1101
1102 commit_transaction->t_state = T_COMMIT_CALLBACK;
1103 J_ASSERT(commit_transaction == journal->j_committing_transaction);
1104 journal->j_commit_sequence = commit_transaction->t_tid;
1105 journal->j_committing_transaction = NULL;
1106 commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1107
1108 /*
1109 * weight the commit time higher than the average time so we don't
1110 * react too strongly to vast changes in the commit time
1111 */
1112 if (likely(journal->j_average_commit_time))
1113 journal->j_average_commit_time = (commit_time +
1114 journal->j_average_commit_time*3) / 4;
1115 else
1116 journal->j_average_commit_time = commit_time;
1117
1118 write_unlock(&journal->j_state_lock);
1119
1120 if (journal->j_commit_callback)
1121 journal->j_commit_callback(journal, commit_transaction);
1122
1123 trace_jbd2_end_commit(journal, commit_transaction);
1124 jbd_debug(1, "JBD2: commit %d complete, head %d\n",
1125 journal->j_commit_sequence, journal->j_tail_sequence);
1126
1127 write_lock(&journal->j_state_lock);
1128 spin_lock(&journal->j_list_lock);
1129 commit_transaction->t_state = T_FINISHED;
1130 /* Check if the transaction can be dropped now that we are finished */
1131 if (commit_transaction->t_checkpoint_list == NULL &&
1132 commit_transaction->t_checkpoint_io_list == NULL) {
1133 __jbd2_journal_drop_transaction(journal, commit_transaction);
1134 jbd2_journal_free_transaction(commit_transaction);
1135 }
1136 spin_unlock(&journal->j_list_lock);
1137 write_unlock(&journal->j_state_lock);
1138 wake_up(&journal->j_wait_done_commit);
1139
1140 /*
1141 * Calculate overall stats
1142 */
1143 spin_lock(&journal->j_history_lock);
1144 journal->j_stats.ts_tid++;
1145 journal->j_stats.ts_requested += stats.ts_requested;
1146 journal->j_stats.run.rs_wait += stats.run.rs_wait;
1147 journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1148 journal->j_stats.run.rs_running += stats.run.rs_running;
1149 journal->j_stats.run.rs_locked += stats.run.rs_locked;
1150 journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1151 journal->j_stats.run.rs_logging += stats.run.rs_logging;
1152 journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1153 journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1154 journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1155 spin_unlock(&journal->j_history_lock);
1156}
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * linux/fs/jbd2/commit.c
4 *
5 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 *
7 * Copyright 1998 Red Hat corp --- All Rights Reserved
8 *
9 * Journal commit routines for the generic filesystem journaling code;
10 * part of the ext2fs journaling system.
11 */
12
13#include <linux/time.h>
14#include <linux/fs.h>
15#include <linux/jbd2.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/mm.h>
19#include <linux/pagemap.h>
20#include <linux/jiffies.h>
21#include <linux/crc32.h>
22#include <linux/writeback.h>
23#include <linux/backing-dev.h>
24#include <linux/bio.h>
25#include <linux/blkdev.h>
26#include <linux/bitops.h>
27#include <trace/events/jbd2.h>
28
29/*
30 * IO end handler for temporary buffer_heads handling writes to the journal.
31 */
32static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
33{
34 struct buffer_head *orig_bh = bh->b_private;
35
36 BUFFER_TRACE(bh, "");
37 if (uptodate)
38 set_buffer_uptodate(bh);
39 else
40 clear_buffer_uptodate(bh);
41 if (orig_bh) {
42 clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
43 smp_mb__after_atomic();
44 wake_up_bit(&orig_bh->b_state, BH_Shadow);
45 }
46 unlock_buffer(bh);
47}
48
49/*
50 * When an ext4 file is truncated, it is possible that some pages are not
51 * successfully freed, because they are attached to a committing transaction.
52 * After the transaction commits, these pages are left on the LRU, with no
53 * ->mapping, and with attached buffers. These pages are trivially reclaimable
54 * by the VM, but their apparent absence upsets the VM accounting, and it makes
55 * the numbers in /proc/meminfo look odd.
56 *
57 * So here, we have a buffer which has just come off the forget list. Look to
58 * see if we can strip all buffers from the backing page.
59 *
60 * Called under lock_journal(), and possibly under journal_datalist_lock. The
61 * caller provided us with a ref against the buffer, and we drop that here.
62 */
63static void release_buffer_page(struct buffer_head *bh)
64{
65 struct folio *folio;
66
67 if (buffer_dirty(bh))
68 goto nope;
69 if (atomic_read(&bh->b_count) != 1)
70 goto nope;
71 folio = bh->b_folio;
72 if (folio->mapping)
73 goto nope;
74
75 /* OK, it's a truncated page */
76 if (!folio_trylock(folio))
77 goto nope;
78
79 folio_get(folio);
80 __brelse(bh);
81 try_to_free_buffers(folio);
82 folio_unlock(folio);
83 folio_put(folio);
84 return;
85
86nope:
87 __brelse(bh);
88}
89
90static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
91{
92 struct commit_header *h;
93 __u32 csum;
94
95 if (!jbd2_journal_has_csum_v2or3(j))
96 return;
97
98 h = (struct commit_header *)(bh->b_data);
99 h->h_chksum_type = 0;
100 h->h_chksum_size = 0;
101 h->h_chksum[0] = 0;
102 csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
103 h->h_chksum[0] = cpu_to_be32(csum);
104}
105
106/*
107 * Done it all: now submit the commit record. We should have
108 * cleaned up our previous buffers by now, so if we are in abort
109 * mode we can now just skip the rest of the journal write
110 * entirely.
111 *
112 * Returns 1 if the journal needs to be aborted or 0 on success
113 */
114static int journal_submit_commit_record(journal_t *journal,
115 transaction_t *commit_transaction,
116 struct buffer_head **cbh,
117 __u32 crc32_sum)
118{
119 struct commit_header *tmp;
120 struct buffer_head *bh;
121 struct timespec64 now;
122 blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
123
124 *cbh = NULL;
125
126 if (is_journal_aborted(journal))
127 return 0;
128
129 bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
130 JBD2_COMMIT_BLOCK);
131 if (!bh)
132 return 1;
133
134 tmp = (struct commit_header *)bh->b_data;
135 ktime_get_coarse_real_ts64(&now);
136 tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
137 tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
138
139 if (jbd2_has_feature_checksum(journal)) {
140 tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
141 tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
142 tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
143 }
144 jbd2_commit_block_csum_set(journal, bh);
145
146 BUFFER_TRACE(bh, "submit commit block");
147 lock_buffer(bh);
148 clear_buffer_dirty(bh);
149 set_buffer_uptodate(bh);
150 bh->b_end_io = journal_end_buffer_io_sync;
151
152 if (journal->j_flags & JBD2_BARRIER &&
153 !jbd2_has_feature_async_commit(journal))
154 write_flags |= REQ_PREFLUSH | REQ_FUA;
155
156 submit_bh(write_flags, bh);
157 *cbh = bh;
158 return 0;
159}
160
161/*
162 * This function along with journal_submit_commit_record
163 * allows to write the commit record asynchronously.
164 */
165static int journal_wait_on_commit_record(journal_t *journal,
166 struct buffer_head *bh)
167{
168 int ret = 0;
169
170 clear_buffer_dirty(bh);
171 wait_on_buffer(bh);
172
173 if (unlikely(!buffer_uptodate(bh)))
174 ret = -EIO;
175 put_bh(bh); /* One for getblk() */
176
177 return ret;
178}
179
180/* Send all the data buffers related to an inode */
181int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode)
182{
183 if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
184 return 0;
185
186 trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
187 return journal->j_submit_inode_data_buffers(jinode);
188
189}
190EXPORT_SYMBOL(jbd2_submit_inode_data);
191
192int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
193{
194 if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
195 !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
196 return 0;
197 return filemap_fdatawait_range_keep_errors(
198 jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
199 jinode->i_dirty_end);
200}
201EXPORT_SYMBOL(jbd2_wait_inode_data);
202
203/*
204 * Submit all the data buffers of inode associated with the transaction to
205 * disk.
206 *
207 * We are in a committing transaction. Therefore no new inode can be added to
208 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
209 * operate on from being released while we write out pages.
210 */
211static int journal_submit_data_buffers(journal_t *journal,
212 transaction_t *commit_transaction)
213{
214 struct jbd2_inode *jinode;
215 int err, ret = 0;
216
217 spin_lock(&journal->j_list_lock);
218 list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
219 if (!(jinode->i_flags & JI_WRITE_DATA))
220 continue;
221 jinode->i_flags |= JI_COMMIT_RUNNING;
222 spin_unlock(&journal->j_list_lock);
223 /* submit the inode data buffers. */
224 trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
225 if (journal->j_submit_inode_data_buffers) {
226 err = journal->j_submit_inode_data_buffers(jinode);
227 if (!ret)
228 ret = err;
229 }
230 spin_lock(&journal->j_list_lock);
231 J_ASSERT(jinode->i_transaction == commit_transaction);
232 jinode->i_flags &= ~JI_COMMIT_RUNNING;
233 smp_mb();
234 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
235 }
236 spin_unlock(&journal->j_list_lock);
237 return ret;
238}
239
240int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
241{
242 struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
243
244 return filemap_fdatawait_range_keep_errors(mapping,
245 jinode->i_dirty_start,
246 jinode->i_dirty_end);
247}
248
249/*
250 * Wait for data submitted for writeout, refile inodes to proper
251 * transaction if needed.
252 *
253 */
254static int journal_finish_inode_data_buffers(journal_t *journal,
255 transaction_t *commit_transaction)
256{
257 struct jbd2_inode *jinode, *next_i;
258 int err, ret = 0;
259
260 /* For locking, see the comment in journal_submit_data_buffers() */
261 spin_lock(&journal->j_list_lock);
262 list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
263 if (!(jinode->i_flags & JI_WAIT_DATA))
264 continue;
265 jinode->i_flags |= JI_COMMIT_RUNNING;
266 spin_unlock(&journal->j_list_lock);
267 /* wait for the inode data buffers writeout. */
268 if (journal->j_finish_inode_data_buffers) {
269 err = journal->j_finish_inode_data_buffers(jinode);
270 if (!ret)
271 ret = err;
272 }
273 cond_resched();
274 spin_lock(&journal->j_list_lock);
275 jinode->i_flags &= ~JI_COMMIT_RUNNING;
276 smp_mb();
277 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
278 }
279
280 /* Now refile inode to proper lists */
281 list_for_each_entry_safe(jinode, next_i,
282 &commit_transaction->t_inode_list, i_list) {
283 list_del(&jinode->i_list);
284 if (jinode->i_next_transaction) {
285 jinode->i_transaction = jinode->i_next_transaction;
286 jinode->i_next_transaction = NULL;
287 list_add(&jinode->i_list,
288 &jinode->i_transaction->t_inode_list);
289 } else {
290 jinode->i_transaction = NULL;
291 jinode->i_dirty_start = 0;
292 jinode->i_dirty_end = 0;
293 }
294 }
295 spin_unlock(&journal->j_list_lock);
296
297 return ret;
298}
299
300static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
301{
302 char *addr;
303 __u32 checksum;
304
305 addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
306 checksum = crc32_be(crc32_sum, addr, bh->b_size);
307 kunmap_local(addr);
308
309 return checksum;
310}
311
312static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
313 unsigned long long block)
314{
315 tag->t_blocknr = cpu_to_be32(block & (u32)~0);
316 if (jbd2_has_feature_64bit(j))
317 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
318}
319
320static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
321 struct buffer_head *bh, __u32 sequence)
322{
323 journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
324 __u8 *addr;
325 __u32 csum32;
326 __be32 seq;
327
328 if (!jbd2_journal_has_csum_v2or3(j))
329 return;
330
331 seq = cpu_to_be32(sequence);
332 addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
333 csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
334 csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
335 kunmap_local(addr);
336
337 if (jbd2_has_feature_csum3(j))
338 tag3->t_checksum = cpu_to_be32(csum32);
339 else
340 tag->t_checksum = cpu_to_be16(csum32);
341}
342/*
343 * jbd2_journal_commit_transaction
344 *
345 * The primary function for committing a transaction to the log. This
346 * function is called by the journal thread to begin a complete commit.
347 */
348void jbd2_journal_commit_transaction(journal_t *journal)
349{
350 struct transaction_stats_s stats;
351 transaction_t *commit_transaction;
352 struct journal_head *jh;
353 struct buffer_head *descriptor;
354 struct buffer_head **wbuf = journal->j_wbuf;
355 int bufs;
356 int flags;
357 int err;
358 unsigned long long blocknr;
359 ktime_t start_time;
360 u64 commit_time;
361 char *tagp = NULL;
362 journal_block_tag_t *tag = NULL;
363 int space_left = 0;
364 int first_tag = 0;
365 int tag_flag;
366 int i;
367 int tag_bytes = journal_tag_bytes(journal);
368 struct buffer_head *cbh = NULL; /* For transactional checksums */
369 __u32 crc32_sum = ~0;
370 struct blk_plug plug;
371 /* Tail of the journal */
372 unsigned long first_block;
373 tid_t first_tid;
374 int update_tail;
375 int csum_size = 0;
376 LIST_HEAD(io_bufs);
377 LIST_HEAD(log_bufs);
378
379 if (jbd2_journal_has_csum_v2or3(journal))
380 csum_size = sizeof(struct jbd2_journal_block_tail);
381
382 /*
383 * First job: lock down the current transaction and wait for
384 * all outstanding updates to complete.
385 */
386
387 /* Do we need to erase the effects of a prior jbd2_journal_flush? */
388 if (journal->j_flags & JBD2_FLUSHED) {
389 jbd2_debug(3, "super block updated\n");
390 mutex_lock_io(&journal->j_checkpoint_mutex);
391 /*
392 * We hold j_checkpoint_mutex so tail cannot change under us.
393 * We don't need any special data guarantees for writing sb
394 * since journal is empty and it is ok for write to be
395 * flushed only with transaction commit.
396 */
397 jbd2_journal_update_sb_log_tail(journal,
398 journal->j_tail_sequence,
399 journal->j_tail, 0);
400 mutex_unlock(&journal->j_checkpoint_mutex);
401 } else {
402 jbd2_debug(3, "superblock not updated\n");
403 }
404
405 J_ASSERT(journal->j_running_transaction != NULL);
406 J_ASSERT(journal->j_committing_transaction == NULL);
407
408 write_lock(&journal->j_state_lock);
409 journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
410 while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
411 DEFINE_WAIT(wait);
412
413 prepare_to_wait(&journal->j_fc_wait, &wait,
414 TASK_UNINTERRUPTIBLE);
415 write_unlock(&journal->j_state_lock);
416 schedule();
417 write_lock(&journal->j_state_lock);
418 finish_wait(&journal->j_fc_wait, &wait);
419 /*
420 * TODO: by blocking fast commits here, we are increasing
421 * fsync() latency slightly. Strictly speaking, we don't need
422 * to block fast commits until the transaction enters T_FLUSH
423 * state. So an optimization is possible where we block new fast
424 * commits here and wait for existing ones to complete
425 * just before we enter T_FLUSH. That way, the existing fast
426 * commits and this full commit can proceed parallely.
427 */
428 }
429 write_unlock(&journal->j_state_lock);
430
431 commit_transaction = journal->j_running_transaction;
432
433 trace_jbd2_start_commit(journal, commit_transaction);
434 jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
435 commit_transaction->t_tid);
436
437 write_lock(&journal->j_state_lock);
438 journal->j_fc_off = 0;
439 J_ASSERT(commit_transaction->t_state == T_RUNNING);
440 commit_transaction->t_state = T_LOCKED;
441
442 trace_jbd2_commit_locking(journal, commit_transaction);
443 stats.run.rs_wait = commit_transaction->t_max_wait;
444 stats.run.rs_request_delay = 0;
445 stats.run.rs_locked = jiffies;
446 if (commit_transaction->t_requested)
447 stats.run.rs_request_delay =
448 jbd2_time_diff(commit_transaction->t_requested,
449 stats.run.rs_locked);
450 stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
451 stats.run.rs_locked);
452
453 // waits for any t_updates to finish
454 jbd2_journal_wait_updates(journal);
455
456 commit_transaction->t_state = T_SWITCH;
457
458 J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
459 journal->j_max_transaction_buffers);
460
461 /*
462 * First thing we are allowed to do is to discard any remaining
463 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
464 * that there are no such buffers: if a large filesystem
465 * operation like a truncate needs to split itself over multiple
466 * transactions, then it may try to do a jbd2_journal_restart() while
467 * there are still BJ_Reserved buffers outstanding. These must
468 * be released cleanly from the current transaction.
469 *
470 * In this case, the filesystem must still reserve write access
471 * again before modifying the buffer in the new transaction, but
472 * we do not require it to remember exactly which old buffers it
473 * has reserved. This is consistent with the existing behaviour
474 * that multiple jbd2_journal_get_write_access() calls to the same
475 * buffer are perfectly permissible.
476 * We use journal->j_state_lock here to serialize processing of
477 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
478 */
479 while (commit_transaction->t_reserved_list) {
480 jh = commit_transaction->t_reserved_list;
481 JBUFFER_TRACE(jh, "reserved, unused: refile");
482 /*
483 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
484 * leave undo-committed data.
485 */
486 if (jh->b_committed_data) {
487 struct buffer_head *bh = jh2bh(jh);
488
489 spin_lock(&jh->b_state_lock);
490 jbd2_free(jh->b_committed_data, bh->b_size);
491 jh->b_committed_data = NULL;
492 spin_unlock(&jh->b_state_lock);
493 }
494 jbd2_journal_refile_buffer(journal, jh);
495 }
496
497 write_unlock(&journal->j_state_lock);
498 /*
499 * Now try to drop any written-back buffers from the journal's
500 * checkpoint lists. We do this *before* commit because it potentially
501 * frees some memory
502 */
503 spin_lock(&journal->j_list_lock);
504 __jbd2_journal_clean_checkpoint_list(journal, false);
505 spin_unlock(&journal->j_list_lock);
506
507 jbd2_debug(3, "JBD2: commit phase 1\n");
508
509 /*
510 * Clear revoked flag to reflect there is no revoked buffers
511 * in the next transaction which is going to be started.
512 */
513 jbd2_clear_buffer_revoked_flags(journal);
514
515 /*
516 * Switch to a new revoke table.
517 */
518 jbd2_journal_switch_revoke_table(journal);
519
520 write_lock(&journal->j_state_lock);
521 /*
522 * Reserved credits cannot be claimed anymore, free them
523 */
524 atomic_sub(atomic_read(&journal->j_reserved_credits),
525 &commit_transaction->t_outstanding_credits);
526
527 trace_jbd2_commit_flushing(journal, commit_transaction);
528 stats.run.rs_flushing = jiffies;
529 stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
530 stats.run.rs_flushing);
531
532 commit_transaction->t_state = T_FLUSH;
533 journal->j_committing_transaction = commit_transaction;
534 journal->j_running_transaction = NULL;
535 start_time = ktime_get();
536 commit_transaction->t_log_start = journal->j_head;
537 wake_up_all(&journal->j_wait_transaction_locked);
538 write_unlock(&journal->j_state_lock);
539
540 jbd2_debug(3, "JBD2: commit phase 2a\n");
541
542 /*
543 * Now start flushing things to disk, in the order they appear
544 * on the transaction lists. Data blocks go first.
545 */
546 err = journal_submit_data_buffers(journal, commit_transaction);
547 if (err)
548 jbd2_journal_abort(journal, err);
549
550 blk_start_plug(&plug);
551 jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
552
553 jbd2_debug(3, "JBD2: commit phase 2b\n");
554
555 /*
556 * Way to go: we have now written out all of the data for a
557 * transaction! Now comes the tricky part: we need to write out
558 * metadata. Loop over the transaction's entire buffer list:
559 */
560 write_lock(&journal->j_state_lock);
561 commit_transaction->t_state = T_COMMIT;
562 write_unlock(&journal->j_state_lock);
563
564 trace_jbd2_commit_logging(journal, commit_transaction);
565 stats.run.rs_logging = jiffies;
566 stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
567 stats.run.rs_logging);
568 stats.run.rs_blocks = commit_transaction->t_nr_buffers;
569 stats.run.rs_blocks_logged = 0;
570
571 J_ASSERT(commit_transaction->t_nr_buffers <=
572 atomic_read(&commit_transaction->t_outstanding_credits));
573
574 err = 0;
575 bufs = 0;
576 descriptor = NULL;
577 while (commit_transaction->t_buffers) {
578
579 /* Find the next buffer to be journaled... */
580
581 jh = commit_transaction->t_buffers;
582
583 /* If we're in abort mode, we just un-journal the buffer and
584 release it. */
585
586 if (is_journal_aborted(journal)) {
587 clear_buffer_jbddirty(jh2bh(jh));
588 JBUFFER_TRACE(jh, "journal is aborting: refile");
589 jbd2_buffer_abort_trigger(jh,
590 jh->b_frozen_data ?
591 jh->b_frozen_triggers :
592 jh->b_triggers);
593 jbd2_journal_refile_buffer(journal, jh);
594 /* If that was the last one, we need to clean up
595 * any descriptor buffers which may have been
596 * already allocated, even if we are now
597 * aborting. */
598 if (!commit_transaction->t_buffers)
599 goto start_journal_io;
600 continue;
601 }
602
603 /* Make sure we have a descriptor block in which to
604 record the metadata buffer. */
605
606 if (!descriptor) {
607 J_ASSERT (bufs == 0);
608
609 jbd2_debug(4, "JBD2: get descriptor\n");
610
611 descriptor = jbd2_journal_get_descriptor_buffer(
612 commit_transaction,
613 JBD2_DESCRIPTOR_BLOCK);
614 if (!descriptor) {
615 jbd2_journal_abort(journal, -EIO);
616 continue;
617 }
618
619 jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
620 (unsigned long long)descriptor->b_blocknr,
621 descriptor->b_data);
622 tagp = &descriptor->b_data[sizeof(journal_header_t)];
623 space_left = descriptor->b_size -
624 sizeof(journal_header_t);
625 first_tag = 1;
626 set_buffer_jwrite(descriptor);
627 set_buffer_dirty(descriptor);
628 wbuf[bufs++] = descriptor;
629
630 /* Record it so that we can wait for IO
631 completion later */
632 BUFFER_TRACE(descriptor, "ph3: file as descriptor");
633 jbd2_file_log_bh(&log_bufs, descriptor);
634 }
635
636 /* Where is the buffer to be written? */
637
638 err = jbd2_journal_next_log_block(journal, &blocknr);
639 /* If the block mapping failed, just abandon the buffer
640 and repeat this loop: we'll fall into the
641 refile-on-abort condition above. */
642 if (err) {
643 jbd2_journal_abort(journal, err);
644 continue;
645 }
646
647 /*
648 * start_this_handle() uses t_outstanding_credits to determine
649 * the free space in the log.
650 */
651 atomic_dec(&commit_transaction->t_outstanding_credits);
652
653 /* Bump b_count to prevent truncate from stumbling over
654 the shadowed buffer! @@@ This can go if we ever get
655 rid of the shadow pairing of buffers. */
656 atomic_inc(&jh2bh(jh)->b_count);
657
658 /*
659 * Make a temporary IO buffer with which to write it out
660 * (this will requeue the metadata buffer to BJ_Shadow).
661 */
662 set_bit(BH_JWrite, &jh2bh(jh)->b_state);
663 JBUFFER_TRACE(jh, "ph3: write metadata");
664 flags = jbd2_journal_write_metadata_buffer(commit_transaction,
665 jh, &wbuf[bufs], blocknr);
666 if (flags < 0) {
667 jbd2_journal_abort(journal, flags);
668 continue;
669 }
670 jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
671
672 /* Record the new block's tag in the current descriptor
673 buffer */
674
675 tag_flag = 0;
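		/*
		 * The low bit of the value returned by
		 * jbd2_journal_write_metadata_buffer() is set when the block
		 * had to be escaped (its first word matched the journal magic
		 * number); record that in the tag so recovery can undo it.
		 */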
676 if (flags & 1)
677 tag_flag |= JBD2_FLAG_ESCAPE;
678 if (!first_tag)
679 tag_flag |= JBD2_FLAG_SAME_UUID;
680
681 tag = (journal_block_tag_t *) tagp;
682 write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
683 tag->t_flags = cpu_to_be16(tag_flag);
684 jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
685 commit_transaction->t_tid);
686 tagp += tag_bytes;
687 space_left -= tag_bytes;
688 bufs++;
689
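		/*
		 * Only the first tag in each descriptor block is followed by
		 * the 16-byte journal UUID; later tags were flagged
		 * JBD2_FLAG_SAME_UUID above and omit it.
		 */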
690 if (first_tag) {
691 memcpy (tagp, journal->j_uuid, 16);
692 tagp += 16;
693 space_left -= 16;
694 first_tag = 0;
695 }
696
697 /* If there's no more to do, or if the descriptor is full,
698 let the IO rip! */
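		/*
		 * The space check below keeps room for one more tag, a
		 * possible 16-byte UUID and, when checksums are enabled, the
		 * descriptor block's checksum tail (csum_size).
		 */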
699
700 if (bufs == journal->j_wbufsize ||
701 commit_transaction->t_buffers == NULL ||
702 space_left < tag_bytes + 16 + csum_size) {
703
704 jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);
705
706 /* Write an end-of-descriptor marker before
707 submitting the IOs. "tag" still points to
708 the last tag we set up. */
709
710 tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
711start_journal_io:
712 if (descriptor)
713 jbd2_descriptor_block_csum_set(journal,
714 descriptor);
715
716 for (i = 0; i < bufs; i++) {
717 struct buffer_head *bh = wbuf[i];
718
719 /*
720 * Compute checksum.
721 */
722 if (jbd2_has_feature_checksum(journal)) {
723 crc32_sum =
724 jbd2_checksum_data(crc32_sum, bh);
725 }
726
727 lock_buffer(bh);
728 clear_buffer_dirty(bh);
729 set_buffer_uptodate(bh);
730 bh->b_end_io = journal_end_buffer_io_sync;
731 submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
732 bh);
733 }
734 cond_resched();
735
736 /* Force a new descriptor to be generated next
737 time round the loop. */
738 descriptor = NULL;
739 bufs = 0;
740 }
741 }
742
743 err = journal_finish_inode_data_buffers(journal, commit_transaction);
744 if (err) {
745 printk(KERN_WARNING
746 "JBD2: Detected IO errors while flushing file data "
747 "on %s\n", journal->j_devname);
748 if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
749 jbd2_journal_abort(journal, err);
750 err = 0;
751 }
752
753 /*
754 * Get current oldest transaction in the log before we issue flush
755 * to the filesystem device. After the flush we can be sure that
756 * blocks of all older transactions are checkpointed to persistent
757 * storage and we will be safe to update journal start in the
758 * superblock with the numbers we get here.
759 */
760 update_tail =
761 jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
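	/*
	 * first_tid/first_block now identify the oldest transaction that
	 * still has to be kept in the log, i.e. the candidate new tail.
	 */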
762
763 write_lock(&journal->j_state_lock);
764 if (update_tail) {
765 long freed = first_block - journal->j_tail;
766
767 if (first_block < journal->j_tail)
768 freed += journal->j_last - journal->j_first;
769 /* Update tail only if we free significant amount of space */
770 if (freed < jbd2_journal_get_max_txn_bufs(journal))
771 update_tail = 0;
772 }
773 J_ASSERT(commit_transaction->t_state == T_COMMIT);
774 commit_transaction->t_state = T_COMMIT_DFLUSH;
775 write_unlock(&journal->j_state_lock);
776
777 /*
778 * If the journal is not located on the file system device,
779 * then we must flush the file system device before we issue
780 * the commit record
781 */
782 if (commit_transaction->t_need_data_flush &&
783 (journal->j_fs_dev != journal->j_dev) &&
784 (journal->j_flags & JBD2_BARRIER))
785 blkdev_issue_flush(journal->j_fs_dev);
786
787 /* Done it all: now write the commit record asynchronously. */
788 if (jbd2_has_feature_async_commit(journal)) {
789 err = journal_submit_commit_record(journal, commit_transaction,
790 &cbh, crc32_sum);
791 if (err)
792 jbd2_journal_abort(journal, err);
793 }
794
795 blk_finish_plug(&plug);
796
797 /* Lo and behold: we have just managed to send a transaction to
798 the log. Before we can commit it, wait for the IO so far to
799	   complete. Control buffers being written are on the local
800	   log_bufs list, and metadata buffers are on the io_bufs
801	   list.
802
803 Wait for the buffers in reverse order. That way we are
804 less likely to be woken up until all IOs have completed, and
805 so we incur less scheduling load.
806 */
807
808 jbd2_debug(3, "JBD2: commit phase 3\n");
809
810 while (!list_empty(&io_bufs)) {
811 struct buffer_head *bh = list_entry(io_bufs.prev,
812 struct buffer_head,
813 b_assoc_buffers);
814
815 wait_on_buffer(bh);
816 cond_resched();
817
818 if (unlikely(!buffer_uptodate(bh)))
819 err = -EIO;
820 jbd2_unfile_log_bh(bh);
821 stats.run.rs_blocks_logged++;
822
823 /*
824 * The list contains temporary buffer heads created by
825 * jbd2_journal_write_metadata_buffer().
826 */
827 BUFFER_TRACE(bh, "dumping temporary bh");
828 __brelse(bh);
829 J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
830 free_buffer_head(bh);
831
832 /* We also have to refile the corresponding shadowed buffer */
833 jh = commit_transaction->t_shadow_list->b_tprev;
834 bh = jh2bh(jh);
835 clear_buffer_jwrite(bh);
836 J_ASSERT_BH(bh, buffer_jbddirty(bh));
837 J_ASSERT_BH(bh, !buffer_shadow(bh));
838
839 /* The metadata is now released for reuse, but we need
840 to remember it against this transaction so that when
841 we finally commit, we can do any checkpointing
842 required. */
843 JBUFFER_TRACE(jh, "file as BJ_Forget");
844 jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
845 JBUFFER_TRACE(jh, "brelse shadowed buffer");
846 __brelse(bh);
847 }
848
849 J_ASSERT (commit_transaction->t_shadow_list == NULL);
850
851 jbd2_debug(3, "JBD2: commit phase 4\n");
852
853 /* Here we wait for the revoke record and descriptor record buffers */
854 while (!list_empty(&log_bufs)) {
855 struct buffer_head *bh;
856
857 bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
858 wait_on_buffer(bh);
859 cond_resched();
860
861 if (unlikely(!buffer_uptodate(bh)))
862 err = -EIO;
863
864 BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
865 clear_buffer_jwrite(bh);
866 jbd2_unfile_log_bh(bh);
867 stats.run.rs_blocks_logged++;
868 __brelse(bh); /* One for getblk */
869 /* AKPM: bforget here */
870 }
871
872 if (err)
873 jbd2_journal_abort(journal, err);
874
875 jbd2_debug(3, "JBD2: commit phase 5\n");
876 write_lock(&journal->j_state_lock);
877 J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
878 commit_transaction->t_state = T_COMMIT_JFLUSH;
879 write_unlock(&journal->j_state_lock);
880
881 if (!jbd2_has_feature_async_commit(journal)) {
882 err = journal_submit_commit_record(journal, commit_transaction,
883 &cbh, crc32_sum);
884 if (err)
885 jbd2_journal_abort(journal, err);
886 }
887 if (cbh)
888 err = journal_wait_on_commit_record(journal, cbh);
889 stats.run.rs_blocks_logged++;
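	/*
	 * With async commit the commit record was submitted without any
	 * flush/FUA ordering against the rest of the journal writes, so
	 * flush the journal device cache here before the transaction is
	 * declared committed.
	 */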
890 if (jbd2_has_feature_async_commit(journal) &&
891 journal->j_flags & JBD2_BARRIER) {
892 blkdev_issue_flush(journal->j_dev);
893 }
894
895 if (err)
896 jbd2_journal_abort(journal, err);
897
898 WARN_ON_ONCE(
899 atomic_read(&commit_transaction->t_outstanding_credits) < 0);
900
901 /*
902	 * Now the disk caches for the filesystem device are flushed, so it is
903	 * safe to erase checkpointed transactions from the log by updating the
904	 * journal superblock.
905 */
906 if (update_tail)
907 jbd2_update_log_tail(journal, first_tid, first_block);
908
909 /* End of a transaction! Finally, we can do checkpoint
910 processing: any buffers committed as a result of this
911	   transaction can be removed from any checkpoint list they were
912	   on before. */
913
914 jbd2_debug(3, "JBD2: commit phase 6\n");
915
916 J_ASSERT(list_empty(&commit_transaction->t_inode_list));
917 J_ASSERT(commit_transaction->t_buffers == NULL);
918 J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
919 J_ASSERT(commit_transaction->t_shadow_list == NULL);
920
921restart_loop:
922 /*
923 * As there are other places (journal_unmap_buffer()) adding buffers
924 * to this list we have to be careful and hold the j_list_lock.
925 */
926 spin_lock(&journal->j_list_lock);
927 while (commit_transaction->t_forget) {
928 transaction_t *cp_transaction;
929 struct buffer_head *bh;
930 int try_to_free = 0;
931 bool drop_ref;
932
933 jh = commit_transaction->t_forget;
934 spin_unlock(&journal->j_list_lock);
935 bh = jh2bh(jh);
936 /*
937 * Get a reference so that bh cannot be freed before we are
938 * done with it.
939 */
940 get_bh(bh);
941 spin_lock(&jh->b_state_lock);
942 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
943
944 /*
945 * If there is undo-protected committed data against
946 * this buffer, then we can remove it now. If it is a
947 * buffer needing such protection, the old frozen_data
948 * field now points to a committed version of the
949 * buffer, so rotate that field to the new committed
950 * data.
951 *
952 * Otherwise, we can just throw away the frozen data now.
953 *
954 * We also know that the frozen data has already fired
955 * its triggers if they exist, so we can clear that too.
956 */
957 if (jh->b_committed_data) {
958 jbd2_free(jh->b_committed_data, bh->b_size);
959 jh->b_committed_data = NULL;
960 if (jh->b_frozen_data) {
961 jh->b_committed_data = jh->b_frozen_data;
962 jh->b_frozen_data = NULL;
963 jh->b_frozen_triggers = NULL;
964 }
965 } else if (jh->b_frozen_data) {
966 jbd2_free(jh->b_frozen_data, bh->b_size);
967 jh->b_frozen_data = NULL;
968 jh->b_frozen_triggers = NULL;
969 }
970
971 spin_lock(&journal->j_list_lock);
972 cp_transaction = jh->b_cp_transaction;
973 if (cp_transaction) {
974 JBUFFER_TRACE(jh, "remove from old cp transaction");
975 cp_transaction->t_chp_stats.cs_dropped++;
976 __jbd2_journal_remove_checkpoint(jh);
977 }
978
979 /* Only re-checkpoint the buffer_head if it is marked
980 * dirty. If the buffer was added to the BJ_Forget list
981 * by jbd2_journal_forget, it may no longer be dirty and
982 * there's no point in keeping a checkpoint record for
983 * it. */
984
985 /*
986		 * A buffer which has been freed while still being journaled
987		 * by a previous transaction gets refiled to BJ_Forget of the
988		 * running transaction. If the just-committed transaction
989		 * contains an "add to orphan" operation, we can completely
990		 * invalidate the buffer now. We are rather thorough about
991		 * that, since the buffer may still be accessible when
992		 * blocksize < pagesize and it is attached to the last partial page.
993 */
994 if (buffer_freed(bh) && !jh->b_next_transaction) {
995 struct address_space *mapping;
996
997 clear_buffer_freed(bh);
998 clear_buffer_jbddirty(bh);
999
1000 /*
1001 * Block device buffers need to stay mapped all the
1002 * time, so it is enough to clear buffer_jbddirty and
1003 * buffer_freed bits. For the file mapping buffers (i.e.
1004			 * journalled data) we need to unmap the buffer and clear
1005 * more bits. We also need to be careful about the check
1006 * because the data page mapping can get cleared under
1007 * our hands. Note that if mapping == NULL, we don't
1008			 * need to mark the buffer unmapped because the page is
1009 * already detached from the mapping and buffers cannot
1010 * get reused.
1011 */
1012 mapping = READ_ONCE(bh->b_folio->mapping);
1013 if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1014 clear_buffer_mapped(bh);
1015 clear_buffer_new(bh);
1016 clear_buffer_req(bh);
1017 bh->b_bdev = NULL;
1018 }
1019 }
1020
1021 if (buffer_jbddirty(bh)) {
1022 JBUFFER_TRACE(jh, "add to new checkpointing trans");
1023 __jbd2_journal_insert_checkpoint(jh, commit_transaction);
1024 if (is_journal_aborted(journal))
1025 clear_buffer_jbddirty(bh);
1026 } else {
1027 J_ASSERT_BH(bh, !buffer_dirty(bh));
1028 /*
1029			 * A buffer on the BJ_Forget list that is not jbddirty
1030			 * means it has been freed by this transaction and hence it
1031 * could not have been reallocated until this
1032 * transaction has committed. *BUT* it could be
1033 * reallocated once we have written all the data to
1034 * disk and before we process the buffer on BJ_Forget
1035 * list.
1036 */
1037 if (!jh->b_next_transaction)
1038 try_to_free = 1;
1039 }
1040 JBUFFER_TRACE(jh, "refile or unfile buffer");
1041 drop_ref = __jbd2_journal_refile_buffer(jh);
1042 spin_unlock(&jh->b_state_lock);
1043 if (drop_ref)
1044 jbd2_journal_put_journal_head(jh);
1045 if (try_to_free)
1046 release_buffer_page(bh); /* Drops bh reference */
1047 else
1048 __brelse(bh);
1049 cond_resched_lock(&journal->j_list_lock);
1050 }
1051 spin_unlock(&journal->j_list_lock);
1052 /*
1053 * This is a bit sleazy. We use j_list_lock to protect transition
1054 * of a transaction into T_FINISHED state and calling
1055 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1056 * other checkpointing code processing the transaction...
1057 */
1058 write_lock(&journal->j_state_lock);
1059 spin_lock(&journal->j_list_lock);
1060 /*
1061 * Now recheck if some buffers did not get attached to the transaction
1062 * while the lock was dropped...
1063 */
1064 if (commit_transaction->t_forget) {
1065 spin_unlock(&journal->j_list_lock);
1066 write_unlock(&journal->j_state_lock);
1067 goto restart_loop;
1068 }
1069
1070	/* Add the transaction to the checkpoint list.
1071	 * __journal_remove_checkpoint() cannot destroy the transaction
1072	 * under us because it is not marked as T_FINISHED yet. */
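	/* The new transaction is linked in just before the current head,
	 * i.e. appended at the tail of the circular checkpoint list. */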
1073 if (journal->j_checkpoint_transactions == NULL) {
1074 journal->j_checkpoint_transactions = commit_transaction;
1075 commit_transaction->t_cpnext = commit_transaction;
1076 commit_transaction->t_cpprev = commit_transaction;
1077 } else {
1078 commit_transaction->t_cpnext =
1079 journal->j_checkpoint_transactions;
1080 commit_transaction->t_cpprev =
1081 commit_transaction->t_cpnext->t_cpprev;
1082 commit_transaction->t_cpnext->t_cpprev =
1083 commit_transaction;
1084 commit_transaction->t_cpprev->t_cpnext =
1085 commit_transaction;
1086 }
1087 spin_unlock(&journal->j_list_lock);
1088
1089 /* Done with this transaction! */
1090
1091 jbd2_debug(3, "JBD2: commit phase 7\n");
1092
1093 J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1094
1095 commit_transaction->t_start = jiffies;
1096 stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1097 commit_transaction->t_start);
1098
1099 /*
1100 * File the transaction statistics
1101 */
1102 stats.ts_tid = commit_transaction->t_tid;
1103 stats.run.rs_handle_count =
1104 atomic_read(&commit_transaction->t_handle_count);
1105 trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1106 commit_transaction->t_tid, &stats.run);
1107 stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1108
1109 commit_transaction->t_state = T_COMMIT_CALLBACK;
1110 J_ASSERT(commit_transaction == journal->j_committing_transaction);
1111 journal->j_commit_sequence = commit_transaction->t_tid;
1112 journal->j_committing_transaction = NULL;
1113 commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1114
1115 /*
1116	 * Weight the existing average higher than the new commit time so we
1117	 * don't react too strongly to vast changes in the commit time.
1118 */
1119 if (likely(journal->j_average_commit_time))
1120 journal->j_average_commit_time = (commit_time +
1121 journal->j_average_commit_time*3) / 4;
1122 else
1123 journal->j_average_commit_time = commit_time;
1124
1125 write_unlock(&journal->j_state_lock);
1126
1127 if (journal->j_commit_callback)
1128 journal->j_commit_callback(journal, commit_transaction);
1129 if (journal->j_fc_cleanup_callback)
1130 journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
1131
1132 trace_jbd2_end_commit(journal, commit_transaction);
1133 jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
1134 journal->j_commit_sequence, journal->j_tail_sequence);
1135
1136 write_lock(&journal->j_state_lock);
1137 journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
1138 journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
1139 spin_lock(&journal->j_list_lock);
1140 commit_transaction->t_state = T_FINISHED;
1141 /* Check if the transaction can be dropped now that we are finished */
1142 if (commit_transaction->t_checkpoint_list == NULL) {
1143 __jbd2_journal_drop_transaction(journal, commit_transaction);
1144 jbd2_journal_free_transaction(commit_transaction);
1145 }
1146 spin_unlock(&journal->j_list_lock);
1147 write_unlock(&journal->j_state_lock);
1148 wake_up(&journal->j_wait_done_commit);
1149 wake_up(&journal->j_fc_wait);
1150
1151 /*
1152 * Calculate overall stats
1153 */
1154 spin_lock(&journal->j_history_lock);
1155 journal->j_stats.ts_tid++;
1156 journal->j_stats.ts_requested += stats.ts_requested;
1157 journal->j_stats.run.rs_wait += stats.run.rs_wait;
1158 journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1159 journal->j_stats.run.rs_running += stats.run.rs_running;
1160 journal->j_stats.run.rs_locked += stats.run.rs_locked;
1161 journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1162 journal->j_stats.run.rs_logging += stats.run.rs_logging;
1163 journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1164 journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1165 journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1166 spin_unlock(&journal->j_history_lock);
1167}