/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
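
/*
 * Illustrative worked example (not from the original source): assuming a
 * 4 KiB block size, roughly 72 bytes for struct gfs2_log_descriptor and
 * 24 bytes for struct gfs2_meta_header, a descriptor block holds about
 * (4096 - 72) / 8 = 503 revoke entries (ssize == sizeof(u64)) and each
 * continuation block about (4096 - 24) / 8 = 509.  So nstruct == 1600
 * needs 1 + DIV_ROUND_UP(1600 - 503, 509) = 1 + 3 = 4 journal blocks.
 */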

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_ail = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @ai: The ail structure
 *
 * Returns: 1 if writeback was started and the scan should be restarted,
 *          0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_ail *ai)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_ail *ai;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(ai, head, ai_list) {
		if (wbc->nr_to_write <= 0)
			break;
		if (gfs2_ail1_start_one(sdp, wbc, ai))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_ail == ai);
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai, *s;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		gfs2_ail1_empty_one(sdp, ai);
		if (list_empty(&ai->ai_ail1_list))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else
			break;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(ai, &sdp->sd_ail1_list, ai_list) {
		list_for_each_entry(bd, &ai->ai_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all remaining entries from a trans's ail2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while (free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
		goto retry;
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}
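
/*
 * Illustrative usage sketch (not part of the original source), roughly how
 * gfs2_trans_begin() and gfs2_trans_end() in trans.c drive this API:
 *
 *	error = gfs2_log_reserve(sdp, blks);    (takes sd_log_flush_lock)
 *	if (!error) {
 *		... build and journal the transaction ...
 *		gfs2_log_commit(sdp, tr);       (drops sd_log_flush_lock)
 *	}
 *
 * "blks" and "tr" here stand for whatever the caller computed.
 */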

static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
			return je->dblock + lbn - je->lblock;
	}

	return -1;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
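
/*
 * Worked example (illustrative, assuming a journal of 8192 blocks): for
 * newer = 10 and older = 8100 the journal has wrapped, so the raw
 * difference 10 - 8100 = -8090 is corrected to -8090 + 8192 = 102 blocks.
 */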

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf_limit, metabufhdrs_needed;
	unsigned int dbuf_limit, databufhdrs_needed;
	unsigned int revokes = 0;

	mbuf_limit = buf_limit(sdp);
	metabufhdrs_needed = (sdp->sd_log_commited_buf +
			      (mbuf_limit - 1)) / mbuf_limit;
	dbuf_limit = databuf_limit(sdp);
	databufhdrs_needed = (sdp->sd_log_commited_databuf +
			      (dbuf_limit - 1)) / dbuf_limit;

	if (sdp->sd_log_commited_revoke > 0)
		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					  sizeof(u64));

	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
		   sdp->sd_log_commited_databuf + databufhdrs_needed +
		   revokes;
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
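
/*
 * Worked example (illustrative, using the 4 KiB limits quoted above): a
 * transaction with 600 committed metadata buffers and 100 journaled data
 * buffers needs DIV_ROUND_UP(600, 502) = 2 metadata headers and
 * DIV_ROUND_UP(100, 251) = 1 data header, so before revokes the
 * reservation is 600 + 2 + 100 + 1 + 1 (overall header) = 704 blocks.
 */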

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */

static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
	struct gfs2_sbd *sdp = bh->b_private;
	bh->b_private = NULL;

	end_buffer_write_sync(bh, uptodate);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);
	bh->b_private = sdp;
	bh->b_end_io = gfs2_log_write_endio;

	return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake log buffer
 * @bh: The buffer head
 * @uptodate: The I/O Status
 *
 */

static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *real_bh = bh->b_private;
	struct gfs2_bufdata *bd = real_bh->b_private;
	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

	end_buffer_write_sync(bh, uptodate);
	free_buffer_head(bh);
	unlock_buffer(real_bh);
	brelse(real_bh);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head holding the data to be written to the log
 *
 * Returns: the log buffer descriptor
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;
	bh->b_private = real;
	bh->b_end_io = gfs2_fake_write_endio;

	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);

	return bh;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, e.g. GFS2_LOG_HEAD_UNMOUNT
 * @pull: Non-zero if the log tail is expected to be pulled forward
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);

	gfs2_ail1_empty(sdp);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
	else
		submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}

	log_write_header(sdp, 0, 0);
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	LIST_HEAD(written);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
		list_move(&bd->bd_le.le_list, &written);
		bh = bd->bd_bh;
		if (!buffer_dirty(bh))
			continue;
		get_bh(bh);
		gfs2_log_unlock(sdp);
		lock_buffer(bh);
		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
			bh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE_SYNC, bh);
		} else {
			unlock_buffer(bh);
			brelse(bh);
		}
		gfs2_log_lock(sdp);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	gfs2_log_unlock(sdp);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
		bh = bd->bd_bh;
		if (buffer_locked(bh)) {
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
			gfs2_log_lock(sdp);
			continue;
		}
		list_del_init(&bd->bd_le.le_list);
	}
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1);

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
		       sdp->sd_log_commited_buf);
		gfs2_assert_withdraw(sdp, 0);
	}
	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
		printk(KERN_INFO "GFS2: log databuf %u %u\n",
		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
		gfs2_assert_withdraw(sdp, 0);
	}
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp);
	gfs2_ordered_wait(sdp);

	if (sdp->sd_log_head != sdp->sd_log_flush_head)
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		gfs2_log_lock(sdp);
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		gfs2_log_unlock(sdp);
		log_write_header(sdp, 0, PULL);
	}
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_commited_databuf = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	buf_lo_incore_commit(sdp, tr);

	up_read(&sdp->sd_log_flush_lock);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
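
/*
 * Illustrative numbers (assuming the 1/3 and 2/3 defaults quoted above):
 * with a 32768-block journal, thresh1 is about 10922 pinned blocks and
 * thresh2 about 21845 used blocks, so gfs2_logd is woken once either
 * limit is crossed.
 */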

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp))
			break;
	}
	gfs2_log_flush(sdp, NULL);
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	unsigned preflush;

	while (!kthread_should_stop()) {

		preflush = atomic_read(&sdp->sd_log_pinned);
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		if (!gfs2_ail_flush_reqd(sdp))
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
		if (freezing(current))
			refrigerator();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	/* The initial struct gfs2_log_descriptor block */
	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		/* Subsequent struct gfs2_meta_header blocks */
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
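
/*
 * Illustrative example (not from the original source): in this version the
 * per-block capacities are precomputed at mount time; assuming a 4 KiB
 * block size where sd_ldptrs == 503 and sd_inptrs == 509, nstruct == 1600
 * again yields 1 + DIV_ROUND_UP(1600 - 503, 509) = 4 blocks.
 */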

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a transaction
 * @sdp: The superblock
 * @wbc: The writeback control structure
 * @tr: The transaction to start I/O on
 * @plug: The block plug currently active
 *
 * Returns: 0 on success, -EBUSY if writeback was started and the scan must
 *          be restarted, or another negative errno on failure
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr, struct blk_plug *plug)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawing_or_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_folio->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = mapping->a_ops->writepages(mapping, wbc);
		if (need_resched()) {
			blk_finish_plug(plug);
			cond_resched();
			blk_start_plug(plug);
		}
		spin_lock(&sdp->sd_ail_lock);
		if (ret == -ENODATA) /* if a jdata write into a new hole */
			ret = 0; /* ignore it */
		mapping_set_error(mapping, ret);
		if (ret || wbc->nr_to_write <= 0)
			break;
		return -EBUSY;
	}

	return ret;
}

static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
		       __func__, current->journal_info ? 1 : 0);
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_flush_tail = sdp->sd_log_head;
	struct gfs2_trans *tr;

	if (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_last_entry(&sdp->sd_ail1_list,
				     struct gfs2_trans, tr_list);
		new_flush_tail = tr->tr_first;
	}
	sdp->sd_log_flush_tail = new_flush_tail;
}

static void gfs2_log_update_head(struct gfs2_sbd *sdp)
{
	unsigned int new_head = sdp->sd_log_flush_head;

	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
		sdp->sd_log_flush_tail = new_head;
	sdp->sd_log_head = new_head;
}

/*
 * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 * returns: the transaction's count of remaining active items
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			       int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int active_count = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh)) {
			active_count++;
			continue;
		}
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
	return active_count;
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first.
 * Returns %true if the ail1 list is now empty.
 */

static bool gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	bool empty;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	gfs2_log_update_flush_tail(sdp);
	empty = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return empty;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
	list_del(&tr->tr_list);
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
	gfs2_trans_free(sdp, tr);
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct list_head *ail2_list = &sdp->sd_ail2_list;
	unsigned int old_tail = sdp->sd_log_tail;
	struct gfs2_trans *tr, *safe;

	spin_lock(&sdp->sd_ail_lock);
	if (old_tail <= new_tail) {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	} else {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
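
/*
 * Worked example (illustrative): with old_tail = 8000, new_tail = 100 and
 * a journal that wraps at 8192 blocks, the tail has passed the end of the
 * journal, so transactions whose tr_first lies in [8000, 8192) or in
 * [0, 100) are freed; without a wrap (old_tail = 100, new_tail = 8000)
 * only the half-open range [100, 8000) qualifies.
 */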

/**
 * gfs2_log_is_empty - Check if the log is empty
 * @sdp: The GFS2 superblock
 */

bool gfs2_log_is_empty(struct gfs2_sbd *sdp)
{
	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
}

static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	unsigned int available;

	available = atomic_read(&sdp->sd_log_revokes_available);
	while (available >= revokes) {
		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
				       &available, available - revokes))
			return true;
	}
	return false;
}

/**
 * gfs2_log_release_revokes - Release a given number of revokes
 * @sdp: The GFS2 superblock
 * @revokes: The number of revokes to release
 *
 * sdp->sd_log_flush_lock must be held.
 */
void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	if (revokes)
		atomic_add(revokes, &sdp->sd_log_revokes_available);
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	if (atomic_read(&sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}

/**
 * __gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * Try to do the same as __gfs2_log_reserve(), but fail if no more log
 * space is immediately available.
 */
static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
				   unsigned int taboo_blks)
{
	unsigned wanted = blks + taboo_blks;
	unsigned int free_blocks;

	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	while (free_blocks >= wanted) {
		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
				       free_blocks - blks)) {
			trace_gfs2_log_blocks(sdp, -blks);
			return true;
		}
	}
	return false;
}

/**
 * __gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
 * for all other processes. This ensures that when the log is almost full,
 * logd will still be able to call gfs2_log_flush one more time without
 * blocking, which will advance the tail and make some more log space
 * available.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 */

static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
			       unsigned int taboo_blks)
{
	unsigned wanted = blks + taboo_blks;
	unsigned int free_blocks;

	atomic_add(blks, &sdp->sd_log_blks_needed);
	for (;;) {
		if (current != sdp->sd_logd_process)
			wake_up(&sdp->sd_logd_waitq);
		io_wait_event(sdp->sd_log_waitq,
			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
			 free_blocks >= wanted));
		do {
			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
					       &free_blocks,
					       free_blocks - blks))
				goto reserved;
		} while (free_blocks >= wanted);
	}

reserved:
	trace_gfs2_log_blocks(sdp, -blks);
	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}

/**
 * gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
 * held for correct revoke accounting.
 */

bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			  unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks = 0;

	*extra_revokes = 0;
	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	if (!blks)
		return true;
	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
		return true;
	if (!revoke_blks)
		gfs2_log_release_revokes(sdp, revokes);
	return false;
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * sdp->sd_log_flush_lock must not be held.
 */

void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
		      unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks;

	*extra_revokes = 0;
	if (revokes) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
}
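
/*
 * Worked example (illustrative, assuming sd_inptrs == 509 as above): a
 * transaction with tr_revokes == 1000 needs DIV_ROUND_UP(1000, 509) = 2
 * revoke blocks, and *extra_revokes becomes 2 * 509 - 1000 = 18, i.e. the
 * slack in the last revoke block that callers may fill with additional
 * revokes.
 */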

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to keep reserved
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data blocks for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, corruption will follow.
 *
 * We can have metadata blocks and jdata blocks in the same journal. Each
 * type gets its own log descriptor, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one log descriptor
 * in cases where we have more blocks than will fit in a log descriptor.
 * Metadata journal entries take up half the space of journaled buffer entries.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	unsigned int blocks;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
	}
	return reserved;
}

static void log_pull_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_tail = sdp->sd_log_flush_tail;
	unsigned int dist;

	if (new_tail == sdp->sd_log_tail)
		return;
	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
	ail2_empty(sdp, new_tail);
	gfs2_log_release(sdp, dist);
	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void __ordered_del_inode(struct gfs2_inode *ip)
{
	if (!list_empty(&ip->i_ordered))
		list_del_init(&ip->i_ordered);
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			__ordered_del_inode(ip);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		__ordered_del_inode(ip);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	__ordered_del_inode(ip);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_put_async(gl);
	}
}

/**
 * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away. This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back. This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_flush_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);

	gfs2_log_lock(sdp);
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   blk_opf_t op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		unsigned int extlen;
		int ret;

		extlen = 1;
		ret = gfs2_get_extent(jd->jd_inode, lblock, &dblock, &extlen);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
}

/**
 * log_write_header - Write a journal header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	struct super_block *sb = sdp->sd_vfs;

	gfs2_assert_withdraw(sdp, sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
			      flags, op_flags);
	gfs2_log_incr_head(sdp);
	log_flush_wait(sdp);
	log_pull_tail(sdp);
	gfs2_log_update_head(sdp);
}

/**
 * gfs2_ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */
void gfs2_ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	gfs2_drain_revokes(sdp);
	spin_unlock(&sdp->sd_ail_lock);
}
961
/**
 * empty_ail1_list - try to start IO and empty the ail1 list
 * @sdp: Pointer to GFS2 superblock
 */
static void empty_ail1_list(struct gfs2_sbd *sdp)
{
        unsigned long start = jiffies;
        bool empty = false;

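        /*
         * Keep starting writeback and reaping completed buffers until the
         * ail1 list drains; complain and give up after ten minutes.
         */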
        while (!empty) {
                if (time_after(jiffies, start + (HZ * 600))) {
                        fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
                               __func__, current->journal_info ? 1 : 0);
                        dump_ail_list(sdp);
                        return;
                }
                gfs2_ail1_start(sdp);
                gfs2_ail1_wait(sdp);
                empty = gfs2_ail1_empty(sdp, 0);

                if (gfs2_withdrawing_or_withdrawn(sdp))
                        break;
        }

        if (gfs2_withdrawing(sdp))
                gfs2_withdraw(sdp);
}

/**
 * trans_drain - drain the buf and databuf queues of a failed transaction
 * @tr: the transaction to drain
 *
 * This is called on the error exit path for a log write that failed. Since
 * the after_commit functions were bypassed, the items must be removed from
 * the buf and databuf queues here instead.
 */
static void trans_drain(struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd;
        struct list_head *head;

        if (!tr)
                return;

        head = &tr->tr_buf;
        while (!list_empty(head)) {
                bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                if (!list_empty(&bd->bd_ail_st_list))
                        gfs2_remove_from_ail(bd);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
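
        /* Drain the ordered-data buffers in the same way. */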
        head = &tr->tr_databuf;
        while (!list_empty(head)) {
                bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                if (!list_empty(&bd->bd_ail_st_list))
                        gfs2_remove_from_ail(bd);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: The filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
        struct gfs2_trans *tr = NULL;
        unsigned int reserved_blocks = 0, used_blocks = 0;
        bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
        unsigned int first_log_head;
        unsigned int reserved_revokes = 0;

        down_write(&sdp->sd_log_flush_lock);
        trace_gfs2_log_flush(sdp, 1, flags);

repeat:
        /*
         * Do this check while holding the log_flush_lock to prevent new
         * buffers from being added to the ail via gfs2_pin()
         */
        if (gfs2_withdrawing_or_withdrawn(sdp) ||
            !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                goto out;

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
                goto out;

        first_log_head = sdp->sd_log_head;
        sdp->sd_log_flush_head = first_log_head;

        tr = sdp->sd_log_tr;
        if (tr || sdp->sd_log_num_revoke) {
                if (reserved_blocks)
                        gfs2_log_release(sdp, reserved_blocks);
                reserved_blocks = sdp->sd_log_blks_reserved;
                reserved_revokes = sdp->sd_log_num_revoke;
                if (tr) {
                        sdp->sd_log_tr = NULL;
                        tr->tr_first = first_log_head;
                        if (unlikely(frozen)) {
                                if (gfs2_assert_withdraw_delayed(sdp,
                                    !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
                                        goto out_withdraw;
                        }
                }
        } else if (!reserved_blocks) {
                unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;

                reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
                if (current == sdp->sd_logd_process)
                        taboo_blocks = 0;

                if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
                        up_write(&sdp->sd_log_flush_lock);
                        __gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
                        down_write(&sdp->sd_log_flush_lock);
                        goto repeat;
                }
                BUG_ON(sdp->sd_log_num_revoke);
        }

        if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        if (unlikely(frozen))
                if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
                        goto out_withdraw;

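        /*
         * Write ordered data first: data buffers must reach the disk before
         * the journaled metadata that refers to them is committed.
         */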
        gfs2_ordered_write(sdp);
        if (gfs2_withdrawing_or_withdrawn(sdp))
                goto out_withdraw;
        lops_before_commit(sdp, tr);
        if (gfs2_withdrawing_or_withdrawn(sdp))
                goto out_withdraw;
        if (sdp->sd_jdesc)
                gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
        if (gfs2_withdrawing_or_withdrawn(sdp))
                goto out_withdraw;

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_write_header(sdp, flags);
        } else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
                log_write_header(sdp, flags);
        }
        if (gfs2_withdrawing_or_withdrawn(sdp))
                goto out_withdraw;
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_reserved = 0;

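        /*
         * If the transaction still has buffers on its ail1 list awaiting
         * writeback, hand it over to the AIL; otherwise it is freed on the
         * way out.
         */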
        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
                if (!sdp->sd_log_idle) {
                        empty_ail1_list(sdp);
                        if (gfs2_withdrawing_or_withdrawn(sdp))
                                goto out_withdraw;
                        log_write_header(sdp, flags);
                }
                if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
                             GFS2_LOG_HEAD_FLUSH_FREEZE))
                        gfs2_log_shutdown(sdp);
        }

out_end:
        used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
        reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
        atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
        gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs <= sdp->sd_ldptrs);
        if (reserved_revokes > sdp->sd_ldptrs)
                reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
        if (used_blocks != reserved_blocks) {
                gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
                gfs2_log_release(sdp, reserved_blocks - used_blocks);
        }
        up_write(&sdp->sd_log_flush_lock);
        gfs2_trans_free(sdp, tr);
        if (gfs2_withdrawing(sdp))
                gfs2_withdraw(sdp);
        trace_gfs2_log_flush(sdp, 0, flags);
        return;

out_withdraw:
        trans_drain(tr);
        /*
         * If the tr_list is empty, we're withdrawing during a log
         * flush that targets a transaction, but the transaction was
         * never queued onto any of the ail lists. Here we add it to
         * ail1 just so that gfs2_ail_drain() will find and free it.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (tr && list_empty(&tr->tr_list))
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);
        tr = NULL;
        goto out_end;
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @sdp: the filesystem
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
        struct gfs2_trans *old = sdp->sd_log_tr;

        WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

        old->tr_num_buf_new += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm += new->tr_num_buf_rm;
        old->tr_num_databuf_rm += new->tr_num_databuf_rm;
        old->tr_revokes += new->tr_revokes;
        old->tr_num_revoke += new->tr_num_revoke;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);

        spin_lock(&sdp->sd_ail_lock);
        list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
        list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
                sdp->sd_log_tr = tr;
                set_bit(TR_ATTACHED, &tr->tr_flags);
        }
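
        /* Recompute how many log blocks the attached transactions now
           require, and give back anything that was over-reserved. */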
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        if (unused)
                gfs2_log_release(sdp, unused);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return atomic_read(&sdp->sd_log_pinned) +
               atomic_read(&sdp->sd_log_blks_needed) >=
               atomic_read(&sdp->sd_log_thresh1);
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        return sdp->sd_jdesc->jd_blocks -
               atomic_read(&sdp->sd_log_blks_free) +
               atomic_read(&sdp->sd_log_blks_needed) >=
               atomic_read(&sdp->sd_log_thresh2);
}
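
/*
 * Worked example (illustrative numbers only): with an 8192-block journal,
 * thresh1 defaults to about 2/5 of the journal (~3276 blocks) and thresh2
 * to about 4/5 (~6553 blocks), so gfs2_jrnl_flush_reqd() requests a log
 * flush once pinned plus needed blocks reach roughly 40% of the journal,
 * and gfs2_ail_flush_reqd() forces AIL writeback once roughly 80% of the
 * journal is in use.
 */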

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
        log_pull_tail(sdp);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @data: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;

        set_freezable();
        while (!kthread_should_stop()) {
                if (gfs2_withdrawing_or_withdrawn(sdp))
                        break;

                /* Check for errors writing to the journal */
                if (sdp->sd_log_error) {
                        gfs2_lm(sdp,
                                "GFS2: fsid=%s: error %d: "
                                "withdrawing the file system to "
                                "prevent further damage.\n",
                                sdp->sd_fsname, sdp->sd_log_error);
                        gfs2_withdraw(sdp);
                        break;
                }

                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp, 0);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_JFLUSH_REQD);
                }

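                /*
                 * Too much of the journal is dirty or pinned: start AIL
                 * writeback so the log tail can advance, then flush.
                 */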
                if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
                    gfs2_ail_flush_reqd(sdp)) {
                        clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp, 0);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
                }

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                t = wait_event_freezable_timeout(sdp->sd_logd_waitq,
                                test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
                                gfs2_ail_flush_reqd(sdp) ||
                                gfs2_jrnl_flush_reqd(sdp) ||
                                sdp->sd_log_error ||
                                gfs2_withdrawing_or_withdrawn(sdp) ||
                                kthread_should_stop(),
                                t);
        }

        if (gfs2_withdrawing(sdp))
                gfs2_withdraw(sdp);

        return 0;
}