// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
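
/*
 * Worked example (illustrative values): BLK_AVG(3, 10) evaluates to
 * (3 + 10) >> 1 == 6, i.e. the midpoint rounded down. The binary search
 * in xlog_find_cycle_start() below relies on this rounding to terminate.
 */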

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}
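
/*
 * Example (illustrative values): for a log with l_logBBsize == 1024, the
 * pair (blk_no == 1000, bbcount == 24) is valid since 1000 + 24 == 1024
 * does not exceed the log size, while (1000, 25), a zero bbcount, or a
 * negative blk_no all fail the checks above.
 */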

/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer. If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue. Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1). But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
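
/*
 * Sizing example (illustrative values): with 4k log sectors on 512-byte
 * basic blocks (l_sectBBsize == 8), a request for nbblks == 5 first grows
 * to 5 + 8 == 13 to cover the unaligned-offset case, then rounds up to 16
 * basic blocks, i.e. a BBTOB(16) == 8192 byte allocation.
 */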

/*
 * Return the address of the start of the given block number's data
 * in a log buffer. The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
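
/*
 * Example (illustrative values): with l_sectBBsize == 8, block 21 sits
 * 21 & 7 == 5 basic blocks into its sector, so its data starts
 * BBTOB(5) == 2560 bytes into the sector-aligned buffer.
 */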

static int
xlog_do_io(
	struct xlog		*log,
	xfs_daddr_t		blk_no,
	unsigned int		nbblks,
	char			*data,
	enum req_op		op)
{
	int			error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !xlog_is_shutdown(log)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}
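
/*
 * Alignment example (illustrative values): a single-block read of block 21
 * with l_sectBBsize == 8 is widened to blocks 16..23 (round_down(21, 8),
 * round_up(1, 8)); callers then locate block 21 within the buffer via
 * xlog_align().
 */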

STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
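
/*
 * Search trace (illustrative values): for an 8 block log stamped with
 * cycles { 5, 5, 5, 4, 4, 4, 4, 4 } and cycle == 4, the (first, end)
 * bounds move (0, 7) -> (0, 3) -> (1, 3) -> (2, 3), and *last_blk comes
 * back as 3, the first block holding the target cycle.
 */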

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining. If that fails,
	 * try a smaller size. We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}
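
/*
 * Allocation fallback example (illustrative values): scanning
 * nbblks == 1000 in a 2048 block log first tries a
 * roundup_pow_of_two(1000) == 1024 block buffer, then halves to 512,
 * 256, ... on allocation failure, giving up with -ENOMEM only once the
 * size would drop below one log sector.
 */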

static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_has_logv2(log->l_mp)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}
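
/*
 * Example (illustrative values): a v2 log record with h_size == 128k uses
 * DIV_ROUND_UP(128k, 32k) == 4 header blocks, XLOG_HEADER_CYCLE_SIZE
 * being 32k; v1 logs always use a single header block.
 */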

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record. Therefore, we subtract one to get the block number
 * of the last block in the given buffer. extra_bblks contains the number
 * of blocks we would have read on a previous read. This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header. Return
	 * to caller. If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head). So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #. We may end
	 * up reading an entire log record. In this case, we don't want to
	 * reset last_blk. Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go. This means that incomplete LR writes at the end are
 * eliminated when calculating the head. We aren't guaranteed that previous
 * LRs have complete transactions. We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);
	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number. In this
	 * case, head_blk can't be set to zero (which makes sense). The below
	 * math doesn't work out properly with head_blk equal to zero. Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct. If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum. In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle. We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1. If we find such a hole,
		 * then the start of that hole will be the new head. The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle. We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs. First we do a binary search
		 * for the first occurrence of last_half_cycle. The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us. If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log. The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer. Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number. The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log. The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {			/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log. In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log. The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle. If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found. This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks. We need to skip past those since that is
		 * certainly not the head of the log. By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now. The last part of the physical
		 * log is good. This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kmem_free(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number. Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1. In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kmem_free(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
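
/*
 * Example (illustrative values): in a 1024 block log, head_blk == 900 and
 * tail_blk == 100 give a wrapped distance of 100 + (1024 - 900) == 224
 * blocks, while head_blk == 100 and tail_blk == 900 give 800 blocks.
 */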

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kmem_free(buffer);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kmem_free(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}
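
/*
 * Example (illustrative values): with l_logBBsize == 1000, a computed
 * block number of 1003 wraps to 3, while 999 is returned unchanged; this
 * keeps the unmount record arithmetic below inside the circular log.
 */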

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	hblks = xlog_logrec_hblks(log, rhead);
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kmem_free(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough. You can then search linearly through
 * the X blocks. This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0. It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		kmem_free(buffer);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		kmem_free(buffer);
		return 0;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer. Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks. At this point, the maximum
	 * is not chosen to mean anything special. XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head. What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write. We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kmem_free(buffer);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
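
/*
 * Stamping example (illustrative values, assuming xlog_assign_lsn() packs
 * the cycle into the upper 32 bits and the block into the lower 32 bits):
 * cycle == 7 and block == 100 yield h_lsn == 0x0000000700000064.
 */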

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written. If that fails, try
	 * a smaller size. We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;

		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kmem_free(buffer);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head. We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number. We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log. Doing so would leave the log without
 * any valid log records in it until a new one was written. If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail. We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log. Just do it
		 * in a single write. Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks. Do it in two separate
		 * I/Os. The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,
	uint64_t		intent_id)
{
	struct xfs_defer_pending *dfp, *n;

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		struct xfs_log_item	*lip = dfp->dfp_intent;

		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		ASSERT(xlog_item_is_intent(lip));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}

int
xlog_recover_iget(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp)
{
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
	if (error)
		return error;

	error = xfs_qm_dqattach(*ipp);
	if (error) {
		xfs_irele(*ipp);
		return error;
	}

	if (VFS_I(*ipp)->i_nlink == 0)
		xfs_iflags_set(*ipp, XFS_IRECOVERY);

	return 0;
}

/******************************************************************************
 *
 * Log recover routines
 *
 ******************************************************************************
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
	&xlog_attri_item_ops,
	&xlog_attrd_item_ops,
};

static const struct xlog_recover_item_ops *
xlog_find_item_ops(
	struct xlog_recover_item	*item)
{
	unsigned int			i;

	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
			return xlog_recover_item_ops[i];

	return NULL;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
1856STATIC int
1857xlog_recover_reorder_trans(
1858 struct xlog *log,
1859 struct xlog_recover *trans,
1860 int pass)
1861{
1862 struct xlog_recover_item *item, *n;
1863 int error = 0;
1864 LIST_HEAD(sort_list);
1865 LIST_HEAD(cancel_list);
1866 LIST_HEAD(buffer_list);
1867 LIST_HEAD(inode_buffer_list);
1868 LIST_HEAD(item_list);
1869
1870 list_splice_init(&trans->r_itemq, &sort_list);
1871 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1872 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST;
1873
1874 item->ri_ops = xlog_find_item_ops(item);
1875 if (!item->ri_ops) {
1876 xfs_warn(log->l_mp,
1877 "%s: unrecognized type of log operation (%d)",
1878 __func__, ITEM_TYPE(item));
1879 ASSERT(0);
1880 /*
1881 * return the remaining items back to the transaction
1882 * item list so they can be freed in caller.
1883 */
1884 if (!list_empty(&sort_list))
1885 list_splice_init(&sort_list, &trans->r_itemq);
1886 error = -EFSCORRUPTED;
1887 break;
1888 }
1889
1890 if (item->ri_ops->reorder)
1891 fate = item->ri_ops->reorder(item);
1892
1893 switch (fate) {
1894 case XLOG_REORDER_BUFFER_LIST:
1895 list_move_tail(&item->ri_list, &buffer_list);
1896 break;
1897 case XLOG_REORDER_CANCEL_LIST:
1898 trace_xfs_log_recover_item_reorder_head(log,
1899 trans, item, pass);
1900 list_move(&item->ri_list, &cancel_list);
1901 break;
1902 case XLOG_REORDER_INODE_BUFFER_LIST:
1903 list_move(&item->ri_list, &inode_buffer_list);
1904 break;
1905 case XLOG_REORDER_ITEM_LIST:
1906 trace_xfs_log_recover_item_reorder_tail(log,
1907 trans, item, pass);
1908 list_move_tail(&item->ri_list, &item_list);
1909 break;
1910 }
1911 }
1912
1913 ASSERT(list_empty(&sort_list));
1914 if (!list_empty(&buffer_list))
1915 list_splice(&buffer_list, &trans->r_itemq);
1916 if (!list_empty(&item_list))
1917 list_splice_tail(&item_list, &trans->r_itemq);
1918 if (!list_empty(&inode_buffer_list))
1919 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1920 if (!list_empty(&cancel_list))
1921 list_splice_tail(&cancel_list, &trans->r_itemq);
1922 return error;
1923}
1924
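/*
 * Worked example (editorial): if r_itemq held [inode item I, ordinary
 * buffer B, inode unlink buffer U, cancelled buffer C], the splices at
 * the end of the function above rebuild it as B, I, U, C - buffers
 * first (they may be inode allocation buffers), then inode items, then
 * inode unlink buffers, then cancelled buffers, per the constraints
 * documented above.
 */
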
1925void
1926xlog_buf_readahead(
1927 struct xlog *log,
1928 xfs_daddr_t blkno,
1929 uint len,
1930 const struct xfs_buf_ops *ops)
1931{
1932 if (!xlog_is_buffer_cancelled(log, blkno, len))
1933 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1934}
1935
1936/*
1937 * Create a deferred work structure for resuming and tracking the progress of a
1938 * log intent item that was found during recovery.
1939 */
1940void
1941xlog_recover_intent_item(
1942 struct xlog *log,
1943 struct xfs_log_item *lip,
1944 xfs_lsn_t lsn,
1945 const struct xfs_defer_op_type *ops)
1946{
1947 ASSERT(xlog_item_is_intent(lip));
1948
1949 xfs_defer_start_recovery(lip, &log->r_dfops, ops);
1950
1951 /*
1952 * Insert the intent into the AIL directly and drop one reference so
1953 * that finishing or canceling the work will drop the other.
1954 */
1955 xfs_trans_ail_insert(log->l_ailp, lip, lsn);
1956 lip->li_ops->iop_unpin(lip, 0);
1957}
1958
1959STATIC int
1960xlog_recover_items_pass2(
1961 struct xlog *log,
1962 struct xlog_recover *trans,
1963 struct list_head *buffer_list,
1964 struct list_head *item_list)
1965{
1966 struct xlog_recover_item *item;
1967 int error = 0;
1968
1969 list_for_each_entry(item, item_list, ri_list) {
1970 trace_xfs_log_recover_item_recover(log, trans, item,
1971 XLOG_RECOVER_PASS2);
1972
1973 if (item->ri_ops->commit_pass2)
1974 error = item->ri_ops->commit_pass2(log, buffer_list,
1975 item, trans->r_lsn);
1976 if (error)
1977 return error;
1978 }
1979
1980 return error;
1981}
1982
1983/*
1984 * Perform the transaction.
1985 *
1986 * If the transaction modifies a buffer or inode, do it now. Otherwise,
1987 * intent items (EFIs, EFDs and the like) get queued up by adding entries
 * into the AIL for them.
1988 */
1989STATIC int
1990xlog_recover_commit_trans(
1991 struct xlog *log,
1992 struct xlog_recover *trans,
1993 int pass,
1994 struct list_head *buffer_list)
1995{
1996 int error = 0;
1997 int items_queued = 0;
1998 struct xlog_recover_item *item;
1999 struct xlog_recover_item *next;
2000	LIST_HEAD(ra_list);
2001	LIST_HEAD(done_list);
2002
2003 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
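
	/*
	 * Editorial note: in pass 2 items are queued onto ra_list while
	 * their readahead is issued, and the batch is only replayed once
	 * XLOG_RECOVER_COMMIT_QUEUE_MAX items have accumulated, keeping
	 * readahead I/O running ahead of the synchronous replay work.
	 */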
2004
2005 hlist_del_init(&trans->r_list);
2006
2007 error = xlog_recover_reorder_trans(log, trans, pass);
2008 if (error)
2009 return error;
2010
2011 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2012 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2013
2014 switch (pass) {
2015 case XLOG_RECOVER_PASS1:
2016 if (item->ri_ops->commit_pass1)
2017 error = item->ri_ops->commit_pass1(log, item);
2018 break;
2019 case XLOG_RECOVER_PASS2:
2020 if (item->ri_ops->ra_pass2)
2021 item->ri_ops->ra_pass2(log, item);
2022 list_move_tail(&item->ri_list, &ra_list);
2023 items_queued++;
2024 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2025 error = xlog_recover_items_pass2(log, trans,
2026 buffer_list, &ra_list);
2027 list_splice_tail_init(&ra_list, &done_list);
2028 items_queued = 0;
2029 }
2030
2031 break;
2032 default:
2033 ASSERT(0);
2034 }
2035
2036 if (error)
2037 goto out;
2038 }
2039
2040out:
2041 if (!list_empty(&ra_list)) {
2042 if (!error)
2043 error = xlog_recover_items_pass2(log, trans,
2044 buffer_list, &ra_list);
2045 list_splice_tail_init(&ra_list, &done_list);
2046 }
2047
2048 if (!list_empty(&done_list))
2049 list_splice_init(&done_list, &trans->r_itemq);
2050
2051 return error;
2052}
2053
2054STATIC void
2055xlog_recover_add_item(
2056 struct list_head *head)
2057{
2058 struct xlog_recover_item *item;
2059
2060 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2061 INIT_LIST_HEAD(&item->ri_list);
2062 list_add_tail(&item->ri_list, head);
2063}
2064
2065STATIC int
2066xlog_recover_add_to_cont_trans(
2067 struct xlog *log,
2068 struct xlog_recover *trans,
2069 char *dp,
2070 int len)
2071{
2072 struct xlog_recover_item *item;
2073 char *ptr, *old_ptr;
2074 int old_len;
2075
2076 /*
2077 * If the transaction is empty, the header was split across this and the
2078 * previous record. Copy the rest of the header.
2079 */
2080 if (list_empty(&trans->r_itemq)) {
2081 ASSERT(len <= sizeof(struct xfs_trans_header));
2082 if (len > sizeof(struct xfs_trans_header)) {
2083 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2084 return -EFSCORRUPTED;
2085 }
2086
2087 xlog_recover_add_item(&trans->r_itemq);
2088 ptr = (char *)&trans->r_theader +
2089 sizeof(struct xfs_trans_header) - len;
2090 memcpy(ptr, dp, len);
2091 return 0;
2092 }
2093
2094 /* take the tail entry */
2095 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2096 ri_list);
2097
2098 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2099 old_len = item->ri_buf[item->ri_cnt-1].i_len;
2100
2101 ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
2102 if (!ptr)
2103 return -ENOMEM;
2104 memcpy(&ptr[old_len], dp, len);
2105 item->ri_buf[item->ri_cnt-1].i_len += len;
2106 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2107 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2108 return 0;
2109}
2110
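/*
 * Continuation example (editorial; sizes invented): a 6k region logged
 * into 4k records arrives as one ophdr carrying the first 4k (flagged
 * XLOG_CONTINUE_TRANS) followed by an ophdr in the next record carrying
 * the remaining 2k (flagged XLOG_WAS_CONT_TRANS); the kvrealloc() above
 * glues the second chunk onto the tail region of the tail item.
 */
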
2111/*
2112 * The next region to add is the start of a new region. It could be
2113 * a whole region or just the first part of one. Because
2114 * of this, the assumption here is that the type and size fields of all
2115 * format structures fit into the first 32 bits of the structure.
2116 *
2117 * This works because all regions must be 32 bit aligned. Therefore, we
2118 * either have both fields or we have neither field. In the case we have
2119 * neither field, the data part of the region is zero length. We only have
2120 * a log_op_header and can throw away the header since a new one will appear
2121 * later. If we have at least 4 bytes, then we can determine how many regions
2122 * will appear in the current log item.
2123 */
2124STATIC int
2125xlog_recover_add_to_trans(
2126 struct xlog *log,
2127 struct xlog_recover *trans,
2128 char *dp,
2129 int len)
2130{
2131 struct xfs_inode_log_format *in_f; /* any will do */
2132 struct xlog_recover_item *item;
2133 char *ptr;
2134
2135 if (!len)
2136 return 0;
2137 if (list_empty(&trans->r_itemq)) {
2138 /* we need to catch log corruptions here */
2139 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2140 xfs_warn(log->l_mp, "%s: bad header magic number",
2141 __func__);
2142 ASSERT(0);
2143 return -EFSCORRUPTED;
2144 }
2145
2146 if (len > sizeof(struct xfs_trans_header)) {
2147 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2148 ASSERT(0);
2149 return -EFSCORRUPTED;
2150 }
2151
2152 /*
2153 * The transaction header can be arbitrarily split across op
2154 * records. If we don't have the whole thing here, copy what we
2155 * do have and handle the rest in the next record.
2156 */
2157 if (len == sizeof(struct xfs_trans_header))
2158 xlog_recover_add_item(&trans->r_itemq);
2159 memcpy(&trans->r_theader, dp, len);
2160 return 0;
2161 }
2162
2163 ptr = kmem_alloc(len, 0);
2164 memcpy(ptr, dp, len);
2165 in_f = (struct xfs_inode_log_format *)ptr;
2166
2167 /* take the tail entry */
2168 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2169 ri_list);
2170 if (item->ri_total != 0 &&
2171 item->ri_total == item->ri_cnt) {
2172 /* tail item is in use, get a new one */
2173 xlog_recover_add_item(&trans->r_itemq);
2174 item = list_entry(trans->r_itemq.prev,
2175 struct xlog_recover_item, ri_list);
2176 }
2177
2178 if (item->ri_total == 0) { /* first region to be added */
2179 if (in_f->ilf_size == 0 ||
2180 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2181 xfs_warn(log->l_mp,
2182 "bad number of regions (%d) in inode log format",
2183 in_f->ilf_size);
2184 ASSERT(0);
2185 kmem_free(ptr);
2186 return -EFSCORRUPTED;
2187 }
2188
2189 item->ri_total = in_f->ilf_size;
2190 item->ri_buf =
2191 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2192 0);
2193 }
2194
2195 if (item->ri_total <= item->ri_cnt) {
2196 xfs_warn(log->l_mp,
2197 "log item region count (%d) overflowed size (%d)",
2198 item->ri_cnt, item->ri_total);
2199 ASSERT(0);
2200 kmem_free(ptr);
2201 return -EFSCORRUPTED;
2202 }
2203
2204 /* Description region is ri_buf[0] */
2205 item->ri_buf[item->ri_cnt].i_addr = ptr;
2206 item->ri_buf[item->ri_cnt].i_len = len;
2207 item->ri_cnt++;
2208 trace_xfs_log_recover_item_add(log, trans, item, 0);
2209 return 0;
2210}
2211
2212/*
2213 * Free up any resources allocated by the transaction
2214 *
2215 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2216 */
2217STATIC void
2218xlog_recover_free_trans(
2219 struct xlog_recover *trans)
2220{
2221 struct xlog_recover_item *item, *n;
2222 int i;
2223
2224 hlist_del_init(&trans->r_list);
2225
2226 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2227 /* Free the regions in the item. */
2228 list_del(&item->ri_list);
2229 for (i = 0; i < item->ri_cnt; i++)
2230 kmem_free(item->ri_buf[i].i_addr);
2231 /* Free the item itself */
2232 kmem_free(item->ri_buf);
2233 kmem_free(item);
2234 }
2235 /* Free the transaction recover structure */
2236 kmem_free(trans);
2237}
2238
2239/*
2240 * On error or completion, trans is freed.
2241 */
2242STATIC int
2243xlog_recovery_process_trans(
2244 struct xlog *log,
2245 struct xlog_recover *trans,
2246 char *dp,
2247 unsigned int len,
2248 unsigned int flags,
2249 int pass,
2250 struct list_head *buffer_list)
2251{
2252 int error = 0;
2253 bool freeit = false;
2254
2255 /* mask off ophdr transaction container flags */
2256 flags &= ~XLOG_END_TRANS;
2257 if (flags & XLOG_WAS_CONT_TRANS)
2258 flags &= ~XLOG_CONTINUE_TRANS;
2259
2260 /*
2261 * Callees must not free the trans structure. We'll decide if we need to
2262 * free it or not based on the operation being done and its result.
2263 */
2264 switch (flags) {
2265 /* expected flag values */
2266 case 0:
2267 case XLOG_CONTINUE_TRANS:
2268 error = xlog_recover_add_to_trans(log, trans, dp, len);
2269 break;
2270 case XLOG_WAS_CONT_TRANS:
2271 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2272 break;
2273 case XLOG_COMMIT_TRANS:
2274 error = xlog_recover_commit_trans(log, trans, pass,
2275 buffer_list);
2276 /* success or fail, we are now done with this transaction. */
2277 freeit = true;
2278 break;
2279
2280 /* unexpected flag values */
2281 case XLOG_UNMOUNT_TRANS:
2282 /* just skip trans */
2283 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2284 freeit = true;
2285 break;
2286 case XLOG_START_TRANS:
2287 default:
2288 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2289 ASSERT(0);
2290 error = -EFSCORRUPTED;
2291 break;
2292 }
2293 if (error || freeit)
2294 xlog_recover_free_trans(trans);
2295 return error;
2296}
2297
2298/*
2299 * Lookup the transaction recovery structure associated with the ID in the
2300 * current ophdr. If the transaction doesn't exist and the start flag is set in
2301 * the ophdr, then allocate a new transaction for future ID matches to find.
2302 * Either way, return what we found during the lookup - an existing transaction
2303 * or nothing.
2304 */
2305STATIC struct xlog_recover *
2306xlog_recover_ophdr_to_trans(
2307 struct hlist_head rhash[],
2308 struct xlog_rec_header *rhead,
2309 struct xlog_op_header *ohead)
2310{
2311 struct xlog_recover *trans;
2312 xlog_tid_t tid;
2313 struct hlist_head *rhp;
2314
2315 tid = be32_to_cpu(ohead->oh_tid);
2316 rhp = &rhash[XLOG_RHASH(tid)];
2317 hlist_for_each_entry(trans, rhp, r_list) {
2318 if (trans->r_log_tid == tid)
2319 return trans;
2320 }
2321
2322 /*
2323 * skip over non-start transaction headers - we could be
2324 * processing slack space before the next transaction starts
2325 */
2326 if (!(ohead->oh_flags & XLOG_START_TRANS))
2327 return NULL;
2328
2329 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2330
2331 /*
2332 * This is a new transaction so allocate a new recovery container to
2333 * hold the recovery ops that will follow.
2334 */
2335 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2336 trans->r_log_tid = tid;
2337 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2338 INIT_LIST_HEAD(&trans->r_itemq);
2339 INIT_HLIST_NODE(&trans->r_list);
2340 hlist_add_head(&trans->r_list, rhp);
2341
2342 /*
2343 * Nothing more to do for this ophdr. Items to be added to this new
2344 * transaction will be in subsequent ophdr containers.
2345 */
2346 return NULL;
2347}
2348
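/*
 * Editorial note on the lookup above: oh_tid is hashed into the small
 * rhash[] table so that ophdrs of transactions interleaved in the
 * record stream can be matched back to their container cheaply. Only a
 * start ophdr (which carries no payload, hence the oh_len == 0 assert)
 * creates a new container; all other ophdrs must find an existing one.
 */
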
2349STATIC int
2350xlog_recover_process_ophdr(
2351 struct xlog *log,
2352 struct hlist_head rhash[],
2353 struct xlog_rec_header *rhead,
2354 struct xlog_op_header *ohead,
2355 char *dp,
2356 char *end,
2357 int pass,
2358 struct list_head *buffer_list)
2359{
2360 struct xlog_recover *trans;
2361 unsigned int len;
2362 int error;
2363
2364 /* Do we understand who wrote this op? */
2365 if (ohead->oh_clientid != XFS_TRANSACTION &&
2366 ohead->oh_clientid != XFS_LOG) {
2367 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2368 __func__, ohead->oh_clientid);
2369 ASSERT(0);
2370 return -EFSCORRUPTED;
2371 }
2372
2373 /*
2374 * Check that the payload the ophdr claims to carry fits in the record.
2375 */
2376 len = be32_to_cpu(ohead->oh_len);
2377 if (dp + len > end) {
2378 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2379 WARN_ON(1);
2380 return -EFSCORRUPTED;
2381 }
2382
2383 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2384 if (!trans) {
2385 /* nothing to do, so skip over this ophdr */
2386 return 0;
2387 }
2388
2389 /*
2390 * The recovered buffer queue is drained only once we know that all
2391 * recovery items for the current LSN have been processed. This is
2392 * required because:
2393 *
2394 * - Buffer write submission updates the metadata LSN of the buffer.
2395 * - Log recovery skips items with a metadata LSN >= the current LSN of
2396 * the recovery item.
2397 * - Separate recovery items against the same metadata buffer can share
2398 * a current LSN. I.e., consider that the LSN of a recovery item is
2399 * defined as the starting LSN of the first record in which its
2400 * transaction appears, that a record can hold multiple transactions,
2401 * and/or that a transaction can span multiple records.
2402 *
2403 * In other words, we are allowed to submit a buffer from log recovery
2404 * once per current LSN. Otherwise, we may incorrectly skip recovery
2405 * items and cause corruption.
2406 *
2407 * We don't know up front whether buffers are updated multiple times per
2408 * LSN. Therefore, track the current LSN of each commit log record as it
2409 * is processed and drain the queue when it changes. Use commit records
2410 * because they are ordered correctly by the logging code.
2411 */
2412 if (log->l_recovery_lsn != trans->r_lsn &&
2413 ohead->oh_flags & XLOG_COMMIT_TRANS) {
2414 error = xfs_buf_delwri_submit(buffer_list);
2415 if (error)
2416 return error;
2417 log->l_recovery_lsn = trans->r_lsn;
2418 }
2419
2420 return xlog_recovery_process_trans(log, trans, dp, len,
2421 ohead->oh_flags, pass, buffer_list);
2422}
2423
2424/*
2425 * There are two valid states of the r_state field. 0 indicates that the
2426 * transaction structure is in a normal state. We have either seen the
2427 * start of the transaction or the last operation we added was not a partial
2428 * operation. If the last operation we added to the transaction was a
2429 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2430 *
2431 * NOTE: skip LRs with 0 data length.
2432 */
2433STATIC int
2434xlog_recover_process_data(
2435 struct xlog *log,
2436 struct hlist_head rhash[],
2437 struct xlog_rec_header *rhead,
2438 char *dp,
2439 int pass,
2440 struct list_head *buffer_list)
2441{
2442 struct xlog_op_header *ohead;
2443 char *end;
2444 int num_logops;
2445 int error;
2446
2447 end = dp + be32_to_cpu(rhead->h_len);
2448 num_logops = be32_to_cpu(rhead->h_num_logops);
2449
2450 /* check the log format matches our own - else we can't recover */
2451 if (xlog_header_check_recover(log->l_mp, rhead))
2452 return -EIO;
2453
2454 trace_xfs_log_recover_record(log, rhead, pass);
2455 while ((dp < end) && num_logops) {
2456
2457 ohead = (struct xlog_op_header *)dp;
2458 dp += sizeof(*ohead);
2459 ASSERT(dp <= end);
2460
2461 /* errors will abort recovery */
2462 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2463 dp, end, pass, buffer_list);
2464 if (error)
2465 return error;
2466
2467 dp += be32_to_cpu(ohead->oh_len);
2468 num_logops--;
2469 }
2470 return 0;
2471}
2472
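/*
 * Record body layout walked above (editorial sketch):
 *
 *	| ophdr | payload | ophdr | payload | ... | ophdr | payload |
 *
 * dp starts at the first xlog_op_header; each iteration consumes one
 * header plus oh_len payload bytes until h_num_logops ophdrs have been
 * processed or dp reaches the end of the record.
 */
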
2473/* Take all the collected deferred ops and finish them in order. */
2474static int
2475xlog_finish_defer_ops(
2476 struct xfs_mount *mp,
2477 struct list_head *capture_list)
2478{
2479 struct xfs_defer_capture *dfc, *next;
2480 struct xfs_trans *tp;
2481 int error = 0;
2482
2483 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2484 struct xfs_trans_res resv;
2485 struct xfs_defer_resources dres;
2486
2487 /*
2488 * Create a new transaction reservation from the captured
2489 * information. Set logcount to 1 to force the new transaction
2490 * to regrant every roll so that we can make forward progress
2491 * in recovery no matter how full the log might be.
2492 */
2493 resv.tr_logres = dfc->dfc_logres;
2494 resv.tr_logcount = 1;
2495 resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2496
2497 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2498 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2499 if (error) {
2500 xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
2501 return error;
2502 }
2503
2504 /*
2505 * Transfer to this new transaction all the dfops we captured
2506 * from recovering a single intent item.
2507 */
2508 list_del_init(&dfc->dfc_list);
2509 xfs_defer_ops_continue(dfc, tp, &dres);
2510 error = xfs_trans_commit(tp);
2511 xfs_defer_resources_rele(&dres);
2512 if (error)
2513 return error;
2514 }
2515
2516 ASSERT(list_empty(capture_list));
2517 return 0;
2518}
2519
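/*
 * Reservation example (editorial; numbers invented): if a captured
 * chain recorded dfc_logres of 8k, the replay transaction is allocated
 * with tr_logres = 8k and tr_logcount = 1, so every roll while the
 * remaining dfops are finished regrants a single unit of log space
 * rather than reserving logcount units up front - that is what keeps
 * recovery moving in a nearly full log.
 */
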
2520/* Release all the captured defer ops and capture structures in this list. */
2521static void
2522xlog_abort_defer_ops(
2523 struct xfs_mount *mp,
2524 struct list_head *capture_list)
2525{
2526 struct xfs_defer_capture *dfc;
2527 struct xfs_defer_capture *next;
2528
2529 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2530 list_del_init(&dfc->dfc_list);
2531 xfs_defer_ops_capture_abort(mp, dfc);
2532 }
2533}
2534
2535/*
2536 * When this is called, all of the log intent items which did not have
2537 * corresponding log done items should be in the AIL. What we do now is update
2538 * the data structures associated with each one.
2539 *
2540 * Since we process the log intent items in normal transactions, they will be
2541 * removed at some point after the commit. This prevents us from just walking
2542 * down the list processing each one. We'll use a flag in the intent item to
2543 * skip those that we've already processed and use the AIL iteration mechanism's
2544 * generation count to try to speed this up at least a bit.
2545 *
2546 * When we start, we know that the intents are the only things in the AIL. As we
2547 * process them, however, other items are added to the AIL. Hence we know we
2548 * have started recovery on all the pending intents when we find a non-intent
2549 * item in the AIL.
2550 */
2551STATIC int
2552xlog_recover_process_intents(
2553 struct xlog *log)
2554{
2555 LIST_HEAD(capture_list);
2556 struct xfs_defer_pending *dfp, *n;
2557 int error = 0;
2558#if defined(DEBUG) || defined(XFS_WARN)
2559 xfs_lsn_t last_lsn;
2560
2561 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2562#endif
2563
2564 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2565 ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2566
2567 /*
2568 * We should never see a redo item with an LSN higher than
2569 * the last transaction we found in the log at the start
2570 * of recovery.
2571 */
2572 ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0);
2573
2574 /*
2575 * NOTE: If your intent processing routine can create more
2576 * deferred ops, you /must/ attach them to the capture list in
2577 * the recover routine or else those subsequent intents will be
2578 * replayed in the wrong order!
2579 *
2580 * The recovery function can free the log item, so we must not
2581 * access dfp->dfp_intent after it returns. It must dispose of
2582 * @dfp if it returns 0.
2583 */
2584 error = xfs_defer_finish_recovery(log->l_mp, dfp,
2585 &capture_list);
2586 if (error)
2587 break;
2588 }
2589 if (error)
2590 goto err;
2591
2592 error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2593 if (error)
2594 goto err;
2595
2596 return 0;
2597err:
2598 xlog_abort_defer_ops(log->l_mp, &capture_list);
2599 return error;
2600}
2601
2602/*
2603 * A cancel occurs when the mount has failed and we're bailing out. Release all
2604 * pending log intent items that we haven't started recovery on so they don't
2605 * pin the AIL.
2606 */
2607STATIC void
2608xlog_recover_cancel_intents(
2609 struct xlog *log)
2610{
2611 struct xfs_defer_pending *dfp, *n;
2612
2613 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2614 ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2615
2616 xfs_defer_cancel_recovery(log->l_mp, dfp);
2617 }
2618}
2619
2620/*
2621 * Transfer ownership of the recovered pending work to the recovery transaction
2622 * and try to finish the work. If there is more work to be done, the dfp will
2623 * remain attached to the transaction. If not, the dfp is freed.
2624 */
2625int
2626xlog_recover_finish_intent(
2627 struct xfs_trans *tp,
2628 struct xfs_defer_pending *dfp)
2629{
2630 int error;
2631
2632 list_move(&dfp->dfp_list, &tp->t_dfops);
2633 error = xfs_defer_finish_one(tp, dfp);
2634 if (error == -EAGAIN)
2635 return 0;
2636 return error;
2637}
2638
2639/*
2640 * This routine performs a transaction to null out a bad inode pointer
2641 * in an agi unlinked inode hash bucket.
2642 */
2643STATIC void
2644xlog_recover_clear_agi_bucket(
2645 struct xfs_perag *pag,
2646 int bucket)
2647{
2648 struct xfs_mount *mp = pag->pag_mount;
2649 struct xfs_trans *tp;
2650 struct xfs_agi *agi;
2651 struct xfs_buf *agibp;
2652 int offset;
2653 int error;
2654
2655 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2656 if (error)
2657 goto out_error;
2658
2659 error = xfs_read_agi(pag, tp, &agibp);
2660 if (error)
2661 goto out_abort;
2662
2663 agi = agibp->b_addr;
2664 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2665 offset = offsetof(xfs_agi_t, agi_unlinked) +
2666 (sizeof(xfs_agino_t) * bucket);
2667 xfs_trans_log_buf(tp, agibp, offset,
2668 (offset + sizeof(xfs_agino_t) - 1));
2669
2670 error = xfs_trans_commit(tp);
2671 if (error)
2672 goto out_error;
2673 return;
2674
2675out_abort:
2676 xfs_trans_cancel(tp);
2677out_error:
2678 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
2679 pag->pag_agno);
2680 return;
2681}
2682
2683static int
2684xlog_recover_iunlink_bucket(
2685 struct xfs_perag *pag,
2686 struct xfs_agi *agi,
2687 int bucket)
2688{
2689 struct xfs_mount *mp = pag->pag_mount;
2690 struct xfs_inode *prev_ip = NULL;
2691 struct xfs_inode *ip;
2692 xfs_agino_t prev_agino, agino;
2693 int error = 0;
2694
2695 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2696 while (agino != NULLAGINO) {
2697 error = xfs_iget(mp, NULL,
2698 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
2699 0, 0, &ip);
2700 if (error)
2701 break;
2702
2703 ASSERT(VFS_I(ip)->i_nlink == 0);
2704 ASSERT(VFS_I(ip)->i_mode != 0);
2705 xfs_iflags_clear(ip, XFS_IRECOVERY);
2706 agino = ip->i_next_unlinked;
2707
2708 if (prev_ip) {
2709 ip->i_prev_unlinked = prev_agino;
2710 xfs_irele(prev_ip);
2711
2712 /*
2713 * Ensure the inode is removed from the unlinked list
2714 * before we continue so that it won't race with
2715 * building the in-memory list here. This could be
2716 * serialised with the agibp lock, but that just
2717 * serialises via lockstepping and it's much simpler
2718 * just to flush the inodegc queue and wait for it to
2719 * complete.
2720 */
2721 error = xfs_inodegc_flush(mp);
2722 if (error)
2723 break;
2724 }
2725
2726 prev_agino = agino;
2727 prev_ip = ip;
2728 }
2729
2730 if (prev_ip) {
2731 int error2;
2732
2733 ip->i_prev_unlinked = prev_agino;
2734 xfs_irele(prev_ip);
2735
2736 error2 = xfs_inodegc_flush(mp);
2737 if (error2 && !error)
2738 return error2;
2739 }
2740 return error;
2741}
2742
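/*
 * Bucket walk sketch (editorial): each inode on the on-disk chain is
 * brought into memory with xfs_iget(), XFS_IRECOVERY is cleared, the
 * back pointer for the in-memory list is recorded, and the previous
 * inode is released; dropping the final reference on a zero-nlink
 * inode is what triggers the inactivation that truncates and frees it,
 * with xfs_inodegc_flush() keeping that work from racing with the walk.
 */
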
2743/*
2744 * Recover AGI unlinked lists
2745 *
2746 * This is called during recovery to process any inodes which we unlinked but
2747 * not freed when the system crashed. These inodes will be on the lists in the
2748 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2749 * any inodes found on the lists. Each inode is removed from the lists when it
2750 * has been fully truncated and is freed. The freeing of the inode and its
2751 * removal from the list must be atomic.
2752 *
2753 * If everything we touch in the agi processing loop is already in memory, this
2754 * loop can hold the cpu for a long time. It runs without lock contention,
2755 * memory allocation contention, the need to wait for IO, etc., and so will run
2756 * until we either run out of inodes to process, run low on memory or run out
2757 * of log space.
2758 *
2759 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2760 * and can prevent other filesystem work (such as CIL pushes) from running. This
2761 * can lead to deadlocks if the recovery process runs out of log reservation
2762 * space. Hence we need to yield the CPU when there is other kernel work
2763 * scheduled on this CPU to ensure other scheduled work can run without undue
2764 * latency.
2765 */
2766static void
2767xlog_recover_iunlink_ag(
2768 struct xfs_perag *pag)
2769{
2770 struct xfs_agi *agi;
2771 struct xfs_buf *agibp;
2772 int bucket;
2773 int error;
2774
2775 error = xfs_read_agi(pag, NULL, &agibp);
2776 if (error) {
2777 /*
2778 * AGI is b0rked. Don't process it.
2779 *
2780 * We should probably mark the filesystem as corrupt after we've
2781 * recovered all the AGs we can....
2782 */
2783 return;
2784 }
2785
2786 /*
2787 * Unlock the buffer so that it can be acquired in the normal course of
2788 * the transaction to truncate and free each inode. Because we are not
2789 * racing with anyone else here for the AGI buffer, we don't even need
2790 * to hold it locked to read the initial unlinked bucket entries out of
2791 * the buffer. We keep a buffer reference, though, so that it stays pinned
2792 * in memory while we need the buffer.
2793 */
2794 agi = agibp->b_addr;
2795 xfs_buf_unlock(agibp);
2796
2797 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2798 error = xlog_recover_iunlink_bucket(pag, agi, bucket);
2799 if (error) {
2800 /*
2801 * Bucket is unrecoverable, so only a repair scan can
2802 * free the remaining unlinked inodes. Just empty the
2803 * bucket and leave the remaining inodes on it unreferenced and
2804 * unfreeable.
2805 */
2806 xlog_recover_clear_agi_bucket(pag, bucket);
2807 }
2808 }
2809
2810 xfs_buf_rele(agibp);
2811}
2812
2813static void
2814xlog_recover_process_iunlinks(
2815 struct xlog *log)
2816{
2817 struct xfs_perag *pag;
2818 xfs_agnumber_t agno;
2819
2820 for_each_perag(log->l_mp, agno, pag)
2821 xlog_recover_iunlink_ag(pag);
2822}
2823
2824STATIC void
2825xlog_unpack_data(
2826 struct xlog_rec_header *rhead,
2827 char *dp,
2828 struct xlog *log)
2829{
2830 int i, j, k;
2831
2832 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2833 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2834 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2835 dp += BBSIZE;
2836 }
2837
2838 if (xfs_has_logv2(log->l_mp)) {
2839 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2840 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2841 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2842 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2843 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2844 dp += BBSIZE;
2845 }
2846 }
2847}
2848
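/*
 * Worked example (editorial): on disk, the first 4 bytes of every basic
 * block of a record body are overwritten with the record's cycle number
 * so torn writes can be detected; the displaced words live in the record
 * header(s). For a v2 record with h_len = 40960 bytes (80 BBs), blocks
 * 0..63 are restored from rhead->h_cycle_data[] and blocks 64..79 from
 * the first extended header: j = i / 64 = 1, k = i % 64 = 0..15, i.e.
 * xhdr[1].hic_xheader.xh_cycle_data[k].
 */
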
2849/*
2850 * CRC check, unpack and process a log record.
2851 */
2852STATIC int
2853xlog_recover_process(
2854 struct xlog *log,
2855 struct hlist_head rhash[],
2856 struct xlog_rec_header *rhead,
2857 char *dp,
2858 int pass,
2859 struct list_head *buffer_list)
2860{
2861 __le32 old_crc = rhead->h_crc;
2862 __le32 crc;
2863
2864 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2865
2866 /*
2867 * Nothing else to do if this is a CRC verification pass. Just return
2868 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2869 * sets old_crc to 0 so we must consider this valid even on v5 supers.
2870 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2871 * know precisely what failed.
2872 */
2873 if (pass == XLOG_RECOVER_CRCPASS) {
2874 if (old_crc && crc != old_crc)
2875 return -EFSBADCRC;
2876 return 0;
2877 }
2878
2879 /*
2880 * We're in the normal recovery path. Issue a warning if the CRC in the
2881 * header is non-zero or CRCs are enabled. This is an advisory warning and the
2882 * zero CRC check prevents warnings from being emitted when upgrading
2883 * the kernel from one that does not add CRCs by default.
2884 */
2885 if (crc != old_crc) {
2886 if (old_crc || xfs_has_crc(log->l_mp)) {
2887 xfs_alert(log->l_mp,
2888 "log record CRC mismatch: found 0x%x, expected 0x%x.",
2889 le32_to_cpu(old_crc),
2890 le32_to_cpu(crc));
2891 xfs_hex_dump(dp, 32);
2892 }
2893
2894 /*
2895 * If the filesystem is CRC enabled, this mismatch becomes a
2896 * fatal log corruption failure.
2897 */
2898 if (xfs_has_crc(log->l_mp)) {
2899 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2900 return -EFSCORRUPTED;
2901 }
2902 }
2903
2904 xlog_unpack_data(rhead, dp, log);
2905
2906 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2907 buffer_list);
2908}
2909
2910STATIC int
2911xlog_valid_rec_header(
2912 struct xlog *log,
2913 struct xlog_rec_header *rhead,
2914 xfs_daddr_t blkno,
2915 int bufsize)
2916{
2917 int hlen;
2918
2919 if (XFS_IS_CORRUPT(log->l_mp,
2920 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2921 return -EFSCORRUPTED;
2922 if (XFS_IS_CORRUPT(log->l_mp,
2923 (!rhead->h_version ||
2924 (be32_to_cpu(rhead->h_version) &
2925 (~XLOG_VERSION_OKBITS))))) {
2926 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2927 __func__, be32_to_cpu(rhead->h_version));
2928 return -EFSCORRUPTED;
2929 }
2930
2931 /*
2932 * LR body must have data (or it wouldn't have been written)
2933 * and h_len must not be greater than LR buffer size.
2934 */
2935 hlen = be32_to_cpu(rhead->h_len);
2936 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2937 return -EFSCORRUPTED;
2938
2939 if (XFS_IS_CORRUPT(log->l_mp,
2940 blkno > log->l_logBBsize || blkno > INT_MAX))
2941 return -EFSCORRUPTED;
2942 return 0;
2943}
2944
2945/*
2946 * Read the log from tail to head and process the log records found.
2947 * Handle the two cases where the tail and head are in the same cycle
2948 * and where the active portion of the log wraps around the end of
2949 * the physical log separately. The pass parameter is passed through
2950 * to the routines called to process the data and is not looked at
2951 * here.
2952 */
2953STATIC int
2954xlog_do_recovery_pass(
2955 struct xlog *log,
2956 xfs_daddr_t head_blk,
2957 xfs_daddr_t tail_blk,
2958 int pass,
2959 xfs_daddr_t *first_bad) /* out: first bad log rec */
2960{
2961 xlog_rec_header_t *rhead;
2962 xfs_daddr_t blk_no, rblk_no;
2963 xfs_daddr_t rhead_blk;
2964 char *offset;
2965 char *hbp, *dbp;
2966 int error = 0, h_size, h_len;
2967 int error2 = 0;
2968 int bblks, split_bblks;
2969 int hblks, split_hblks, wrapped_hblks;
2970 int i;
2971 struct hlist_head rhash[XLOG_RHASH_SIZE];
2972	LIST_HEAD(buffer_list);
2973
2974 ASSERT(head_blk != tail_blk);
2975 blk_no = rhead_blk = tail_blk;
2976
2977 for (i = 0; i < XLOG_RHASH_SIZE; i++)
2978 INIT_HLIST_HEAD(&rhash[i]);
2979
2980 /*
2981 * Read the header of the tail block and get the iclog buffer size from
2982 * h_size. Use this to tell how many sectors make up the log header.
2983 */
2984 if (xfs_has_logv2(log->l_mp)) {
2985 /*
2986 * When using variable length iclogs, read first sector of
2987 * iclog header and extract the header size from it. Get a
2988 * new hbp that is the correct size.
2989 */
2990 hbp = xlog_alloc_buffer(log, 1);
2991 if (!hbp)
2992 return -ENOMEM;
2993
2994 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2995 if (error)
2996 goto bread_err1;
2997
2998 rhead = (xlog_rec_header_t *)offset;
2999
3000 /*
3001 * xfsprogs has a bug where record length is based on lsunit but
3002 * h_size (iclog size) is hardcoded to 32k. Now that we
3003 * unconditionally CRC verify the unmount record, this means the
3004 * log buffer can be too small for the record and cause an
3005 * overrun.
3006 *
3007 * Detect this condition here. Use lsunit for the buffer size as
3008 * long as this looks like the mkfs case. Otherwise, return an
3009 * error to avoid a buffer overrun.
3010 */
3011 h_size = be32_to_cpu(rhead->h_size);
3012 h_len = be32_to_cpu(rhead->h_len);
3013 if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3014 rhead->h_num_logops == cpu_to_be32(1)) {
3015 xfs_warn(log->l_mp,
3016 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
3017 h_size, log->l_mp->m_logbsize);
3018 h_size = log->l_mp->m_logbsize;
3019 }
3020
3021 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3022 if (error)
3023 goto bread_err1;
3024
3025 hblks = xlog_logrec_hblks(log, rhead);
3026 if (hblks != 1) {
3027 kmem_free(hbp);
3028 hbp = xlog_alloc_buffer(log, hblks);
3029 }
3030 } else {
3031 ASSERT(log->l_sectBBsize == 1);
3032 hblks = 1;
3033 hbp = xlog_alloc_buffer(log, 1);
3034 h_size = XLOG_BIG_RECORD_BSIZE;
3035 }
3036
3037 if (!hbp)
3038 return -ENOMEM;
3039 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3040 if (!dbp) {
3041 kmem_free(hbp);
3042 return -ENOMEM;
3043 }
3044
3046 if (tail_blk > head_blk) {
3047 /*
3048 * Perform recovery around the end of the physical log.
3049 * When the head is not on the same cycle number as the tail,
3050 * we can't do a sequential recovery.
3051 */
3052 while (blk_no < log->l_logBBsize) {
3053 /*
3054 * Check for header wrapping around physical end-of-log
3055 */
3056 offset = hbp;
3057 split_hblks = 0;
3058 wrapped_hblks = 0;
3059 if (blk_no + hblks <= log->l_logBBsize) {
3060 /* Read header in one read */
3061 error = xlog_bread(log, blk_no, hblks, hbp,
3062 &offset);
3063 if (error)
3064 goto bread_err2;
3065 } else {
3066 /* This LR is split across physical log end */
3067 if (blk_no != log->l_logBBsize) {
3068 /* some data before physical log end */
3069 ASSERT(blk_no <= INT_MAX);
3070 split_hblks = log->l_logBBsize - (int)blk_no;
3071 ASSERT(split_hblks > 0);
3072 error = xlog_bread(log, blk_no,
3073 split_hblks, hbp,
3074 &offset);
3075 if (error)
3076 goto bread_err2;
3077 }
3078
3079 /*
3080 * Note: this black magic still works with
3081 * large sector sizes (non-512) only because:
3082 * - we increased the buffer size originally
3083 * by 1 sector giving us enough extra space
3084 * for the second read;
3085 * - the log start is guaranteed to be sector
3086 * aligned;
3087 * - we read the log end (LR header start)
3088 * _first_, then the log start (LR header end)
3089 * - order is important.
3090 */
3091 wrapped_hblks = hblks - split_hblks;
3092 error = xlog_bread_noalign(log, 0,
3093 wrapped_hblks,
3094 offset + BBTOB(split_hblks));
3095 if (error)
3096 goto bread_err2;
3097 }
3098 rhead = (xlog_rec_header_t *)offset;
3099 error = xlog_valid_rec_header(log, rhead,
3100 split_hblks ? blk_no : 0, h_size);
3101 if (error)
3102 goto bread_err2;
3103
3104 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3105 blk_no += hblks;
3106
3107 /*
3108 * Read the log record data in multiple reads if it
3109 * wraps around the end of the log. Note that if the
3110 * header already wrapped, blk_no could point past the
3111 * end of the log. The record data is contiguous in
3112 * that case.
3113 */
3114 if (blk_no + bblks <= log->l_logBBsize ||
3115 blk_no >= log->l_logBBsize) {
3116 rblk_no = xlog_wrap_logbno(log, blk_no);
3117 error = xlog_bread(log, rblk_no, bblks, dbp,
3118 &offset);
3119 if (error)
3120 goto bread_err2;
3121 } else {
3122 /* This log record is split across the
3123 * physical end of log */
3124 offset = dbp;
3125 split_bblks = 0;
3126 if (blk_no != log->l_logBBsize) {
3127 /* some data is before the physical
3128 * end of log */
3129 ASSERT(!wrapped_hblks);
3130 ASSERT(blk_no <= INT_MAX);
3131 split_bblks =
3132 log->l_logBBsize - (int)blk_no;
3133 ASSERT(split_bblks > 0);
3134 error = xlog_bread(log, blk_no,
3135 split_bblks, dbp,
3136 &offset);
3137 if (error)
3138 goto bread_err2;
3139 }
3140
3141 /*
3142 * Note: this black magic still works with
3143 * large sector sizes (non-512) only because:
3144 * - we increased the buffer size originally
3145 * by 1 sector giving us enough extra space
3146 * for the second read;
3147 * - the log start is guaranteed to be sector
3148 * aligned;
3149 * - we read the log end (LR header start)
3150 * _first_, then the log start (LR header end)
3151 * - order is important.
3152 */
3153 error = xlog_bread_noalign(log, 0,
3154 bblks - split_bblks,
3155 offset + BBTOB(split_bblks));
3156 if (error)
3157 goto bread_err2;
3158 }
3159
3160 error = xlog_recover_process(log, rhash, rhead, offset,
3161 pass, &buffer_list);
3162 if (error)
3163 goto bread_err2;
3164
3165 blk_no += bblks;
3166 rhead_blk = blk_no;
3167 }
3168
3169 ASSERT(blk_no >= log->l_logBBsize);
3170 blk_no -= log->l_logBBsize;
3171 rhead_blk = blk_no;
3172 }
3173
3174 /* read first part of physical log */
3175 while (blk_no < head_blk) {
3176 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3177 if (error)
3178 goto bread_err2;
3179
3180 rhead = (xlog_rec_header_t *)offset;
3181 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3182 if (error)
3183 goto bread_err2;
3184
3185 /* blocks in data section */
3186 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3187 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3188 &offset);
3189 if (error)
3190 goto bread_err2;
3191
3192 error = xlog_recover_process(log, rhash, rhead, offset, pass,
3193 &buffer_list);
3194 if (error)
3195 goto bread_err2;
3196
3197 blk_no += bblks + hblks;
3198 rhead_blk = blk_no;
3199 }
3200
3201 bread_err2:
3202 kmem_free(dbp);
3203 bread_err1:
3204 kmem_free(hbp);
3205
3206 /*
3207 * Submit buffers that have been added from the last record processed,
3208 * regardless of error status.
3209 */
3210 if (!list_empty(&buffer_list))
3211 error2 = xfs_buf_delwri_submit(&buffer_list);
3212
3213 if (error && first_bad)
3214 *first_bad = rhead_blk;
3215
3216 /*
3217 * Transactions are freed at commit time but transactions without commit
3218 * records on disk are never committed. Free any that may be left in the
3219 * hash table.
3220 */
3221 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3222 struct hlist_node *tmp;
3223 struct xlog_recover *trans;
3224
3225 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3226 xlog_recover_free_trans(trans);
3227 }
3228
3229 return error ? error : error2;
3230}
3231
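/*
 * Wrap example (editorial; numbers invented): with l_logBBsize = 2000,
 * tail_blk = 1900 and head_blk = 300, the first loop above replays
 * records from block 1900 towards the physical end, splitting any
 * header or body that straddles block 2000 into two reads; blk_no then
 * wraps (blk_no -= 2000) and the second loop continues from block 0 up
 * to the head at block 300.
 */
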
3232/*
3233 * Do the recovery of the log. We actually do this in two phases.
3234 * The two passes are necessary in order to implement the function
3235 * of cancelling a record written into the log. The first pass
3236 * determines those things which have been cancelled, and the
3237 * second pass replays log items normally except for those which
3238 * have been cancelled. The handling of the replay and cancellations
3239 * takes place in the log item type specific routines.
3240 *
3241 * The table of items which have cancel records in the log is allocated
3242 * and freed at this level, since only here do we know when all of
3243 * the log recovery has been completed.
3244 */
3245STATIC int
3246xlog_do_log_recovery(
3247 struct xlog *log,
3248 xfs_daddr_t head_blk,
3249 xfs_daddr_t tail_blk)
3250{
3251 int error;
3252
3253 ASSERT(head_blk != tail_blk);
3254
3255 /*
3256 * First do a pass to find all of the cancelled buf log items.
3257 * Store them in the buf_cancel_table for use in the second pass.
3258 */
3259 error = xlog_alloc_buf_cancel_table(log);
3260 if (error)
3261 return error;
3262
3263 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3264 XLOG_RECOVER_PASS1, NULL);
3265 if (error != 0)
3266 goto out_cancel;
3267
3268 /*
3269 * Then do a second pass to actually recover the items in the log.
3270 * When it is complete free the table of buf cancel items.
3271 */
3272 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3273 XLOG_RECOVER_PASS2, NULL);
3274 if (!error)
3275 xlog_check_buf_cancel_table(log);
3276out_cancel:
3277 xlog_free_buf_cancel_table(log);
3278 return error;
3279}
3280
3281/*
3282 * Do the actual recovery
3283 */
3284STATIC int
3285xlog_do_recover(
3286 struct xlog *log,
3287 xfs_daddr_t head_blk,
3288 xfs_daddr_t tail_blk)
3289{
3290 struct xfs_mount *mp = log->l_mp;
3291 struct xfs_buf *bp = mp->m_sb_bp;
3292 struct xfs_sb *sbp = &mp->m_sb;
3293 int error;
3294
3295 trace_xfs_log_recover(log, head_blk, tail_blk);
3296
3297 /*
3298 * First replay the images in the log.
3299 */
3300 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3301 if (error)
3302 return error;
3303
3304 if (xlog_is_shutdown(log))
3305 return -EIO;
3306
3307 /*
3308 * We now update the tail_lsn since much of the recovery has completed
3309 * and there may be space available to use. If there were no extent frees
3310 * or iunlinks, we can free up the entire log and set the tail_lsn to
3311 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3312 * lsn of the last known good LR on disk. If there are extent frees
3313 * or iunlinks they will have some entries in the AIL; so we look at
3314 * the AIL to determine how to set the tail_lsn.
3315 */
3316 xlog_assign_tail_lsn(mp);
3317
3318 /*
3319 * Now that we've finished replaying all buffer and inode updates,
3320 * re-read the superblock and reverify it.
3321 */
3322 xfs_buf_lock(bp);
3323 xfs_buf_hold(bp);
3324 error = _xfs_buf_read(bp, XBF_READ);
3325 if (error) {
3326 if (!xlog_is_shutdown(log)) {
3327 xfs_buf_ioerror_alert(bp, __this_address);
3328 ASSERT(0);
3329 }
3330 xfs_buf_relse(bp);
3331 return error;
3332 }
3333
3334 /* Convert superblock from on-disk format */
3335 xfs_sb_from_disk(sbp, bp->b_addr);
3336 xfs_buf_relse(bp);
3337
3338 /* re-initialise in-core superblock and geometry structures */
3339 mp->m_features |= xfs_sb_version_to_features(sbp);
3340 xfs_reinit_percpu_counters(mp);
3341 error = xfs_initialize_perag(mp, sbp->sb_agcount, sbp->sb_dblocks,
3342 &mp->m_maxagi);
3343 if (error) {
3344 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3345 return error;
3346 }
3347 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3348
3349 /* Normal transactions can now occur */
3350 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3351 return 0;
3352}
3353
3354/*
3355 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3356 *
3357 * Return error or zero.
3358 */
3359int
3360xlog_recover(
3361 struct xlog *log)
3362{
3363 xfs_daddr_t head_blk, tail_blk;
3364 int error;
3365
3366 /* find the tail of the log */
3367 error = xlog_find_tail(log, &head_blk, &tail_blk);
3368 if (error)
3369 return error;
3370
3371 /*
3372 * The superblock was read before the log was available and thus the LSN
3373 * could not be verified. Check the superblock LSN against the current
3374 * LSN now that it's known.
3375 */
3376 if (xfs_has_crc(log->l_mp) &&
3377 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3378 return -EINVAL;
3379
3380 if (tail_blk != head_blk) {
3381 /* There used to be a comment here:
3382 *
3383 * disallow recovery on read-only mounts. note -- mount
3384 * checks for ENOSPC and turns it into an intelligent
3385 * error message.
3386 * ...but this is no longer true. Now, unless you specify
3387 * NORECOVERY (in which case this function would never be
3388 * called), we just go ahead and recover. We do this all
3389 * under the vfs layer, so we can get away with it unless
3390 * the device itself is read-only, in which case we fail.
3391 */
3392		error = xfs_dev_is_read_only(log->l_mp, "recovery");
3393		if (error)
3394			return error;
3395
3396 /*
3397 * Version 5 superblock log feature mask validation. We know the
3398 * log is dirty so check if there are any unknown log features
3399 * in what we need to recover. If there are unknown features
3400 * (e.g. unsupported transactions), then simply reject the
3401 * attempt at recovery before touching anything.
3402 */
3403 if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3404 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3405 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3406 xfs_warn(log->l_mp,
3407"Superblock has unknown incompatible log features (0x%x) enabled.",
3408 (log->l_mp->m_sb.sb_features_log_incompat &
3409 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3410 xfs_warn(log->l_mp,
3411"The log can not be fully and/or safely recovered by this kernel.");
3412 xfs_warn(log->l_mp,
3413"Please recover the log on a kernel that supports the unknown features.");
3414 return -EINVAL;
3415 }
3416
3417 /*
3418 * Delay log recovery if the debug hook is set. This is debug
3419 * instrumentation to coordinate simulation of I/O failures with
3420 * log recovery.
3421 */
3422 if (xfs_globals.log_recovery_delay) {
3423 xfs_notice(log->l_mp,
3424 "Delaying log recovery for %d seconds.",
3425 xfs_globals.log_recovery_delay);
3426 msleep(xfs_globals.log_recovery_delay * 1000);
3427 }
3428
3429 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3430 log->l_mp->m_logname ? log->l_mp->m_logname
3431 : "internal");
3432
3433 error = xlog_do_recover(log, head_blk, tail_blk);
3434 set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3435 }
3436 return error;
3437}
3438
3439/*
3440 * In the first part of recovery we replay inodes and buffers and build up the
3441 * list of intents which need to be processed. Here we process the intents and
3442 * clean up the on-disk unlinked inode lists. This is separated from the first
3443 * part of recovery so that the root and real-time bitmap inodes can be read in
3444 * from disk in between the two stages. This is necessary so that we can free
3445 * space in the real-time portion of the file system.
3446 */
3447int
3448xlog_recover_finish(
3449 struct xlog *log)
3450{
3451 int error;
3452
3453 error = xlog_recover_process_intents(log);
3454 if (error) {
3455 /*
3456 * Cancel all the unprocessed intent items now so that we don't
3457 * leave them pinned in the AIL. This can cause the AIL to
3458 * livelock on the pinned item if anyone tries to push the AIL
3459 * (inode reclaim does this) before we get around to
3460 * xfs_log_mount_cancel.
3461 */
3462 xlog_recover_cancel_intents(log);
3463 xfs_alert(log->l_mp, "Failed to recover intents");
3464 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3465 return error;
3466 }
3467
3468 /*
3469 * Sync the log to get all the intents out of the AIL. This isn't
3470 * absolutely necessary, but it helps in case the unlink transactions
3471 * would have problems pushing the intents out of the way.
3472 */
3473 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3474
3475 /*
3476 * Now that we've recovered the log and all the intents, we can clear
3477 * the log incompat feature bits in the superblock because there's no
3478 * longer anything to protect. We rely on the AIL push to write out the
3479 * updated superblock after everything else.
3480 */
3481 if (xfs_clear_incompat_log_features(log->l_mp)) {
3482 error = xfs_sync_sb(log->l_mp, false);
3483 if (error < 0) {
3484 xfs_alert(log->l_mp,
3485 "Failed to clear log incompat features on recovery");
3486 return error;
3487 }
3488 }
3489
3490 xlog_recover_process_iunlinks(log);
3491
3492 /*
3493 * Recover any CoW staging blocks that are still referenced by the
3494 * ondisk refcount metadata. During mount there cannot be any live
3495 * staging extents as we have not permitted any user modifications.
3496 * Therefore, it is safe to free them all right now, even on a
3497 * read-only mount.
3498 */
3499 error = xfs_reflink_recover_cow(log->l_mp);
3500 if (error) {
3501 xfs_alert(log->l_mp,
3502 "Failed to recover leftover CoW staging extents, err %d.",
3503 error);
3504 /*
3505 * If we get an error here, make sure the log is shut down
3506 * but return zero so that any log items committed since the
3507 * end of intents processing can be pushed through the CIL
3508 * and AIL.
3509 */
3510 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3511 }
3512
3513 return 0;
3514}
3515
3516void
3517xlog_recover_cancel(
3518 struct xlog *log)
3519{
3520 if (xlog_recovery_needed(log))
3521 xlog_recover_cancel_intents(log);
3522}
3523
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_sb.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_inode.h"
17#include "xfs_trans.h"
18#include "xfs_log.h"
19#include "xfs_log_priv.h"
20#include "xfs_log_recover.h"
21#include "xfs_inode_item.h"
22#include "xfs_extfree_item.h"
23#include "xfs_trans_priv.h"
24#include "xfs_alloc.h"
25#include "xfs_ialloc.h"
26#include "xfs_quota.h"
27#include "xfs_trace.h"
28#include "xfs_icache.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_error.h"
31#include "xfs_dir2.h"
32#include "xfs_rmap_item.h"
33#include "xfs_buf_item.h"
34#include "xfs_refcount_item.h"
35#include "xfs_bmap_item.h"
36
37#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
38
39STATIC int
40xlog_find_zeroed(
41 struct xlog *,
42 xfs_daddr_t *);
43STATIC int
44xlog_clear_stale_blocks(
45 struct xlog *,
46 xfs_lsn_t);
47#if defined(DEBUG)
48STATIC void
49xlog_recover_check_summary(
50 struct xlog *);
51#else
52#define xlog_recover_check_summary(log)
53#endif
54STATIC int
55xlog_do_recovery_pass(
56 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
57
58/*
59 * This structure is used during recovery to record the buf log items which
60 * have been canceled and should not be replayed.
61 */
62struct xfs_buf_cancel {
63 xfs_daddr_t bc_blkno;
64 uint bc_len;
65 int bc_refcount;
66 struct list_head bc_list;
67};
68
69/*
70 * Sector aligned buffer routines for buffer create/read/write/access
71 */
72
73/*
74 * Verify the log-relative block number and length in basic blocks are valid for
75 * an operation involving the given XFS log buffer. Returns true if the fields
76 * are valid, false otherwise.
77 */
78static inline bool
79xlog_verify_bno(
80 struct xlog *log,
81 xfs_daddr_t blk_no,
82 int bbcount)
83{
84 if (blk_no < 0 || blk_no >= log->l_logBBsize)
85 return false;
86 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
87 return false;
88 return true;
89}
90
91/*
92 * Allocate a buffer to hold log data. The buffer needs to be able to map to
93 * a range of nbblks basic blocks at any valid offset within the log.
94 */
95static char *
96xlog_alloc_buffer(
97 struct xlog *log,
98 int nbblks)
99{
100 int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
101
102 /*
103 * Pass log block 0 since we don't have an addr yet, buffer will be
104 * verified on read.
105 */
106 if (!xlog_verify_bno(log, 0, nbblks)) {
107 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
108 nbblks);
109 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
110 return NULL;
111 }
112
113 /*
114 * We do log I/O in units of log sectors (a power-of-2 multiple of the
115 * basic block size), so we round up the requested size to accommodate
116 * the basic blocks required for complete log sectors.
117 *
118 * In addition, the buffer may be used for a non-sector-aligned block
119 * offset, in which case an I/O of the requested size could extend
120 * beyond the end of the buffer. If the requested size is only 1 basic
121 * block it will never straddle a sector boundary, so this won't be an
122 * issue. Nor will this be a problem if the log I/O is done in basic
123 * blocks (sector size 1). But otherwise we extend the buffer by one
124 * extra log sector to ensure there's space to accommodate this
125 * possibility.
126 */
127 if (nbblks > 1 && log->l_sectBBsize > 1)
128 nbblks += log->l_sectBBsize;
129 nbblks = round_up(nbblks, log->l_sectBBsize);
130 return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
131}
132
133/*
134 * Return the address of the start of the given block number's data
135 * in a log buffer. The buffer covers a log sector-aligned region.
136 */
137static inline unsigned int
138xlog_align(
139 struct xlog *log,
140 xfs_daddr_t blk_no)
141{
142 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
143}
144
145static int
146xlog_do_io(
147 struct xlog *log,
148 xfs_daddr_t blk_no,
149 unsigned int nbblks,
150 char *data,
151 unsigned int op)
152{
153 int error;
154
155 if (!xlog_verify_bno(log, blk_no, nbblks)) {
156 xfs_warn(log->l_mp,
157 "Invalid log block/length (0x%llx, 0x%x) for buffer",
158 blk_no, nbblks);
159 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
160 return -EFSCORRUPTED;
161 }
162
163 blk_no = round_down(blk_no, log->l_sectBBsize);
164 nbblks = round_up(nbblks, log->l_sectBBsize);
165 ASSERT(nbblks > 0);
166
167 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
168 BBTOB(nbblks), data, op);
169 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
170 xfs_alert(log->l_mp,
171 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
172 op == REQ_OP_WRITE ? "write" : "read",
173 blk_no, nbblks, error);
174 }
175 return error;
176}
177
178STATIC int
179xlog_bread_noalign(
180 struct xlog *log,
181 xfs_daddr_t blk_no,
182 int nbblks,
183 char *data)
184{
185 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
186}
187
188STATIC int
189xlog_bread(
190 struct xlog *log,
191 xfs_daddr_t blk_no,
192 int nbblks,
193 char *data,
194 char **offset)
195{
196 int error;
197
198 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
199 if (!error)
200 *offset = data + xlog_align(log, blk_no);
201 return error;
202}
203
204STATIC int
205xlog_bwrite(
206 struct xlog *log,
207 xfs_daddr_t blk_no,
208 int nbblks,
209 char *data)
210{
211 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
212}
213
214#ifdef DEBUG
215/*
216 * dump debug superblock and log record information
217 */
218STATIC void
219xlog_header_check_dump(
220 xfs_mount_t *mp,
221 xlog_rec_header_t *head)
222{
223 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
224 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
225 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
226 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
227}
228#else
229#define xlog_header_check_dump(mp, head)
230#endif
231
232/*
233 * check log record header for recovery
234 */
235STATIC int
236xlog_header_check_recover(
237 xfs_mount_t *mp,
238 xlog_rec_header_t *head)
239{
240 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
241
242 /*
243 * IRIX doesn't write the h_fmt field and leaves it zeroed
244 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
245 * a dirty log created in IRIX.
246 */
247 if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
248 xfs_warn(mp,
249 "dirty log written in incompatible format - can't recover");
250 xlog_header_check_dump(mp, head);
251 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
252 XFS_ERRLEVEL_HIGH, mp);
253 return -EFSCORRUPTED;
254 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
255 xfs_warn(mp,
256 "dirty log entry has mismatched uuid - can't recover");
257 xlog_header_check_dump(mp, head);
258 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
259 XFS_ERRLEVEL_HIGH, mp);
260 return -EFSCORRUPTED;
261 }
262 return 0;
263}
264
265/*
 * Check the log record header read at mount time against the superblock.
267 */
268STATIC int
269xlog_header_check_mount(
270 xfs_mount_t *mp,
271 xlog_rec_header_t *head)
272{
273 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
274
275 if (uuid_is_null(&head->h_fs_uuid)) {
276 /*
277 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
278 * h_fs_uuid is null, we assume this log was last mounted
279 * by IRIX and continue.
280 */
281 xfs_warn(mp, "null uuid in log - IRIX style log");
282 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
283 xfs_warn(mp, "log has mismatched uuid - can't recover");
284 xlog_header_check_dump(mp, head);
285 XFS_ERROR_REPORT("xlog_header_check_mount",
286 XFS_ERRLEVEL_HIGH, mp);
287 return -EFSCORRUPTED;
288 }
289 return 0;
290}
291
292STATIC void
293xlog_recover_iodone(
294 struct xfs_buf *bp)
295{
296 if (bp->b_error) {
297 /*
298 * We're not going to bother about retrying
299 * this during recovery. One strike!
300 */
301 if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
302 xfs_buf_ioerror_alert(bp, __func__);
303 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
304 }
305 }
306
307 /*
308 * On v5 supers, a bli could be attached to update the metadata LSN.
309 * Clean it up.
310 */
311 if (bp->b_log_item)
312 xfs_buf_item_relse(bp);
313 ASSERT(bp->b_log_item == NULL);
314
315 bp->b_iodone = NULL;
316 xfs_buf_ioend(bp);
317}
318
319/*
320 * This routine finds (to an approximation) the first block in the physical
321 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the on-disk contents
 * are not guaranteed to be self-consistent after a crash.
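 *
 * Illustrative example: with per-block cycles | 12 12 12 11 11 | and
 * cycle == 11, the bisection converges on block 3, the first block
 * stamped with the target cycle.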
324 */
325STATIC int
326xlog_find_cycle_start(
327 struct xlog *log,
328 char *buffer,
329 xfs_daddr_t first_blk,
330 xfs_daddr_t *last_blk,
331 uint cycle)
332{
333 char *offset;
334 xfs_daddr_t mid_blk;
335 xfs_daddr_t end_blk;
336 uint mid_cycle;
337 int error;
338
339 end_blk = *last_blk;
340 mid_blk = BLK_AVG(first_blk, end_blk);
341 while (mid_blk != first_blk && mid_blk != end_blk) {
342 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
343 if (error)
344 return error;
345 mid_cycle = xlog_get_cycle(offset);
346 if (mid_cycle == cycle)
347 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
348 else
349 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
350 mid_blk = BLK_AVG(first_blk, end_blk);
351 }
352 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
353 (mid_blk == end_blk && mid_blk-1 == first_blk));
354
355 *last_blk = end_blk;
356
357 return 0;
358}
359
360/*
361 * Check that a range of blocks does not contain stop_on_cycle_no.
362 * Fill in *new_blk with the block offset where such a block is
363 * found, or with -1 (an invalid block number) if there is no such
364 * block in the range. The scan needs to occur from front to back
365 * and the pointer into the region must be updated since a later
366 * routine will need to perform another test.
367 */
368STATIC int
369xlog_find_verify_cycle(
370 struct xlog *log,
371 xfs_daddr_t start_blk,
372 int nbblks,
373 uint stop_on_cycle_no,
374 xfs_daddr_t *new_blk)
375{
376 xfs_daddr_t i, j;
377 uint cycle;
378 char *buffer;
379 xfs_daddr_t bufblks;
380 char *buf = NULL;
381 int error = 0;
382
383 /*
384 * Greedily allocate a buffer big enough to handle the full
385 * range of basic blocks we'll be examining. If that fails,
386 * try a smaller size. We need to be able to read at least
387 * a log sector, or we're out of luck.
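	 *
	 * For example, nbblks == 24 yields an initial bufblks of
	 * 1 << ffs(24) == 16 (illustrative value); this is halved on
	 * each allocation failure until it would drop below one log
	 * sector.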
388 */
389 bufblks = 1 << ffs(nbblks);
390 while (bufblks > log->l_logBBsize)
391 bufblks >>= 1;
392 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
393 bufblks >>= 1;
394 if (bufblks < log->l_sectBBsize)
395 return -ENOMEM;
396 }
397
398 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
399 int bcount;
400
401 bcount = min(bufblks, (start_blk + nbblks - i));
402
403 error = xlog_bread(log, i, bcount, buffer, &buf);
404 if (error)
405 goto out;
406
407 for (j = 0; j < bcount; j++) {
408 cycle = xlog_get_cycle(buf);
409 if (cycle == stop_on_cycle_no) {
410 *new_blk = i+j;
411 goto out;
412 }
413
414 buf += BBSIZE;
415 }
416 }
417
418 *new_blk = -1;
419
420out:
421 kmem_free(buffer);
422 return error;
423}
424
425/*
426 * Potentially backup over partial log record write.
427 *
428 * In the typical case, last_blk is the number of the block directly after
429 * a good log record. Therefore, we subtract one to get the block number
430 * of the last block in the given buffer. extra_bblks contains the number
431 * of blocks we would have read on a previous read. This happens when the
432 * last log record is split over the end of the physical log.
433 *
434 * extra_bblks is the number of blocks potentially verified on a previous
435 * call to this routine.
436 */
437STATIC int
438xlog_find_verify_log_record(
439 struct xlog *log,
440 xfs_daddr_t start_blk,
441 xfs_daddr_t *last_blk,
442 int extra_bblks)
443{
444 xfs_daddr_t i;
445 char *buffer;
446 char *offset = NULL;
447 xlog_rec_header_t *head = NULL;
448 int error = 0;
449 int smallmem = 0;
450 int num_blks = *last_blk - start_blk;
451 int xhdrs;
452
453 ASSERT(start_blk != 0 || *last_blk != start_blk);
454
455 buffer = xlog_alloc_buffer(log, num_blks);
456 if (!buffer) {
457 buffer = xlog_alloc_buffer(log, 1);
458 if (!buffer)
459 return -ENOMEM;
460 smallmem = 1;
461 } else {
462 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
463 if (error)
464 goto out;
465 offset += ((num_blks - 1) << BBSHIFT);
466 }
467
468 for (i = (*last_blk) - 1; i >= 0; i--) {
469 if (i < start_blk) {
470 /* valid log record not found */
471 xfs_warn(log->l_mp,
472 "Log inconsistent (didn't find previous header)");
473 ASSERT(0);
474 error = -EIO;
475 goto out;
476 }
477
478 if (smallmem) {
479 error = xlog_bread(log, i, 1, buffer, &offset);
480 if (error)
481 goto out;
482 }
483
484 head = (xlog_rec_header_t *)offset;
485
486 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
487 break;
488
489 if (!smallmem)
490 offset -= BBSIZE;
491 }
492
493 /*
494 * We hit the beginning of the physical log & still no header. Return
 * to caller. If the caller can handle a return of 1, then this routine
496 * will be called again for the end of the physical log.
497 */
498 if (i == -1) {
499 error = 1;
500 goto out;
501 }
502
503 /*
504 * We have the final block of the good log (the first block
 * of the log record _before_ the head). So we check the uuid.
506 */
507 if ((error = xlog_header_check_mount(log->l_mp, head)))
508 goto out;
509
510 /*
511 * We may have found a log record header before we expected one.
512 * last_blk will be the 1st block # with a given cycle #. We may end
513 * up reading an entire log record. In this case, we don't want to
514 * reset last_blk. Only when last_blk points in the middle of a log
515 * record do we update last_blk.
516 */
517 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
518 uint h_size = be32_to_cpu(head->h_size);
519
520 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
521 if (h_size % XLOG_HEADER_CYCLE_SIZE)
522 xhdrs++;
523 } else {
524 xhdrs = 1;
525 }
526
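	/*
	 * Illustrative example: with 32k iclogs (h_size ==
	 * XLOG_HEADER_CYCLE_SIZE), a record carries one header block, so
	 * a complete record with h_len == 120k spans BTOBB(120k) + 1 ==
	 * 241 blocks. If the distance from the header at block i to
	 * *last_blk doesn't match the record's length, *last_blk points
	 * mid-record and is pulled back to i.
	 */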
527 if (*last_blk - i + extra_bblks !=
528 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
529 *last_blk = i;
530
531out:
532 kmem_free(buffer);
533 return error;
534}
535
536/*
537 * Head is defined to be the point of the log where the next log write
538 * could go. This means that incomplete LR writes at the end are
 * eliminated when calculating the head. We aren't guaranteed that previous
 * LRs contain complete transactions. We only know that blocks with a cycle
 * number of the current cycle number - 1 won't be present in the log if we
 * start writing from our current block number.
543 *
544 * last_blk contains the block number of the first block with a given
545 * cycle number.
546 *
547 * Return: zero if normal, non-zero if error.
548 */
549STATIC int
550xlog_find_head(
551 struct xlog *log,
552 xfs_daddr_t *return_head_blk)
553{
554 char *buffer;
555 char *offset;
556 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
557 int num_scan_bblks;
558 uint first_half_cycle, last_half_cycle;
559 uint stop_on_cycle;
560 int error, log_bbnum = log->l_logBBsize;
561
562 /* Is the end of the log device zeroed? */
563 error = xlog_find_zeroed(log, &first_blk);
564 if (error < 0) {
565 xfs_warn(log->l_mp, "empty log check failed");
566 return error;
567 }
568 if (error == 1) {
569 *return_head_blk = first_blk;
570
571 /* Is the whole lot zeroed? */
572 if (!first_blk) {
			/*
			 * Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there.
			 */
577 xfs_warn(log->l_mp, "totally zeroed log");
578 }
579
580 return 0;
581 }
582
583 first_blk = 0; /* get cycle # of 1st block */
584 buffer = xlog_alloc_buffer(log, 1);
585 if (!buffer)
586 return -ENOMEM;
587
588 error = xlog_bread(log, 0, 1, buffer, &offset);
589 if (error)
590 goto out_free_buffer;
591
592 first_half_cycle = xlog_get_cycle(offset);
593
594 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
595 error = xlog_bread(log, last_blk, 1, buffer, &offset);
596 if (error)
597 goto out_free_buffer;
598
599 last_half_cycle = xlog_get_cycle(offset);
600 ASSERT(last_half_cycle != 0);
601
602 /*
603 * If the 1st half cycle number is equal to the last half cycle number,
604 * then the entire log is stamped with the same cycle number. In this
605 * case, head_blk can't be set to zero (which makes sense). The below
606 * math doesn't work out properly with head_blk equal to zero. Instead,
607 * we set it to log_bbnum which is an invalid block number, but this
 * value makes the math correct. If head_blk doesn't change through
609 * all the tests below, *head_blk is set to zero at the very end rather
610 * than log_bbnum. In a sense, log_bbnum and zero are the same block
611 * in a circular file.
612 */
613 if (first_half_cycle == last_half_cycle) {
614 /*
615 * In this case we believe that the entire log should have
616 * cycle number last_half_cycle. We need to scan backwards
617 * from the end verifying that there are no holes still
618 * containing last_half_cycle - 1. If we find such a hole,
619 * then the start of that hole will be the new head. The
620 * simple case looks like
621 * x | x ... | x - 1 | x
622 * Another case that fits this picture would be
623 * x | x + 1 | x ... | x
624 * In this case the head really is somewhere at the end of the
625 * log, as one of the latest writes at the beginning was
626 * incomplete.
627 * One more case is
628 * x | x + 1 | x ... | x - 1 | x
629 * This is really the combination of the above two cases, and
630 * the head has to end up at the start of the x-1 hole at the
631 * end of the log.
632 *
633 * In the 256k log case, we will read from the beginning to the
634 * end of the log and search for cycle numbers equal to x-1.
635 * We don't worry about the x+1 blocks that we encounter,
636 * because we know that they cannot be the head since the log
637 * started with x.
638 */
639 head_blk = log_bbnum;
640 stop_on_cycle = last_half_cycle - 1;
641 } else {
642 /*
643 * In this case we want to find the first block with cycle
644 * number matching last_half_cycle. We expect the log to be
645 * some variation on
646 * x + 1 ... | x ... | x
647 * The first block with cycle number x (last_half_cycle) will
648 * be where the new head belongs. First we do a binary search
649 * for the first occurrence of last_half_cycle. The binary
650 * search may not be totally accurate, so then we scan back
651 * from there looking for occurrences of last_half_cycle before
652 * us. If that backwards scan wraps around the beginning of
653 * the log, then we look for occurrences of last_half_cycle - 1
654 * at the end of the log. The cases we're looking for look
655 * like
656 * v binary search stopped here
657 * x + 1 ... | x | x + 1 | x ... | x
658 * ^ but we want to locate this spot
659 * or
660 * <---------> less than scan distance
661 * x + 1 ... | x ... | x - 1 | x
662 * ^ we want to locate this spot
663 */
664 stop_on_cycle = last_half_cycle;
665 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
666 last_half_cycle);
667 if (error)
668 goto out_free_buffer;
669 }
670
671 /*
672 * Now validate the answer. Scan back some number of maximum possible
673 * blocks and make sure each one has the expected cycle number. The
674 * maximum is determined by the total possible amount of buffering
675 * in the in-core log. The following number can be made tighter if
676 * we actually look at the block size of the filesystem.
677 */
678 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
679 if (head_blk >= num_scan_bblks) {
680 /*
681 * We are guaranteed that the entire check can be performed
682 * in one buffer.
683 */
684 start_blk = head_blk - num_scan_bblks;
685 if ((error = xlog_find_verify_cycle(log,
686 start_blk, num_scan_bblks,
687 stop_on_cycle, &new_blk)))
688 goto out_free_buffer;
689 if (new_blk != -1)
690 head_blk = new_blk;
691 } else { /* need to read 2 parts of log */
692 /*
693 * We are going to scan backwards in the log in two parts.
694 * First we scan the physical end of the log. In this part
695 * of the log, we are looking for blocks with cycle number
696 * last_half_cycle - 1.
697 * If we find one, then we know that the log starts there, as
698 * we've found a hole that didn't get written in going around
699 * the end of the physical log. The simple case for this is
700 * x + 1 ... | x ... | x - 1 | x
701 * <---------> less than scan distance
702 * If all of the blocks at the end of the log have cycle number
703 * last_half_cycle, then we check the blocks at the start of
704 * the log looking for occurrences of last_half_cycle. If we
705 * find one, then our current estimate for the location of the
706 * first occurrence of last_half_cycle is wrong and we move
707 * back to the hole we've found. This case looks like
708 * x + 1 ... | x | x + 1 | x ...
709 * ^ binary search stopped here
710 * Another case we need to handle that only occurs in 256k
711 * logs is
712 * x + 1 ... | x ... | x+1 | x ...
713 * ^ binary search stops here
714 * In a 256k log, the scan at the end of the log will see the
715 * x + 1 blocks. We need to skip past those since that is
716 * certainly not the head of the log. By searching for
717 * last_half_cycle-1 we accomplish that.
718 */
719 ASSERT(head_blk <= INT_MAX &&
720 (xfs_daddr_t) num_scan_bblks >= head_blk);
721 start_blk = log_bbnum - (num_scan_bblks - head_blk);
722 if ((error = xlog_find_verify_cycle(log, start_blk,
723 num_scan_bblks - (int)head_blk,
724 (stop_on_cycle - 1), &new_blk)))
725 goto out_free_buffer;
726 if (new_blk != -1) {
727 head_blk = new_blk;
728 goto validate_head;
729 }
730
731 /*
732 * Scan beginning of log now. The last part of the physical
733 * log is good. This scan needs to verify that it doesn't find
734 * the last_half_cycle.
735 */
736 start_blk = 0;
737 ASSERT(head_blk <= INT_MAX);
738 if ((error = xlog_find_verify_cycle(log,
739 start_blk, (int)head_blk,
740 stop_on_cycle, &new_blk)))
741 goto out_free_buffer;
742 if (new_blk != -1)
743 head_blk = new_blk;
744 }
745
746validate_head:
747 /*
748 * Now we need to make sure head_blk is not pointing to a block in
749 * the middle of a log record.
750 */
751 num_scan_bblks = XLOG_REC_SHIFT(log);
752 if (head_blk >= num_scan_bblks) {
753 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
754
755 /* start ptr at last block ptr before head_blk */
756 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
757 if (error == 1)
758 error = -EIO;
759 if (error)
760 goto out_free_buffer;
761 } else {
762 start_blk = 0;
763 ASSERT(head_blk <= INT_MAX);
764 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
765 if (error < 0)
766 goto out_free_buffer;
767 if (error == 1) {
768 /* We hit the beginning of the log during our search */
769 start_blk = log_bbnum - (num_scan_bblks - head_blk);
770 new_blk = log_bbnum;
771 ASSERT(start_blk <= INT_MAX &&
772 (xfs_daddr_t) log_bbnum-start_blk >= 0);
773 ASSERT(head_blk <= INT_MAX);
774 error = xlog_find_verify_log_record(log, start_blk,
775 &new_blk, (int)head_blk);
776 if (error == 1)
777 error = -EIO;
778 if (error)
779 goto out_free_buffer;
780 if (new_blk != log_bbnum)
781 head_blk = new_blk;
782 } else if (error)
783 goto out_free_buffer;
784 }
785
786 kmem_free(buffer);
787 if (head_blk == log_bbnum)
788 *return_head_blk = 0;
789 else
790 *return_head_blk = head_blk;
791 /*
792 * When returning here, we have a good block number. Bad block
793 * means that during a previous crash, we didn't have a clean break
794 * from cycle number N to cycle number N-1. In this case, we need
795 * to find the first block with cycle number N-1.
796 */
797 return 0;
798
799out_free_buffer:
800 kmem_free(buffer);
801 if (error)
802 xfs_warn(log->l_mp, "failed to find log head");
803 return error;
804}
805
806/*
807 * Seek backwards in the log for log record headers.
808 *
809 * Given a starting log block, walk backwards until we find the provided number
810 * of records or hit the provided tail block. The return value is the number of
811 * records encountered or a negative error code. The log block and buffer
812 * pointer of the last record seen are returned in rblk and rhead respectively.
813 */
814STATIC int
815xlog_rseek_logrec_hdr(
816 struct xlog *log,
817 xfs_daddr_t head_blk,
818 xfs_daddr_t tail_blk,
819 int count,
820 char *buffer,
821 xfs_daddr_t *rblk,
822 struct xlog_rec_header **rhead,
823 bool *wrapped)
824{
825 int i;
826 int error;
827 int found = 0;
828 char *offset = NULL;
829 xfs_daddr_t end_blk;
830
831 *wrapped = false;
832
833 /*
834 * Walk backwards from the head block until we hit the tail or the first
835 * block in the log.
836 */
837 end_blk = head_blk > tail_blk ? tail_blk : 0;
838 for (i = (int) head_blk - 1; i >= end_blk; i--) {
839 error = xlog_bread(log, i, 1, buffer, &offset);
840 if (error)
841 goto out_error;
842
843 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
844 *rblk = i;
845 *rhead = (struct xlog_rec_header *) offset;
846 if (++found == count)
847 break;
848 }
849 }
850
851 /*
 * If we haven't hit the tail block or found the requested number of
 * log record headers, start looking again from the end of the physical
 * log. Note that
854 * callers can pass head == tail if the tail is not yet known.
855 */
856 if (tail_blk >= head_blk && found != count) {
857 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
858 error = xlog_bread(log, i, 1, buffer, &offset);
859 if (error)
860 goto out_error;
861
862 if (*(__be32 *)offset ==
863 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
864 *wrapped = true;
865 *rblk = i;
866 *rhead = (struct xlog_rec_header *) offset;
867 if (++found == count)
868 break;
869 }
870 }
871 }
872
873 return found;
874
875out_error:
876 return error;
877}
878
879/*
880 * Seek forward in the log for log record headers.
881 *
882 * Given head and tail blocks, walk forward from the tail block until we find
883 * the provided number of records or hit the head block. The return value is the
884 * number of records encountered or a negative error code. The log block and
885 * buffer pointer of the last record seen are returned in rblk and rhead
886 * respectively.
887 */
888STATIC int
889xlog_seek_logrec_hdr(
890 struct xlog *log,
891 xfs_daddr_t head_blk,
892 xfs_daddr_t tail_blk,
893 int count,
894 char *buffer,
895 xfs_daddr_t *rblk,
896 struct xlog_rec_header **rhead,
897 bool *wrapped)
898{
899 int i;
900 int error;
901 int found = 0;
902 char *offset = NULL;
903 xfs_daddr_t end_blk;
904
905 *wrapped = false;
906
907 /*
908 * Walk forward from the tail block until we hit the head or the last
909 * block in the log.
910 */
911 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
912 for (i = (int) tail_blk; i <= end_blk; i++) {
913 error = xlog_bread(log, i, 1, buffer, &offset);
914 if (error)
915 goto out_error;
916
917 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
918 *rblk = i;
919 *rhead = (struct xlog_rec_header *) offset;
920 if (++found == count)
921 break;
922 }
923 }
924
925 /*
 * If we haven't hit the head block or found the requested number of
 * log record headers, start looking again from the start of the
 * physical log.
928 */
929 if (tail_blk > head_blk && found != count) {
930 for (i = 0; i < (int) head_blk; i++) {
931 error = xlog_bread(log, i, 1, buffer, &offset);
932 if (error)
933 goto out_error;
934
935 if (*(__be32 *)offset ==
936 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
937 *wrapped = true;
938 *rblk = i;
939 *rhead = (struct xlog_rec_header *) offset;
940 if (++found == count)
941 break;
942 }
943 }
944 }
945
946 return found;
947
948out_error:
949 return error;
950}
951
952/*
953 * Calculate distance from head to tail (i.e., unused space in the log).
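 *
 * For example, in a 1000 block log with head_blk 900 and tail_blk 100,
 * the distance is 100 + (1000 - 900) == 200 blocks; with head_blk 100
 * and tail_blk 900 it is simply 800 (illustrative values).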
954 */
955static inline int
956xlog_tail_distance(
957 struct xlog *log,
958 xfs_daddr_t head_blk,
959 xfs_daddr_t tail_blk)
960{
961 if (head_blk < tail_blk)
962 return tail_blk - head_blk;
963
964 return tail_blk + (log->l_logBBsize - head_blk);
965}
966
967/*
968 * Verify the log tail. This is particularly important when torn or incomplete
969 * writes have been detected near the front of the log and the head has been
970 * walked back accordingly.
971 *
972 * We also have to handle the case where the tail was pinned and the head
973 * blocked behind the tail right before a crash. If the tail had been pushed
974 * immediately prior to the crash and the subsequent checkpoint was only
975 * partially written, it's possible it overwrote the last referenced tail in the
976 * log with garbage. This is not a coherency problem because the tail must have
977 * been pushed before it can be overwritten, but appears as log corruption to
978 * recovery because we have no way to know the tail was updated if the
979 * subsequent checkpoint didn't write successfully.
980 *
981 * Therefore, CRC check the log from tail to head. If a failure occurs and the
982 * offending record is within max iclog bufs from the head, walk the tail
983 * forward and retry until a valid tail is found or corruption is detected out
984 * of the range of a possible overwrite.
985 */
986STATIC int
987xlog_verify_tail(
988 struct xlog *log,
989 xfs_daddr_t head_blk,
990 xfs_daddr_t *tail_blk,
991 int hsize)
992{
993 struct xlog_rec_header *thead;
994 char *buffer;
995 xfs_daddr_t first_bad;
996 int error = 0;
997 bool wrapped;
998 xfs_daddr_t tmp_tail;
999 xfs_daddr_t orig_tail = *tail_blk;
1000
1001 buffer = xlog_alloc_buffer(log, 1);
1002 if (!buffer)
1003 return -ENOMEM;
1004
1005 /*
1006 * Make sure the tail points to a record (returns positive count on
1007 * success).
1008 */
1009 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
1010 &tmp_tail, &thead, &wrapped);
1011 if (error < 0)
1012 goto out;
1013 if (*tail_blk != tmp_tail)
1014 *tail_blk = tmp_tail;
1015
1016 /*
1017 * Run a CRC check from the tail to the head. We can't just check
1018 * MAX_ICLOGS records past the tail because the tail may point to stale
1019 * blocks cleared during the search for the head/tail. These blocks are
1020 * overwritten with zero-length records and thus record count is not a
1021 * reliable indicator of the iclog state before a crash.
1022 */
1023 first_bad = 0;
1024 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1025 XLOG_RECOVER_CRCPASS, &first_bad);
1026 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1027 int tail_distance;
1028
1029 /*
1030 * Is corruption within range of the head? If so, retry from
1031 * the next record. Otherwise return an error.
1032 */
1033 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1034 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1035 break;
1036
1037 /* skip to the next record; returns positive count on success */
1038 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
1039 buffer, &tmp_tail, &thead, &wrapped);
1040 if (error < 0)
1041 goto out;
1042
1043 *tail_blk = tmp_tail;
1044 first_bad = 0;
1045 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1046 XLOG_RECOVER_CRCPASS, &first_bad);
1047 }
1048
1049 if (!error && *tail_blk != orig_tail)
1050 xfs_warn(log->l_mp,
1051 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1052 orig_tail, *tail_blk);
1053out:
1054 kmem_free(buffer);
1055 return error;
1056}
1057
1058/*
1059 * Detect and trim torn writes from the head of the log.
1060 *
1061 * Storage without sector atomicity guarantees can result in torn writes in the
1062 * log in the event of a crash. Our only means to detect this scenario is via
1063 * CRC verification. While we can't always be certain that CRC verification
1064 * failure is due to a torn write vs. an unrelated corruption, we do know that
1065 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1066 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1067 * the log and treat failures in this range as torn writes as a matter of
1068 * policy. In the event of CRC failure, the head is walked back to the last good
1069 * record in the log and the tail is updated from that record and verified.
1070 */
1071STATIC int
1072xlog_verify_head(
1073 struct xlog *log,
1074 xfs_daddr_t *head_blk, /* in/out: unverified head */
1075 xfs_daddr_t *tail_blk, /* out: tail block */
1076 char *buffer,
1077 xfs_daddr_t *rhead_blk, /* start blk of last record */
1078 struct xlog_rec_header **rhead, /* ptr to last record */
1079 bool *wrapped) /* last rec. wraps phys. log */
1080{
1081 struct xlog_rec_header *tmp_rhead;
1082 char *tmp_buffer;
1083 xfs_daddr_t first_bad;
1084 xfs_daddr_t tmp_rhead_blk;
1085 int found;
1086 int error;
1087 bool tmp_wrapped;
1088
1089 /*
1090 * Check the head of the log for torn writes. Search backwards from the
1091 * head until we hit the tail or the maximum number of log record I/Os
1092 * that could have been in flight at one time. Use a temporary buffer so
1093 * we don't trash the rhead/buffer pointers from the caller.
1094 */
1095 tmp_buffer = xlog_alloc_buffer(log, 1);
1096 if (!tmp_buffer)
1097 return -ENOMEM;
1098 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1099 XLOG_MAX_ICLOGS, tmp_buffer,
1100 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1101 kmem_free(tmp_buffer);
1102 if (error < 0)
1103 return error;
1104
1105 /*
1106 * Now run a CRC verification pass over the records starting at the
1107 * block found above to the current head. If a CRC failure occurs, the
1108 * log block of the first bad record is saved in first_bad.
1109 */
1110 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1111 XLOG_RECOVER_CRCPASS, &first_bad);
1112 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1113 /*
1114 * We've hit a potential torn write. Reset the error and warn
1115 * about it.
1116 */
1117 error = 0;
1118 xfs_warn(log->l_mp,
1119"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1120 first_bad, *head_blk);
1121
1122 /*
1123 * Get the header block and buffer pointer for the last good
1124 * record before the bad record.
1125 *
1126 * Note that xlog_find_tail() clears the blocks at the new head
1127 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
1129 */
1130 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1131 buffer, rhead_blk, rhead, wrapped);
1132 if (found < 0)
1133 return found;
1134 if (found == 0) /* XXX: right thing to do here? */
1135 return -EIO;
1136
1137 /*
1138 * Reset the head block to the starting block of the first bad
1139 * log record and set the tail block based on the last good
1140 * record.
1141 *
1142 * Bail out if the updated head/tail match as this indicates
1143 * possible corruption outside of the acceptable
1144 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1145 */
1146 *head_blk = first_bad;
1147 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1148 if (*head_blk == *tail_blk) {
1149 ASSERT(0);
1150 return 0;
1151 }
1152 }
1153 if (error)
1154 return error;
1155
1156 return xlog_verify_tail(log, *head_blk, tail_blk,
1157 be32_to_cpu((*rhead)->h_size));
1158}
1159
1160/*
1161 * We need to make sure we handle log wrapping properly, so we can't use the
1162 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1163 * log.
1164 *
1165 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1166 * operation here and cast it back to a 64 bit daddr on return.
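 *
 * For example, in a 1000 block log (illustrative value), bno 1003 wraps
 * to block 3.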
1167 */
1168static inline xfs_daddr_t
1169xlog_wrap_logbno(
1170 struct xlog *log,
1171 xfs_daddr_t bno)
1172{
1173 int mod;
1174
1175 div_s64_rem(bno, log->l_logBBsize, &mod);
1176 return mod;
1177}
1178
1179/*
1180 * Check whether the head of the log points to an unmount record. In other
1181 * words, determine whether the log is clean. If so, update the in-core state
1182 * appropriately.
1183 */
1184static int
1185xlog_check_unmount_rec(
1186 struct xlog *log,
1187 xfs_daddr_t *head_blk,
1188 xfs_daddr_t *tail_blk,
1189 struct xlog_rec_header *rhead,
1190 xfs_daddr_t rhead_blk,
1191 char *buffer,
1192 bool *clean)
1193{
1194 struct xlog_op_header *op_head;
1195 xfs_daddr_t umount_data_blk;
1196 xfs_daddr_t after_umount_blk;
1197 int hblks;
1198 int error;
1199 char *offset;
1200
1201 *clean = false;
1202
1203 /*
1204 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since the block after the unmount record could wrap
	 * around past the end of the physical log, we convert it to a log
	 * block before comparing to the head_blk.
1207 *
	 * Save the current tail lsn to pass to xlog_clear_stale_blocks()
1209 * below. We won't want to clear the unmount record if there is one, so
1210 * we pass the lsn of the unmount record rather than the block after it.
1211 */
1212 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1213 int h_size = be32_to_cpu(rhead->h_size);
1214 int h_version = be32_to_cpu(rhead->h_version);
1215
1216 if ((h_version & XLOG_VERSION_2) &&
1217 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1218 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1219 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1220 hblks++;
1221 } else {
1222 hblks = 1;
1223 }
1224 } else {
1225 hblks = 1;
1226 }
1227
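	/*
	 * Illustrative example: a record header at block 50 with hblks == 1
	 * and h_len == 2k gives after_umount_blk == wrap(50 + 1 + 4) == 55;
	 * the head pointing there with exactly one log operation is the
	 * signature of a clean unmount.
	 */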
1228 after_umount_blk = xlog_wrap_logbno(log,
1229 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1230
1231 if (*head_blk == after_umount_blk &&
1232 be32_to_cpu(rhead->h_num_logops) == 1) {
1233 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1234 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1235 if (error)
1236 return error;
1237
1238 op_head = (struct xlog_op_header *)offset;
1239 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1240 /*
1241 * Set tail and last sync so that newly written log
1242 * records will point recovery to after the current
1243 * unmount record.
1244 */
1245 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1246 log->l_curr_cycle, after_umount_blk);
1247 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1248 log->l_curr_cycle, after_umount_blk);
1249 *tail_blk = after_umount_blk;
1250
1251 *clean = true;
1252 }
1253 }
1254
1255 return 0;
1256}
1257
1258static void
1259xlog_set_state(
1260 struct xlog *log,
1261 xfs_daddr_t head_blk,
1262 struct xlog_rec_header *rhead,
1263 xfs_daddr_t rhead_blk,
1264 bool bump_cycle)
1265{
1266 /*
1267 * Reset log values according to the state of the log when we
1268 * crashed. In the case where head_blk == 0, we bump curr_cycle
1269 * one because the next write starts a new cycle rather than
1270 * continuing the cycle of the last good log record. At this
1271 * point we have guaranteed that all partial log records have been
1272 * accounted for. Therefore, we know that the last good log record
1273 * written was complete and ended exactly on the end boundary
1274 * of the physical log.
1275 */
1276 log->l_prev_block = rhead_blk;
1277 log->l_curr_block = (int)head_blk;
1278 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1279 if (bump_cycle)
1280 log->l_curr_cycle++;
1281 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1282 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1283 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1284 BBTOB(log->l_curr_block));
1285 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1286 BBTOB(log->l_curr_block));
1287}
1288
1289/*
1290 * Find the sync block number or the tail of the log.
1291 *
1292 * This will be the block number of the last record to have its
1293 * associated buffers synced to disk. Every log record header has
1294 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
1295 * to get a sync block number. The only concern is to figure out which
1296 * log record header to believe.
1297 *
1298 * The following algorithm uses the log record header with the largest
1299 * lsn. The entire log record does not need to be valid. We only care
1300 * that the header is valid.
1301 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
1304 */
1305STATIC int
1306xlog_find_tail(
1307 struct xlog *log,
1308 xfs_daddr_t *head_blk,
1309 xfs_daddr_t *tail_blk)
1310{
1311 xlog_rec_header_t *rhead;
1312 char *offset = NULL;
1313 char *buffer;
1314 int error;
1315 xfs_daddr_t rhead_blk;
1316 xfs_lsn_t tail_lsn;
1317 bool wrapped = false;
1318 bool clean = false;
1319
1320 /*
1321 * Find previous log record
1322 */
1323 if ((error = xlog_find_head(log, head_blk)))
1324 return error;
1325 ASSERT(*head_blk < INT_MAX);
1326
1327 buffer = xlog_alloc_buffer(log, 1);
1328 if (!buffer)
1329 return -ENOMEM;
1330 if (*head_blk == 0) { /* special case */
1331 error = xlog_bread(log, 0, 1, buffer, &offset);
1332 if (error)
1333 goto done;
1334
1335 if (xlog_get_cycle(offset) == 0) {
1336 *tail_blk = 0;
1337 /* leave all other log inited values alone */
1338 goto done;
1339 }
1340 }
1341
1342 /*
1343 * Search backwards through the log looking for the log record header
1344 * block. This wraps all the way back around to the head so something is
1345 * seriously wrong if we can't find it.
1346 */
1347 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1348 &rhead_blk, &rhead, &wrapped);
1349 if (error < 0)
1350 return error;
1351 if (!error) {
1352 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1353 return -EIO;
1354 }
1355 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1356
1357 /*
1358 * Set the log state based on the current head record.
1359 */
1360 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1361 tail_lsn = atomic64_read(&log->l_tail_lsn);
1362
1363 /*
1364 * Look for an unmount record at the head of the log. This sets the log
1365 * state to determine whether recovery is necessary.
1366 */
1367 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1368 rhead_blk, buffer, &clean);
1369 if (error)
1370 goto done;
1371
1372 /*
1373 * Verify the log head if the log is not clean (e.g., we have anything
1374 * but an unmount record at the head). This uses CRC verification to
1375 * detect and trim torn writes. If discovered, CRC failures are
1376 * considered torn writes and the log head is trimmed accordingly.
1377 *
1378 * Note that we can only run CRC verification when the log is dirty
1379 * because there's no guarantee that the log data behind an unmount
1380 * record is compatible with the current architecture.
1381 */
1382 if (!clean) {
1383 xfs_daddr_t orig_head = *head_blk;
1384
1385 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1386 &rhead_blk, &rhead, &wrapped);
1387 if (error)
1388 goto done;
1389
1390 /* update in-core state again if the head changed */
1391 if (*head_blk != orig_head) {
1392 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1393 wrapped);
1394 tail_lsn = atomic64_read(&log->l_tail_lsn);
1395 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1396 rhead, rhead_blk, buffer,
1397 &clean);
1398 if (error)
1399 goto done;
1400 }
1401 }
1402
1403 /*
1404 * Note that the unmount was clean. If the unmount was not clean, we
1405 * need to know this to rebuild the superblock counters from the perag
1406 * headers if we have a filesystem using non-persistent counters.
1407 */
1408 if (clean)
1409 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1410
1411 /*
1412 * Make sure that there are no blocks in front of the head
1413 * with the same cycle number as the head. This can happen
1414 * because we allow multiple outstanding log writes concurrently,
1415 * and the later writes might make it out before earlier ones.
1416 *
1417 * We use the lsn from before modifying it so that we'll never
1418 * overwrite the unmount record after a clean unmount.
1419 *
1420 * Do this only if we are going to recover the filesystem
1421 *
1422 * NOTE: This used to say "if (!readonly)"
1423 * However on Linux, we can & do recover a read-only filesystem.
1424 * We only skip recovery if NORECOVERY is specified on mount,
1425 * in which case we would not be here.
1426 *
1427 * But... if the -device- itself is readonly, just skip this.
1428 * We can't recover this device anyway, so it won't matter.
1429 */
1430 if (!xfs_readonly_buftarg(log->l_targ))
1431 error = xlog_clear_stale_blocks(log, tail_lsn);
1432
1433done:
1434 kmem_free(buffer);
1435
1436 if (error)
1437 xfs_warn(log->l_mp, "failed to locate log tail");
1438 return error;
1439}
1440
1441/*
1442 * Is the log zeroed at all?
1443 *
1444 * The last binary search should be changed to perform an X block read
1445 * once X becomes small enough. You can then search linearly through
1446 * the X blocks. This will cut down on the number of reads we need to do.
1447 *
1448 * If the log is partially zeroed, this routine will pass back the blkno
1449 * of the first block with cycle number 0. It won't have a complete LR
1450 * preceding it.
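 *
 * For example, per-block cycles | 1 1 1 0 0 ... | (illustrative) yield
 * *blk_no == 3 and a return value of 1.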
1451 *
1452 * Return:
1453 * 0 => the log is completely written to
1454 * 1 => use *blk_no as the first block of the log
1455 * <0 => error has occurred
1456 */
1457STATIC int
1458xlog_find_zeroed(
1459 struct xlog *log,
1460 xfs_daddr_t *blk_no)
1461{
1462 char *buffer;
1463 char *offset;
1464 uint first_cycle, last_cycle;
1465 xfs_daddr_t new_blk, last_blk, start_blk;
1466 xfs_daddr_t num_scan_bblks;
1467 int error, log_bbnum = log->l_logBBsize;
1468
1469 *blk_no = 0;
1470
1471 /* check totally zeroed log */
1472 buffer = xlog_alloc_buffer(log, 1);
1473 if (!buffer)
1474 return -ENOMEM;
1475 error = xlog_bread(log, 0, 1, buffer, &offset);
1476 if (error)
1477 goto out_free_buffer;
1478
1479 first_cycle = xlog_get_cycle(offset);
1480 if (first_cycle == 0) { /* completely zeroed log */
1481 *blk_no = 0;
1482 kmem_free(buffer);
1483 return 1;
1484 }
1485
1486 /* check partially zeroed log */
1487 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1488 if (error)
1489 goto out_free_buffer;
1490
1491 last_cycle = xlog_get_cycle(offset);
1492 if (last_cycle != 0) { /* log completely written to */
1493 kmem_free(buffer);
1494 return 0;
1495 }
1496
1497 /* we have a partially zeroed log */
1498 last_blk = log_bbnum-1;
1499 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1500 if (error)
1501 goto out_free_buffer;
1502
1503 /*
1504 * Validate the answer. Because there is no way to guarantee that
1505 * the entire log is made up of log records which are the same size,
1506 * we scan over the defined maximum blocks. At this point, the maximum
1507 * is not chosen to mean anything special. XXXmiken
1508 */
1509 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1510 ASSERT(num_scan_bblks <= INT_MAX);
1511
1512 if (last_blk < num_scan_bblks)
1513 num_scan_bblks = last_blk;
1514 start_blk = last_blk - num_scan_bblks;
1515
1516 /*
1517 * We search for any instances of cycle number 0 that occur before
1518 * our current estimate of the head. What we're trying to detect is
1519 * 1 ... | 0 | 1 | 0...
1520 * ^ binary search ends here
1521 */
1522 if ((error = xlog_find_verify_cycle(log, start_blk,
1523 (int)num_scan_bblks, 0, &new_blk)))
1524 goto out_free_buffer;
1525 if (new_blk != -1)
1526 last_blk = new_blk;
1527
1528 /*
1529 * Potentially backup over partial log record write. We don't need
1530 * to search the end of the log because we know it is zero.
1531 */
1532 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1533 if (error == 1)
1534 error = -EIO;
1535 if (error)
1536 goto out_free_buffer;
1537
1538 *blk_no = last_blk;
1539out_free_buffer:
1540 kmem_free(buffer);
1541 if (error)
1542 return error;
1543 return 1;
1544}
1545
1546/*
1547 * These are simple subroutines used by xlog_clear_stale_blocks() below
1548 * to initialize a buffer full of empty log record headers and write
1549 * them into the log.
1550 */
1551STATIC void
1552xlog_add_record(
1553 struct xlog *log,
1554 char *buf,
1555 int cycle,
1556 int block,
1557 int tail_cycle,
1558 int tail_block)
1559{
1560 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1561
1562 memset(buf, 0, BBSIZE);
1563 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1564 recp->h_cycle = cpu_to_be32(cycle);
1565 recp->h_version = cpu_to_be32(
1566 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1567 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1568 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1569 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1570 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1571}
1572
1573STATIC int
1574xlog_write_log_records(
1575 struct xlog *log,
1576 int cycle,
1577 int start_block,
1578 int blocks,
1579 int tail_cycle,
1580 int tail_block)
1581{
1582 char *offset;
1583 char *buffer;
1584 int balign, ealign;
1585 int sectbb = log->l_sectBBsize;
1586 int end_block = start_block + blocks;
1587 int bufblks;
1588 int error = 0;
1589 int i, j = 0;
1590
1591 /*
1592 * Greedily allocate a buffer big enough to handle the full
1593 * range of basic blocks to be written. If that fails, try
1594 * a smaller size. We need to be able to write at least a
1595 * log sector, or we're out of luck.
1596 */
1597 bufblks = 1 << ffs(blocks);
1598 while (bufblks > log->l_logBBsize)
1599 bufblks >>= 1;
1600 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1601 bufblks >>= 1;
1602 if (bufblks < sectbb)
1603 return -ENOMEM;
1604 }
1605
	/*
	 * We may need to do a read at the start to fill in part of
1607 * the buffer in the starting sector not covered by the first
1608 * write below.
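	 *
	 * For example, with sectbb == 8 and start_block == 21 (illustrative
	 * values), balign == 16 and j == 5: the read preserves blocks
	 * 16..20 in the buffer so the sector-sized write below won't
	 * clobber them.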
1609 */
1610 balign = round_down(start_block, sectbb);
1611 if (balign != start_block) {
1612 error = xlog_bread_noalign(log, start_block, 1, buffer);
1613 if (error)
1614 goto out_free_buffer;
1615
1616 j = start_block - balign;
1617 }
1618
1619 for (i = start_block; i < end_block; i += bufblks) {
1620 int bcount, endcount;
1621
1622 bcount = min(bufblks, end_block - start_block);
1623 endcount = bcount - j;
1624
		/*
		 * We may need to do a read at the end to fill in part of
1626 * the buffer in the final sector not covered by the write.
1627 * If this is the same sector as the above read, skip it.
1628 */
1629 ealign = round_down(end_block, sectbb);
1630 if (j == 0 && (start_block + endcount > ealign)) {
1631 error = xlog_bread_noalign(log, ealign, sectbb,
1632 buffer + BBTOB(ealign - start_block));
1633 if (error)
1634 break;
1635
1636 }
1637
1638 offset = buffer + xlog_align(log, start_block);
1639 for (; j < endcount; j++) {
1640 xlog_add_record(log, offset, cycle, i+j,
1641 tail_cycle, tail_block);
1642 offset += BBSIZE;
1643 }
1644 error = xlog_bwrite(log, start_block, endcount, buffer);
1645 if (error)
1646 break;
1647 start_block += endcount;
1648 j = 0;
1649 }
1650
1651out_free_buffer:
1652 kmem_free(buffer);
1653 return error;
1654}
1655
1656/*
1657 * This routine is called to blow away any incomplete log writes out
1658 * in front of the log head. We do this so that we won't become confused
1659 * if we come up, write only a little bit more, and then crash again.
1660 * If we leave the partial log records out there, this situation could
1661 * cause us to think those partial writes are valid blocks since they
1662 * have the current cycle number. We get rid of them by overwriting them
1663 * with empty log records with the old cycle number rather than the
1664 * current one.
1665 *
1666 * The tail lsn is passed in rather than taken from
1667 * the log so that we will not write over the unmount record after a
1668 * clean unmount in a 512 block log. Doing so would leave the log without
1669 * any valid log records in it until a new one was written. If we crashed
1670 * during that time we would not be able to recover.
1671 */
1672STATIC int
1673xlog_clear_stale_blocks(
1674 struct xlog *log,
1675 xfs_lsn_t tail_lsn)
1676{
1677 int tail_cycle, head_cycle;
1678 int tail_block, head_block;
1679 int tail_distance, max_distance;
1680 int distance;
1681 int error;
1682
1683 tail_cycle = CYCLE_LSN(tail_lsn);
1684 tail_block = BLOCK_LSN(tail_lsn);
1685 head_cycle = log->l_curr_cycle;
1686 head_block = log->l_curr_block;
1687
1688 /*
1689 * Figure out the distance between the new head of the log
1690 * and the tail. We want to write over any blocks beyond the
1691 * head that we may have written just before the crash, but
1692 * we don't want to overwrite the tail of the log.
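	 *
	 * Illustrative example: in a 1000 block log with head_block 900,
	 * tail_block 100 and head_cycle == tail_cycle, tail_distance is
	 * 100 + (1000 - 900) == 200 blocks.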
1693 */
1694 if (head_cycle == tail_cycle) {
1695 /*
1696 * The tail is behind the head in the physical log,
1697 * so the distance from the head to the tail is the
1698 * distance from the head to the end of the log plus
1699 * the distance from the beginning of the log to the
1700 * tail.
1701 */
1702 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1703 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1704 XFS_ERRLEVEL_LOW, log->l_mp);
1705 return -EFSCORRUPTED;
1706 }
1707 tail_distance = tail_block + (log->l_logBBsize - head_block);
1708 } else {
1709 /*
1710 * The head is behind the tail in the physical log,
1711 * so the distance from the head to the tail is just
1712 * the tail block minus the head block.
1713 */
1714 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1715 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1716 XFS_ERRLEVEL_LOW, log->l_mp);
1717 return -EFSCORRUPTED;
1718 }
1719 tail_distance = tail_block - head_block;
1720 }
1721
1722 /*
1723 * If the head is right up against the tail, we can't clear
1724 * anything.
1725 */
1726 if (tail_distance <= 0) {
1727 ASSERT(tail_distance == 0);
1728 return 0;
1729 }
1730
1731 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1732 /*
1733 * Take the smaller of the maximum amount of outstanding I/O
1734 * we could have and the distance to the tail to clear out.
1735 * We take the smaller so that we don't overwrite the tail and
1736 * we don't waste all day writing from the head to the tail
1737 * for no reason.
1738 */
1739 max_distance = min(max_distance, tail_distance);
1740
1741 if ((head_block + max_distance) <= log->l_logBBsize) {
1742 /*
1743 * We can stomp all the blocks we need to without
1744 * wrapping around the end of the log. Just do it
1745 * in a single write. Use the cycle number of the
1746 * current cycle minus one so that the log will look like:
1747 * n ... | n - 1 ...
1748 */
1749 error = xlog_write_log_records(log, (head_cycle - 1),
1750 head_block, max_distance, tail_cycle,
1751 tail_block);
1752 if (error)
1753 return error;
1754 } else {
1755 /*
1756 * We need to wrap around the end of the physical log in
1757 * order to clear all the blocks. Do it in two separate
1758 * I/Os. The first write should be from the head to the
1759 * end of the physical log, and it should use the current
1760 * cycle number minus one just like above.
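		 *
		 * Illustrative example: in a 1000 block log with head_block
		 * 950 and max_distance 100, this first write stamps blocks
		 * 950..999 and the second write below stamps blocks 0..49
		 * with the current cycle.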
1761 */
1762 distance = log->l_logBBsize - head_block;
1763 error = xlog_write_log_records(log, (head_cycle - 1),
1764 head_block, distance, tail_cycle,
1765 tail_block);
1766
1767 if (error)
1768 return error;
1769
1770 /*
1771 * Now write the blocks at the start of the physical log.
1772 * This writes the remainder of the blocks we want to clear.
1773 * It uses the current cycle number since we're now on the
1774 * same cycle as the head so that we get:
1775 * n ... n ... | n - 1 ...
1776 * ^^^^^ blocks we're writing
1777 */
1778 distance = max_distance - (log->l_logBBsize - head_block);
1779 error = xlog_write_log_records(log, head_cycle, 0, distance,
1780 tail_cycle, tail_block);
1781 if (error)
1782 return error;
1783 }
1784
1785 return 0;
1786}
1787
1788/******************************************************************************
1789 *
1790 * Log recover routines
1791 *
1792 ******************************************************************************
1793 */
1794
1795/*
1796 * Sort the log items in the transaction.
1797 *
1798 * The ordering constraints are defined by the inode allocation and unlink
1799 * behaviour. The rules are:
1800 *
1801 * 1. Every item is only logged once in a given transaction. Hence it
1802 * represents the last logged state of the item. Hence ordering is
1803 * dependent on the order in which operations need to be performed so
1804 * required initial conditions are always met.
1805 *
1806 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1807 * there's nothing to replay from them so we can simply cull them
1808 * from the transaction. However, we can't do that until after we've
1809 * replayed all the other items because they may be dependent on the
1810 * cancelled buffer and replaying the cancelled buffer can remove it
 *    from the cancelled buffer table. Hence they have to be done last.
1812 *
1813 * 3. Inode allocation buffers must be replayed before inode items that
1814 * read the buffer and replay changes into it. For filesystems using the
1815 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1816 * treated the same as inode allocation buffers as they create and
1817 * initialise the buffers directly.
1818 *
1819 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1820 * This ensures that inodes are completely flushed to the inode buffer
1821 * in a "free" state before we remove the unlinked inode list pointer.
1822 *
1823 * Hence the ordering needs to be inode allocation buffers first, inode items
1824 * second, inode unlink buffers third and cancelled buffers last.
1825 *
1826 * But there's a problem with that - we can't tell an inode allocation buffer
1827 * apart from a regular buffer, so we can't separate them. We can, however,
1828 * tell an inode unlink buffer from the others, and so we can separate them out
1829 * from all the other buffers and move them to last.
1830 *
1831 * Hence, 4 lists, in order from head to tail:
1832 * - buffer_list for all buffers except cancelled/inode unlink buffers
1833 * - item_list for all non-buffer items
1834 * - inode_buffer_list for inode unlink buffers
1835 * - cancel_list for the cancelled buffers
1836 *
1837 * Note that we add objects to the tail of the lists so that first-to-last
1838 * ordering is preserved within the lists. Adding objects to the head of the
1839 * list means when we traverse from the head we walk them in last-to-first
1840 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1841 * but for all other items there may be specific ordering that we need to
1842 * preserve.
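 *
 * Illustrative example: a transaction logged as [inode item, cancelled
 * buffer, regular buffer, inode unlink buffer] is re-ordered to
 * [regular buffer, inode item, inode unlink buffer, cancelled buffer].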
1843 */
1844STATIC int
1845xlog_recover_reorder_trans(
1846 struct xlog *log,
1847 struct xlog_recover *trans,
1848 int pass)
1849{
1850 xlog_recover_item_t *item, *n;
1851 int error = 0;
1852 LIST_HEAD(sort_list);
1853 LIST_HEAD(cancel_list);
1854 LIST_HEAD(buffer_list);
1855 LIST_HEAD(inode_buffer_list);
1856 LIST_HEAD(inode_list);
1857
1858 list_splice_init(&trans->r_itemq, &sort_list);
1859 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1860 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1861
1862 switch (ITEM_TYPE(item)) {
1863 case XFS_LI_ICREATE:
1864 list_move_tail(&item->ri_list, &buffer_list);
1865 break;
1866 case XFS_LI_BUF:
1867 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1868 trace_xfs_log_recover_item_reorder_head(log,
1869 trans, item, pass);
1870 list_move(&item->ri_list, &cancel_list);
1871 break;
1872 }
1873 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1874 list_move(&item->ri_list, &inode_buffer_list);
1875 break;
1876 }
1877 list_move_tail(&item->ri_list, &buffer_list);
1878 break;
1879 case XFS_LI_INODE:
1880 case XFS_LI_DQUOT:
1881 case XFS_LI_QUOTAOFF:
1882 case XFS_LI_EFD:
1883 case XFS_LI_EFI:
1884 case XFS_LI_RUI:
1885 case XFS_LI_RUD:
1886 case XFS_LI_CUI:
1887 case XFS_LI_CUD:
1888 case XFS_LI_BUI:
1889 case XFS_LI_BUD:
1890 trace_xfs_log_recover_item_reorder_tail(log,
1891 trans, item, pass);
1892 list_move_tail(&item->ri_list, &inode_list);
1893 break;
1894 default:
1895 xfs_warn(log->l_mp,
1896 "%s: unrecognized type of log operation",
1897 __func__);
1898 ASSERT(0);
1899 /*
1900 * return the remaining items back to the transaction
1901 * item list so they can be freed in caller.
1902 */
1903 if (!list_empty(&sort_list))
1904 list_splice_init(&sort_list, &trans->r_itemq);
1905 error = -EIO;
1906 goto out;
1907 }
1908 }
1909out:
1910 ASSERT(list_empty(&sort_list));
1911 if (!list_empty(&buffer_list))
1912 list_splice(&buffer_list, &trans->r_itemq);
1913 if (!list_empty(&inode_list))
1914 list_splice_tail(&inode_list, &trans->r_itemq);
1915 if (!list_empty(&inode_buffer_list))
1916 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1917 if (!list_empty(&cancel_list))
1918 list_splice_tail(&cancel_list, &trans->r_itemq);
1919 return error;
1920}
1921
1922/*
1923 * Build up the table of buf cancel records so that we don't replay
1924 * cancelled data in the second pass. For buffer records that are
1925 * not cancel records, there is nothing to do here so we just return.
1926 *
1927 * If we get a cancel record which is already in the table, this indicates
1928 * that the buffer was cancelled multiple times. In order to ensure
1929 * that during pass 2 we keep the record in the table until we reach its
1930 * last occurrence in the log, we keep a reference count in the cancel
1931 * record in the table to tell us how many times we expect to see this
1932 * record during the second pass.
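 *
 * For example, a buffer cancelled twice in the log ends pass 1 with
 * bc_refcount == 2 (illustrative); pass 2 then drops one reference at
 * each cancel item and frees the entry exactly at its last occurrence.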
1933 */
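/*
 * For example, a buffer cancelled three times in the region of the log being
 * recovered ends up with bc_refcount == 3 after pass 1. Each time pass 2 sees
 * the corresponding cancel item it drops a reference, removing the record at
 * the final occurrence so that later reuse of those blocks replays normally.
 */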
1934STATIC int
1935xlog_recover_buffer_pass1(
1936 struct xlog *log,
1937 struct xlog_recover_item *item)
1938{
1939 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1940 struct list_head *bucket;
1941 struct xfs_buf_cancel *bcp;
1942
1943 /*
1944 * If this isn't a cancel buffer item, then just return.
1945 */
1946 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1947 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1948 return 0;
1949 }
1950
1951 /*
	 * Insert an xfs_buf_cancel record into the hash table.
1953 * If there is already an identical record, bump its reference count.
1954 */
1955 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1956 list_for_each_entry(bcp, bucket, bc_list) {
1957 if (bcp->bc_blkno == buf_f->blf_blkno &&
1958 bcp->bc_len == buf_f->blf_len) {
1959 bcp->bc_refcount++;
1960 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1961 return 0;
1962 }
1963 }
1964
1965 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
1966 bcp->bc_blkno = buf_f->blf_blkno;
1967 bcp->bc_len = buf_f->blf_len;
1968 bcp->bc_refcount = 1;
1969 list_add_tail(&bcp->bc_list, bucket);
1970
1971 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1972 return 0;
1973}
1974
1975/*
1976 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does, return the cancel
1978 * buffer structure to the caller.
1979 */
1980STATIC struct xfs_buf_cancel *
1981xlog_peek_buffer_cancelled(
1982 struct xlog *log,
1983 xfs_daddr_t blkno,
1984 uint len,
1985 unsigned short flags)
1986{
1987 struct list_head *bucket;
1988 struct xfs_buf_cancel *bcp;
1989
1990 if (!log->l_buf_cancel_table) {
1991 /* empty table means no cancelled buffers in the log */
1992 ASSERT(!(flags & XFS_BLF_CANCEL));
1993 return NULL;
1994 }
1995
1996 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1997 list_for_each_entry(bcp, bucket, bc_list) {
1998 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1999 return bcp;
2000 }
2001
2002 /*
	 * We didn't find a corresponding entry in the table, so return NULL
	 * so that the buffer is NOT cancelled.
2005 */
2006 ASSERT(!(flags & XFS_BLF_CANCEL));
2007 return NULL;
2008}
2009
2010/*
2011 * If the buffer is being cancelled then return 1 so that it will be cancelled,
2012 * otherwise return 0. If the buffer is actually a buffer cancel item
2013 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2014 * table and remove it from the table if this is the last reference.
2015 *
2016 * We remove the cancel record from the table when we encounter its last
2017 * occurrence in the log so that if the same buffer is re-used again after its
2018 * last cancellation we actually replay the changes made at that point.
2019 */
2020STATIC int
2021xlog_check_buffer_cancelled(
2022 struct xlog *log,
2023 xfs_daddr_t blkno,
2024 uint len,
2025 unsigned short flags)
2026{
2027 struct xfs_buf_cancel *bcp;
2028
2029 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2030 if (!bcp)
2031 return 0;
2032
2033 /*
	 * We've got a match, so return 1 so that the recovery of this buffer
2035 * is cancelled. If this buffer is actually a buffer cancel log
2036 * item, then decrement the refcount on the one in the table and
2037 * remove it if this is the last reference.
2038 */
2039 if (flags & XFS_BLF_CANCEL) {
2040 if (--bcp->bc_refcount == 0) {
2041 list_del(&bcp->bc_list);
2042 kmem_free(bcp);
2043 }
2044 }
2045 return 1;
2046}
2047
2048/*
2049 * Perform recovery for a buffer full of inodes. In these buffers, the only
2050 * data which should be recovered is that which corresponds to the
2051 * di_next_unlinked pointers in the on disk inode structures. The rest of the
2052 * data for the inodes is always logged through the inodes themselves rather
2053 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2054 *
2055 * The only time when buffers full of inodes are fully recovered is when the
2056 * buffer is full of newly allocated inodes. In this case the buffer will
2057 * not be marked as an inode buffer and so will be sent to
2058 * xlog_recover_do_reg_buffer() below during recovery.
2059 */
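/*
 * For example, with 512 byte inodes the di_next_unlinked field of the i-th
 * inode in the buffer lives at byte offset
 * i * 512 + offsetof(xfs_dinode_t, di_next_unlinked), which is exactly what
 * the loop below computes from sb_inodesize.
 */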
2060STATIC int
2061xlog_recover_do_inode_buffer(
2062 struct xfs_mount *mp,
2063 xlog_recover_item_t *item,
2064 struct xfs_buf *bp,
2065 xfs_buf_log_format_t *buf_f)
2066{
2067 int i;
2068 int item_index = 0;
2069 int bit = 0;
2070 int nbits = 0;
2071 int reg_buf_offset = 0;
2072 int reg_buf_bytes = 0;
2073 int next_unlinked_offset;
2074 int inodes_per_buf;
2075 xfs_agino_t *logged_nextp;
2076 xfs_agino_t *buffer_nextp;
2077
2078 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2079
2080 /*
2081 * Post recovery validation only works properly on CRC enabled
2082 * filesystems.
2083 */
2084 if (xfs_sb_version_hascrc(&mp->m_sb))
2085 bp->b_ops = &xfs_inode_buf_ops;
2086
2087 inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
2088 for (i = 0; i < inodes_per_buf; i++) {
2089 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2090 offsetof(xfs_dinode_t, di_next_unlinked);
2091
2092 while (next_unlinked_offset >=
2093 (reg_buf_offset + reg_buf_bytes)) {
2094 /*
2095 * The next di_next_unlinked field is beyond
2096 * the current logged region. Find the next
2097 * logged region that contains or is beyond
2098 * the current di_next_unlinked field.
2099 */
2100 bit += nbits;
2101 bit = xfs_next_bit(buf_f->blf_data_map,
2102 buf_f->blf_map_size, bit);
2103
2104 /*
2105 * If there are no more logged regions in the
2106 * buffer, then we're done.
2107 */
2108 if (bit == -1)
2109 return 0;
2110
2111 nbits = xfs_contig_bits(buf_f->blf_data_map,
2112 buf_f->blf_map_size, bit);
2113 ASSERT(nbits > 0);
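			/*
			 * Each bit in blf_data_map describes one
			 * XFS_BLF_CHUNK (128 byte) region of the buffer,
			 * hence the conversion from bit counts to byte
			 * offsets and lengths via XFS_BLF_SHIFT.
			 */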
2114 reg_buf_offset = bit << XFS_BLF_SHIFT;
2115 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2116 item_index++;
2117 }
2118
2119 /*
2120 * If the current logged region starts after the current
2121 * di_next_unlinked field, then move on to the next
2122 * di_next_unlinked field.
2123 */
2124 if (next_unlinked_offset < reg_buf_offset)
2125 continue;
2126
2127 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2128 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2129 ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
2130
2131 /*
2132 * The current logged region contains a copy of the
2133 * current di_next_unlinked field. Extract its value
2134 * and copy it to the buffer copy.
2135 */
2136 logged_nextp = item->ri_buf[item_index].i_addr +
2137 next_unlinked_offset - reg_buf_offset;
2138 if (unlikely(*logged_nextp == 0)) {
2139 xfs_alert(mp,
2140 "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2141 "Trying to replay bad (0) inode di_next_unlinked field.",
2142 item, bp);
2143 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2144 XFS_ERRLEVEL_LOW, mp);
2145 return -EFSCORRUPTED;
2146 }
2147
2148 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2149 *buffer_nextp = *logged_nextp;
2150
2151 /*
2152 * If necessary, recalculate the CRC in the on-disk inode. We
2153 * have to leave the inode in a consistent state for whoever
2154 * reads it next....
2155 */
2156 xfs_dinode_calc_crc(mp,
2157 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2158
2159 }
2160
2161 return 0;
2162}
2163
2164/*
2165 * V5 filesystems know the age of the buffer on disk being recovered. We can
2166 * have newer objects on disk than we are replaying, and so for these cases we
2167 * don't want to replay the current change as that will make the buffer contents
2168 * temporarily invalid on disk.
2169 *
2170 * The magic number might not match the buffer type we are going to recover
2171 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number. If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block and
2175 * so can recover the buffer.
2176 *
2177 * Note: we cannot rely solely on magic number matches to determine that the
2178 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's UUID and compare it to that
2180 * which we read from the superblock. If the UUIDs don't match, then we've got a
2181 * stale metadata block from an old filesystem instance that we need to recover
2182 * over the top of.
2183 */
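/*
 * A minimal sketch of the caller-side check (see xlog_recover_buffer_pass2()
 * below):
 *
 *	lsn = xlog_recover_get_buf_lsn(mp, bp);
 *	if (lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
 *		skip replay of this change;
 *
 * so returning -1 here always forces the change to be replayed.
 */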
2184static xfs_lsn_t
2185xlog_recover_get_buf_lsn(
2186 struct xfs_mount *mp,
2187 struct xfs_buf *bp)
2188{
2189 uint32_t magic32;
2190 uint16_t magic16;
2191 uint16_t magicda;
2192 void *blk = bp->b_addr;
2193 uuid_t *uuid;
2194 xfs_lsn_t lsn = -1;
2195
2196 /* v4 filesystems always recover immediately */
2197 if (!xfs_sb_version_hascrc(&mp->m_sb))
2198 goto recover_immediately;
2199
2200 magic32 = be32_to_cpu(*(__be32 *)blk);
2201 switch (magic32) {
2202 case XFS_ABTB_CRC_MAGIC:
2203 case XFS_ABTC_CRC_MAGIC:
2204 case XFS_ABTB_MAGIC:
2205 case XFS_ABTC_MAGIC:
2206 case XFS_RMAP_CRC_MAGIC:
2207 case XFS_REFC_CRC_MAGIC:
2208 case XFS_IBT_CRC_MAGIC:
2209 case XFS_IBT_MAGIC: {
2210 struct xfs_btree_block *btb = blk;
2211
2212 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2213 uuid = &btb->bb_u.s.bb_uuid;
2214 break;
2215 }
2216 case XFS_BMAP_CRC_MAGIC:
2217 case XFS_BMAP_MAGIC: {
2218 struct xfs_btree_block *btb = blk;
2219
2220 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2221 uuid = &btb->bb_u.l.bb_uuid;
2222 break;
2223 }
2224 case XFS_AGF_MAGIC:
2225 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2226 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2227 break;
2228 case XFS_AGFL_MAGIC:
2229 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2230 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2231 break;
2232 case XFS_AGI_MAGIC:
2233 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2234 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2235 break;
2236 case XFS_SYMLINK_MAGIC:
2237 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2238 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2239 break;
2240 case XFS_DIR3_BLOCK_MAGIC:
2241 case XFS_DIR3_DATA_MAGIC:
2242 case XFS_DIR3_FREE_MAGIC:
2243 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2244 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2245 break;
2246 case XFS_ATTR3_RMT_MAGIC:
2247 /*
2248 * Remote attr blocks are written synchronously, rather than
2249 * being logged. That means they do not contain a valid LSN
2250 * (i.e. transactionally ordered) in them, and hence any time we
2251 * see a buffer to replay over the top of a remote attribute
2252 * block we should simply do so.
2253 */
2254 goto recover_immediately;
2255 case XFS_SB_MAGIC:
2256 /*
2257 * superblock uuids are magic. We may or may not have a
2258 * sb_meta_uuid on disk, but it will be set in the in-core
2259 * superblock. We set the uuid pointer for verification
2260 * according to the superblock feature mask to ensure we check
2261 * the relevant UUID in the superblock.
2262 */
2263 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2264 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2265 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2266 else
2267 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2268 break;
2269 default:
2270 break;
2271 }
2272
2273 if (lsn != (xfs_lsn_t)-1) {
2274 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2275 goto recover_immediately;
2276 return lsn;
2277 }
2278
2279 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2280 switch (magicda) {
2281 case XFS_DIR3_LEAF1_MAGIC:
2282 case XFS_DIR3_LEAFN_MAGIC:
2283 case XFS_DA3_NODE_MAGIC:
2284 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2285 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2286 break;
2287 default:
2288 break;
2289 }
2290
2291 if (lsn != (xfs_lsn_t)-1) {
2292 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2293 goto recover_immediately;
2294 return lsn;
2295 }
2296
2297 /*
2298 * We do individual object checks on dquot and inode buffers as they
2299 * have their own individual LSN records. Also, we could have a stale
2300 * buffer here, so we have to at least recognise these buffer types.
2301 *
 * A noted complexity here is inode unlinked list processing - it logs
2303 * the inode directly in the buffer, but we don't know which inodes have
2304 * been modified, and there is no global buffer LSN. Hence we need to
2305 * recover all inode buffer types immediately. This problem will be
2306 * fixed by logical logging of the unlinked list modifications.
2307 */
2308 magic16 = be16_to_cpu(*(__be16 *)blk);
2309 switch (magic16) {
2310 case XFS_DQUOT_MAGIC:
2311 case XFS_DINODE_MAGIC:
2312 goto recover_immediately;
2313 default:
2314 break;
2315 }
2316
2317 /* unknown buffer contents, recover immediately */
2318
2319recover_immediately:
	return (xfs_lsn_t)-1;
}
2323
2324/*
2325 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback. Magic numbers are in a
2327 * few places:
2328 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2329 * the first 32 bits of the buffer (most blocks),
2330 * inside a struct xfs_da_blkinfo at the start of the buffer.
2331 */
2332static void
2333xlog_recover_validate_buf_type(
2334 struct xfs_mount *mp,
2335 struct xfs_buf *bp,
2336 xfs_buf_log_format_t *buf_f,
2337 xfs_lsn_t current_lsn)
2338{
2339 struct xfs_da_blkinfo *info = bp->b_addr;
2340 uint32_t magic32;
2341 uint16_t magic16;
2342 uint16_t magicda;
2343 char *warnmsg = NULL;
2344
2345 /*
2346 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be able
	 * to determine if we should have replayed the item. If we replay old
	 * metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
2352 */
2353 if (!xfs_sb_version_hascrc(&mp->m_sb))
2354 return;
2355
2356 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2358 magicda = be16_to_cpu(info->magic);
2359 switch (xfs_blft_from_flags(buf_f)) {
2360 case XFS_BLFT_BTREE_BUF:
2361 switch (magic32) {
2362 case XFS_ABTB_CRC_MAGIC:
2363 case XFS_ABTB_MAGIC:
2364 bp->b_ops = &xfs_bnobt_buf_ops;
2365 break;
2366 case XFS_ABTC_CRC_MAGIC:
2367 case XFS_ABTC_MAGIC:
2368 bp->b_ops = &xfs_cntbt_buf_ops;
2369 break;
2370 case XFS_IBT_CRC_MAGIC:
2371 case XFS_IBT_MAGIC:
2372 bp->b_ops = &xfs_inobt_buf_ops;
2373 break;
2374 case XFS_FIBT_CRC_MAGIC:
2375 case XFS_FIBT_MAGIC:
2376 bp->b_ops = &xfs_finobt_buf_ops;
2377 break;
2378 case XFS_BMAP_CRC_MAGIC:
2379 case XFS_BMAP_MAGIC:
2380 bp->b_ops = &xfs_bmbt_buf_ops;
2381 break;
2382 case XFS_RMAP_CRC_MAGIC:
2383 bp->b_ops = &xfs_rmapbt_buf_ops;
2384 break;
2385 case XFS_REFC_CRC_MAGIC:
2386 bp->b_ops = &xfs_refcountbt_buf_ops;
2387 break;
2388 default:
2389 warnmsg = "Bad btree block magic!";
2390 break;
2391 }
2392 break;
2393 case XFS_BLFT_AGF_BUF:
2394 if (magic32 != XFS_AGF_MAGIC) {
2395 warnmsg = "Bad AGF block magic!";
2396 break;
2397 }
2398 bp->b_ops = &xfs_agf_buf_ops;
2399 break;
2400 case XFS_BLFT_AGFL_BUF:
2401 if (magic32 != XFS_AGFL_MAGIC) {
2402 warnmsg = "Bad AGFL block magic!";
2403 break;
2404 }
2405 bp->b_ops = &xfs_agfl_buf_ops;
2406 break;
2407 case XFS_BLFT_AGI_BUF:
2408 if (magic32 != XFS_AGI_MAGIC) {
2409 warnmsg = "Bad AGI block magic!";
2410 break;
2411 }
2412 bp->b_ops = &xfs_agi_buf_ops;
2413 break;
2414 case XFS_BLFT_UDQUOT_BUF:
2415 case XFS_BLFT_PDQUOT_BUF:
2416 case XFS_BLFT_GDQUOT_BUF:
2417#ifdef CONFIG_XFS_QUOTA
2418 if (magic16 != XFS_DQUOT_MAGIC) {
2419 warnmsg = "Bad DQUOT block magic!";
2420 break;
2421 }
2422 bp->b_ops = &xfs_dquot_buf_ops;
2423#else
2424 xfs_alert(mp,
2425 "Trying to recover dquots without QUOTA support built in!");
2426 ASSERT(0);
2427#endif
2428 break;
2429 case XFS_BLFT_DINO_BUF:
2430 if (magic16 != XFS_DINODE_MAGIC) {
2431 warnmsg = "Bad INODE block magic!";
2432 break;
2433 }
2434 bp->b_ops = &xfs_inode_buf_ops;
2435 break;
2436 case XFS_BLFT_SYMLINK_BUF:
2437 if (magic32 != XFS_SYMLINK_MAGIC) {
2438 warnmsg = "Bad symlink block magic!";
2439 break;
2440 }
2441 bp->b_ops = &xfs_symlink_buf_ops;
2442 break;
2443 case XFS_BLFT_DIR_BLOCK_BUF:
2444 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2445 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2446 warnmsg = "Bad dir block magic!";
2447 break;
2448 }
2449 bp->b_ops = &xfs_dir3_block_buf_ops;
2450 break;
2451 case XFS_BLFT_DIR_DATA_BUF:
2452 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2453 magic32 != XFS_DIR3_DATA_MAGIC) {
2454 warnmsg = "Bad dir data magic!";
2455 break;
2456 }
2457 bp->b_ops = &xfs_dir3_data_buf_ops;
2458 break;
2459 case XFS_BLFT_DIR_FREE_BUF:
2460 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2461 magic32 != XFS_DIR3_FREE_MAGIC) {
2462 warnmsg = "Bad dir3 free magic!";
2463 break;
2464 }
2465 bp->b_ops = &xfs_dir3_free_buf_ops;
2466 break;
2467 case XFS_BLFT_DIR_LEAF1_BUF:
2468 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2469 magicda != XFS_DIR3_LEAF1_MAGIC) {
2470 warnmsg = "Bad dir leaf1 magic!";
2471 break;
2472 }
2473 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2474 break;
2475 case XFS_BLFT_DIR_LEAFN_BUF:
2476 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2477 magicda != XFS_DIR3_LEAFN_MAGIC) {
2478 warnmsg = "Bad dir leafn magic!";
2479 break;
2480 }
2481 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2482 break;
2483 case XFS_BLFT_DA_NODE_BUF:
2484 if (magicda != XFS_DA_NODE_MAGIC &&
2485 magicda != XFS_DA3_NODE_MAGIC) {
2486 warnmsg = "Bad da node magic!";
2487 break;
2488 }
2489 bp->b_ops = &xfs_da3_node_buf_ops;
2490 break;
2491 case XFS_BLFT_ATTR_LEAF_BUF:
2492 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2493 magicda != XFS_ATTR3_LEAF_MAGIC) {
2494 warnmsg = "Bad attr leaf magic!";
2495 break;
2496 }
2497 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2498 break;
2499 case XFS_BLFT_ATTR_RMT_BUF:
2500 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2501 warnmsg = "Bad attr remote magic!";
2502 break;
2503 }
2504 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2505 break;
2506 case XFS_BLFT_SB_BUF:
2507 if (magic32 != XFS_SB_MAGIC) {
2508 warnmsg = "Bad SB block magic!";
2509 break;
2510 }
2511 bp->b_ops = &xfs_sb_buf_ops;
2512 break;
2513#ifdef CONFIG_XFS_RT
2514 case XFS_BLFT_RTBITMAP_BUF:
2515 case XFS_BLFT_RTSUMMARY_BUF:
2516 /* no magic numbers for verification of RT buffers */
2517 bp->b_ops = &xfs_rtbuf_ops;
2518 break;
2519#endif /* CONFIG_XFS_RT */
2520 default:
2521 xfs_warn(mp, "Unknown buffer type %d!",
2522 xfs_blft_from_flags(buf_f));
2523 break;
2524 }
2525
2526 /*
2527 * Nothing else to do in the case of a NULL current LSN as this means
2528 * the buffer is more recent than the change in the log and will be
2529 * skipped.
2530 */
2531 if (current_lsn == NULLCOMMITLSN)
2532 return;
2533
2534 if (warnmsg) {
2535 xfs_warn(mp, warnmsg);
2536 ASSERT(0);
2537 }
2538
2539 /*
2540 * We must update the metadata LSN of the buffer as it is written out to
2541 * ensure that older transactions never replay over this one and corrupt
2542 * the buffer. This can occur if log recovery is interrupted at some
2543 * point after the current transaction completes, at which point a
2544 * subsequent mount starts recovery from the beginning.
2545 *
2546 * Write verifiers update the metadata LSN from log items attached to
2547 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2548 * the verifier. We'll clean it up in our ->iodone() callback.
2549 */
2550 if (bp->b_ops) {
2551 struct xfs_buf_log_item *bip;
2552
2553 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2554 bp->b_iodone = xlog_recover_iodone;
2555 xfs_buf_item_init(bp, mp);
2556 bip = bp->b_log_item;
2557 bip->bli_item.li_lsn = current_lsn;
2558 }
2559}
2560
2561/*
2562 * Perform a 'normal' buffer recovery. Each logged region of the
2563 * buffer should be copied over the corresponding region in the
2564 * given buffer. The bitmap in the buf log format structure indicates
2565 * where to place the logged data.
2566 */
2567STATIC void
2568xlog_recover_do_reg_buffer(
2569 struct xfs_mount *mp,
2570 xlog_recover_item_t *item,
2571 struct xfs_buf *bp,
2572 xfs_buf_log_format_t *buf_f,
2573 xfs_lsn_t current_lsn)
2574{
2575 int i;
2576 int bit;
2577 int nbits;
2578 xfs_failaddr_t fa;
2579
2580 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2581
2582 bit = 0;
2583 i = 1; /* 0 is the buf format structure */
2584 while (1) {
2585 bit = xfs_next_bit(buf_f->blf_data_map,
2586 buf_f->blf_map_size, bit);
2587 if (bit == -1)
2588 break;
2589 nbits = xfs_contig_bits(buf_f->blf_data_map,
2590 buf_f->blf_map_size, bit);
2591 ASSERT(nbits > 0);
2592 ASSERT(item->ri_buf[i].i_addr != NULL);
2593 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2594 ASSERT(BBTOB(bp->b_length) >=
2595 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2596
2597 /*
2598 * The dirty regions logged in the buffer, even though
2599 * contiguous, may span multiple chunks. This is because the
2600 * dirty region may span a physical page boundary in a buffer
2601 * and hence be split into two separate vectors for writing into
2602 * the log. Hence we need to trim nbits back to the length of
2603 * the current region being copied out of the log.
2604 */
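		/*
		 * e.g. a contiguous run of 4 dirty chunks (512 bytes) that
		 * crosses a page boundary may arrive as two iovecs of 2
		 * chunks each; this pass trims nbits to 2 and the next
		 * iteration copies the remainder from the following iovec.
		 */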
2605 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2606 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2607
2608 /*
2609 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX: this is
2611 * probably a good thing to do for other buf types also.
2612 */
2613 fa = NULL;
2614 if (buf_f->blf_flags &
2615 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2616 if (item->ri_buf[i].i_addr == NULL) {
2617 xfs_alert(mp,
2618 "XFS: NULL dquot in %s.", __func__);
2619 goto next;
2620 }
2621 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2622 xfs_alert(mp,
2623 "XFS: dquot too small (%d) in %s.",
2624 item->ri_buf[i].i_len, __func__);
2625 goto next;
2626 }
2627 fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2628 -1, 0);
2629 if (fa) {
2630 xfs_alert(mp,
2631 "dquot corrupt at %pS trying to replay into block 0x%llx",
2632 fa, bp->b_bn);
2633 goto next;
2634 }
2635 }
2636
2637 memcpy(xfs_buf_offset(bp,
2638 (uint)bit << XFS_BLF_SHIFT), /* dest */
2639 item->ri_buf[i].i_addr, /* source */
2640 nbits<<XFS_BLF_SHIFT); /* length */
2641 next:
2642 i++;
2643 bit += nbits;
2644 }
2645
2646 /* Shouldn't be any more regions */
2647 ASSERT(i == item->ri_total);
2648
2649 xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2650}
2651
2652/*
2653 * Perform a dquot buffer recovery.
2654 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer,
 * to indicate to the caller whether the buffer needs writing.
2660 */
2661STATIC bool
2662xlog_recover_do_dquot_buffer(
2663 struct xfs_mount *mp,
2664 struct xlog *log,
2665 struct xlog_recover_item *item,
2666 struct xfs_buf *bp,
2667 struct xfs_buf_log_format *buf_f)
2668{
2669 uint type;
2670
2671 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2672
2673 /*
2674 * Filesystems are required to send in quota flags at mount time.
2675 */
2676 if (!mp->m_qflags)
2677 return false;
2678
2679 type = 0;
2680 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2681 type |= XFS_DQ_USER;
2682 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2683 type |= XFS_DQ_PROJ;
2684 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2685 type |= XFS_DQ_GROUP;
2686 /*
	 * This type of quota was turned off, so ignore this buffer.
2688 */
2689 if (log->l_quotaoffs_flag & type)
2690 return false;
2691
2692 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2693 return true;
2694}
2695
2696/*
2697 * This routine replays a modification made to a buffer at runtime.
2698 * There are actually two types of buffer, regular and inode, which
 * are handled differently. For inode buffers we only recover a
 * specific set of data, namely
2701 * the inode di_next_unlinked fields. This is because all other inode
2702 * data is actually logged via inode records and any data we replay
2703 * here which overlaps that may be stale.
2704 *
2705 * When meta-data buffers are freed at run time we log a buffer item
2706 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2707 * of the buffer in the log should not be replayed at recovery time.
2708 * This is so that if the blocks covered by the buffer are reused for
2709 * file data before we crash we don't end up replaying old, freed
2710 * meta-data into a user's file.
2711 *
2712 * To handle the cancellation of buffer log items, we make two passes
2713 * over the log during recovery. During the first we build a table of
2714 * those buffers which have been cancelled, and during the second we
2715 * only replay those buffers which do not have corresponding cancel
2716 * records in the table. See xlog_recover_buffer_pass[1,2] above
2717 * for more details on the implementation of the table of cancel records.
2718 */
2719STATIC int
2720xlog_recover_buffer_pass2(
2721 struct xlog *log,
2722 struct list_head *buffer_list,
2723 struct xlog_recover_item *item,
2724 xfs_lsn_t current_lsn)
2725{
2726 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2727 xfs_mount_t *mp = log->l_mp;
2728 xfs_buf_t *bp;
2729 int error;
2730 uint buf_flags;
2731 xfs_lsn_t lsn;
2732
2733 /*
	 * In this pass we only want to recover the buffers which have
2735 * not been cancelled and are not cancellation buffers themselves.
2736 */
2737 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2738 buf_f->blf_len, buf_f->blf_flags)) {
2739 trace_xfs_log_recover_buf_cancel(log, buf_f);
2740 return 0;
2741 }
2742
2743 trace_xfs_log_recover_buf_recover(log, buf_f);
2744
2745 buf_flags = 0;
2746 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2747 buf_flags |= XBF_UNMAPPED;
2748
2749 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2750 buf_flags, NULL);
2751 if (!bp)
2752 return -ENOMEM;
2753 error = bp->b_error;
2754 if (error) {
2755 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2756 goto out_release;
2757 }
2758
	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN we found in
	 * the buffer is more recent than the current transaction, then we
	 * need to attach the verifier directly. Failure to do so can mean
	 * that future recovery actions (e.g. EFI and unlinked list recovery)
	 * operate on the buffers without the verifier ever being attached,
	 * which can leave blocks on disk with the correct content but a
	 * stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recovery turns that
	 * buffer into.
	 */
2778 lsn = xlog_recover_get_buf_lsn(mp, bp);
2779 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2780 trace_xfs_log_recover_buf_skip(log, buf_f);
2781 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2782 goto out_release;
2783 }
2784
2785 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2786 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2787 if (error)
2788 goto out_release;
2789 } else if (buf_f->blf_flags &
2790 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2791 bool dirty;
2792
2793 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2794 if (!dirty)
2795 goto out_release;
2796 } else {
2797 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2798 }
2799
2800 /*
2801 * Perform delayed write on the buffer. Asynchronous writes will be
2802 * slower when taking into account all the buffers to be flushed.
2803 *
2804 * Also make sure that only inode buffers with good sizes stay in
2805 * the buffer cache. The kernel moves inodes in buffers of 1 block
2806 * or inode_cluster_size bytes, whichever is bigger. The inode
2807 * buffers in the log can be a different size if the log was generated
2808 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size. Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
2811 * for *our* value of inode_cluster_size, then we need to keep
2812 * the buffer out of the buffer cache so that the buffer won't
2813 * overlap with future reads of those inodes.
2814 */
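	/*
	 * For instance, a log written by a kernel using single block (say
	 * 4k) inode buffers but replayed on a system using 16k inode
	 * clusters would leave undersized inode buffers in the cache, so
	 * such buffers are marked stale and written out synchronously below
	 * instead of being queued for delayed write.
	 */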
2815 if (XFS_DINODE_MAGIC ==
2816 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2817 (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
2818 xfs_buf_stale(bp);
2819 error = xfs_bwrite(bp);
2820 } else {
2821 ASSERT(bp->b_mount == mp);
2822 bp->b_iodone = xlog_recover_iodone;
2823 xfs_buf_delwri_queue(bp, buffer_list);
2824 }
2825
2826out_release:
2827 xfs_buf_relse(bp);
2828 return error;
2829}
2830
2831/*
2832 * Inode fork owner changes
2833 *
2834 * If we have been told that we have to reparent the inode fork, it's because an
2835 * extent swap operation on a CRC enabled filesystem has been done and we are
2836 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2837 * owners of it.
2838 *
2839 * The complexity here is that we don't have an inode context to work with, so
2840 * after we've replayed the inode we need to instantiate one. This is where the
2841 * fun begins.
2842 *
2843 * We are in the middle of log recovery, so we can't run transactions. That
2844 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2845 * that will result in the corresponding iput() running the inode through
2846 * xfs_inactive(). If we've just replayed an inode core that changes the link
2847 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2848 * transactions (bad!).
2849 *
2850 * So, to avoid this, we instantiate an inode directly from the inode core we've
2851 * just recovered. We have the buffer still locked, and all we really need to
2852 * instantiate is the inode core and the forks being modified. We can do this
2853 * manually, then run the inode btree owner change, and then tear down the
2854 * xfs_inode without having to run any transactions at all.
2855 *
 * Also, because we don't have a transaction context available here but need
 * to gather all the buffers we modify for writeback, we pass the buffer_list
 * to the operation instead for it to use.
2859 */
2860
2861STATIC int
2862xfs_recover_inode_owner_change(
2863 struct xfs_mount *mp,
2864 struct xfs_dinode *dip,
2865 struct xfs_inode_log_format *in_f,
2866 struct list_head *buffer_list)
2867{
2868 struct xfs_inode *ip;
2869 int error;
2870
2871 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2872
2873 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2874 if (!ip)
2875 return -ENOMEM;
2876
2877 /* instantiate the inode */
2878 xfs_inode_from_disk(ip, dip);
2879 ASSERT(ip->i_d.di_version >= 3);
2880
2881 error = xfs_iformat_fork(ip, dip);
2882 if (error)
2883 goto out_free_ip;
2884
2885 if (!xfs_inode_verify_forks(ip)) {
2886 error = -EFSCORRUPTED;
2887 goto out_free_ip;
2888 }
2889
2890 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2891 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2892 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2893 ip->i_ino, buffer_list);
2894 if (error)
2895 goto out_free_ip;
2896 }
2897
2898 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2899 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2900 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2901 ip->i_ino, buffer_list);
2902 if (error)
2903 goto out_free_ip;
2904 }
2905
2906out_free_ip:
2907 xfs_inode_free(ip);
2908 return error;
2909}
2910
2911STATIC int
2912xlog_recover_inode_pass2(
2913 struct xlog *log,
2914 struct list_head *buffer_list,
2915 struct xlog_recover_item *item,
2916 xfs_lsn_t current_lsn)
2917{
2918 struct xfs_inode_log_format *in_f;
2919 xfs_mount_t *mp = log->l_mp;
2920 xfs_buf_t *bp;
2921 xfs_dinode_t *dip;
2922 int len;
2923 char *src;
2924 char *dest;
2925 int error;
2926 int attr_index;
2927 uint fields;
2928 struct xfs_log_dinode *ldip;
2929 uint isize;
2930 int need_free = 0;
2931
2932 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
2933 in_f = item->ri_buf[0].i_addr;
2934 } else {
2935 in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
2936 need_free = 1;
2937 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2938 if (error)
2939 goto error;
2940 }
2941
2942 /*
	 * Inode buffers can be freed; look out for that case
	 * and do not replay the inode.
2945 */
2946 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2947 in_f->ilf_len, 0)) {
2948 error = 0;
2949 trace_xfs_log_recover_inode_cancel(log, in_f);
2950 goto error;
2951 }
2952 trace_xfs_log_recover_inode_recover(log, in_f);
2953
2954 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2955 &xfs_inode_buf_ops);
2956 if (!bp) {
2957 error = -ENOMEM;
2958 goto error;
2959 }
2960 error = bp->b_error;
2961 if (error) {
2962 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2963 goto out_release;
2964 }
2965 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2966 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2967
2968 /*
2969 * Make sure the place we're flushing out to really looks
2970 * like an inode!
2971 */
2972 if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
2973 xfs_alert(mp,
2974 "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
2975 __func__, dip, bp, in_f->ilf_ino);
2976 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2977 XFS_ERRLEVEL_LOW, mp);
2978 error = -EFSCORRUPTED;
2979 goto out_release;
2980 }
2981 ldip = item->ri_buf[1].i_addr;
2982 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
2983 xfs_alert(mp,
2984 "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
2985 __func__, item, in_f->ilf_ino);
2986 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2987 XFS_ERRLEVEL_LOW, mp);
2988 error = -EFSCORRUPTED;
2989 goto out_release;
2990 }
2991
2992 /*
	 * If the inode has an LSN in it, recover the inode only if that LSN is less
2994 * than the lsn of the transaction we are replaying. Note: we still
2995 * need to replay an owner change even though the inode is more recent
2996 * than the transaction as there is no guarantee that all the btree
2997 * blocks are more recent than this transaction, too.
2998 */
2999 if (dip->di_version >= 3) {
3000 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
3001
3002 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3003 trace_xfs_log_recover_inode_skip(log, in_f);
3004 error = 0;
3005 goto out_owner_change;
3006 }
3007 }
3008
3009 /*
3010 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3011 * are transactional and if ordering is necessary we can determine that
3012 * more accurately by the LSN field in the V3 inode core. Don't trust
	 * the inode versions, as we might be changing them here - use the
	 * superblock flag to determine whether we need to look at di_flushiter
	 * to skip replay when the on disk inode is newer than the log one.
	 */
3017 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3018 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3019 /*
		 * Deal with the wrap case: di_flushiter wraps back to zero
		 * after DI_MAX_FLUSH, so an on-disk value of DI_MAX_FLUSH can
		 * legitimately be "older" than a small value in the log.
		 */
3023 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3024 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3025 /* do nothing */
3026 } else {
3027 trace_xfs_log_recover_inode_skip(log, in_f);
3028 error = 0;
3029 goto out_release;
3030 }
3031 }
3032
3033 /* Take the opportunity to reset the flush iteration count */
3034 ldip->di_flushiter = 0;
3035
3036 if (unlikely(S_ISREG(ldip->di_mode))) {
3037 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3038 (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3039 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3040 XFS_ERRLEVEL_LOW, mp, ldip,
3041 sizeof(*ldip));
3042 xfs_alert(mp,
3043 "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3044 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3045 __func__, item, dip, bp, in_f->ilf_ino);
3046 error = -EFSCORRUPTED;
3047 goto out_release;
3048 }
3049 } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3050 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3051 (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3052 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3053 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3054 XFS_ERRLEVEL_LOW, mp, ldip,
3055 sizeof(*ldip));
3056 xfs_alert(mp,
3057 "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3058 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3059 __func__, item, dip, bp, in_f->ilf_ino);
3060 error = -EFSCORRUPTED;
3061 goto out_release;
3062 }
3063 }
3064 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3065 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3066 XFS_ERRLEVEL_LOW, mp, ldip,
3067 sizeof(*ldip));
3068 xfs_alert(mp,
3069 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3070 "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3071 __func__, item, dip, bp, in_f->ilf_ino,
3072 ldip->di_nextents + ldip->di_anextents,
3073 ldip->di_nblocks);
3074 error = -EFSCORRUPTED;
3075 goto out_release;
3076 }
3077 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3078 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3079 XFS_ERRLEVEL_LOW, mp, ldip,
3080 sizeof(*ldip));
3081 xfs_alert(mp,
3082 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3083 "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3084 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3085 error = -EFSCORRUPTED;
3086 goto out_release;
3087 }
3088 isize = xfs_log_dinode_size(ldip->di_version);
3089 if (unlikely(item->ri_buf[1].i_len > isize)) {
3090 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3091 XFS_ERRLEVEL_LOW, mp, ldip,
3092 sizeof(*ldip));
3093 xfs_alert(mp,
3094 "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3095 __func__, item->ri_buf[1].i_len, item);
3096 error = -EFSCORRUPTED;
3097 goto out_release;
3098 }
3099
3100 /* recover the log dinode inode into the on disk inode */
3101 xfs_log_dinode_to_disk(ldip, dip);
3102
3103 fields = in_f->ilf_fields;
3104 if (fields & XFS_ILOG_DEV)
3105 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3106
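	/*
	 * The log item regions for an inode are laid out as: region 0 holds
	 * the inode log format, region 1 the inode core, then the data fork
	 * (if logged) and finally the attr fork (if logged). Hence an
	 * ilf_size of 2 means only the core was logged and there are no
	 * fork regions to copy below.
	 */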
3107 if (in_f->ilf_size == 2)
3108 goto out_owner_change;
3109 len = item->ri_buf[2].i_len;
3110 src = item->ri_buf[2].i_addr;
3111 ASSERT(in_f->ilf_size <= 4);
3112 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3113 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3114 (len == in_f->ilf_dsize));
3115
3116 switch (fields & XFS_ILOG_DFORK) {
3117 case XFS_ILOG_DDATA:
3118 case XFS_ILOG_DEXT:
3119 memcpy(XFS_DFORK_DPTR(dip), src, len);
3120 break;
3121
3122 case XFS_ILOG_DBROOT:
3123 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3124 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3125 XFS_DFORK_DSIZE(dip, mp));
3126 break;
3127
3128 default:
3129 /*
3130 * There are no data fork flags set.
3131 */
3132 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3133 break;
3134 }
3135
3136 /*
3137 * If we logged any attribute data, recover it. There may or
3138 * may not have been any other non-core data logged in this
3139 * transaction.
3140 */
3141 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3142 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3143 attr_index = 3;
3144 } else {
3145 attr_index = 2;
3146 }
3147 len = item->ri_buf[attr_index].i_len;
3148 src = item->ri_buf[attr_index].i_addr;
3149 ASSERT(len == in_f->ilf_asize);
3150
3151 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3152 case XFS_ILOG_ADATA:
3153 case XFS_ILOG_AEXT:
3154 dest = XFS_DFORK_APTR(dip);
3155 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3156 memcpy(dest, src, len);
3157 break;
3158
3159 case XFS_ILOG_ABROOT:
3160 dest = XFS_DFORK_APTR(dip);
3161 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3162 len, (xfs_bmdr_block_t*)dest,
3163 XFS_DFORK_ASIZE(dip, mp));
3164 break;
3165
3166 default:
3167 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3168 ASSERT(0);
3169 error = -EIO;
3170 goto out_release;
3171 }
3172 }
3173
3174out_owner_change:
3175 /* Recover the swapext owner change unless inode has been deleted */
3176 if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3177 (dip->di_mode != 0))
3178 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3179 buffer_list);
3180 /* re-generate the checksum. */
3181 xfs_dinode_calc_crc(log->l_mp, dip);
3182
3183 ASSERT(bp->b_mount == mp);
3184 bp->b_iodone = xlog_recover_iodone;
3185 xfs_buf_delwri_queue(bp, buffer_list);
3186
3187out_release:
3188 xfs_buf_relse(bp);
3189error:
3190 if (need_free)
3191 kmem_free(in_f);
3192 return error;
3193}
3194
3195/*
3196 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer
 * recovery of that type.
3199 */
3200STATIC int
3201xlog_recover_quotaoff_pass1(
3202 struct xlog *log,
3203 struct xlog_recover_item *item)
3204{
	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;

	ASSERT(qoff_f);
3207
3208 /*
3209 * The logitem format's flag tells us if this was user quotaoff,
3210 * group/project quotaoff or both.
3211 */
3212 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3213 log->l_quotaoffs_flag |= XFS_DQ_USER;
3214 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3215 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3216 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3217 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3218
3219 return 0;
3220}
3221
3222/*
3223 * Recover a dquot record
3224 */
3225STATIC int
3226xlog_recover_dquot_pass2(
3227 struct xlog *log,
3228 struct list_head *buffer_list,
3229 struct xlog_recover_item *item,
3230 xfs_lsn_t current_lsn)
3231{
3232 xfs_mount_t *mp = log->l_mp;
3233 xfs_buf_t *bp;
3234 struct xfs_disk_dquot *ddq, *recddq;
3235 xfs_failaddr_t fa;
3236 int error;
3237 xfs_dq_logformat_t *dq_f;
3238 uint type;
3239
3241 /*
3242 * Filesystems are required to send in quota flags at mount time.
3243 */
3244 if (mp->m_qflags == 0)
3245 return 0;
3246
3247 recddq = item->ri_buf[1].i_addr;
3248 if (recddq == NULL) {
3249 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3250 return -EIO;
3251 }
3252 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3253 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3254 item->ri_buf[1].i_len, __func__);
3255 return -EIO;
3256 }
3257
3258 /*
3259 * This type of quotas was turned off, so ignore this record.
3260 */
3261 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3262 ASSERT(type);
3263 if (log->l_quotaoffs_flag & type)
3264 return 0;
3265
3266 /*
3267 * At this point we know that quota was _not_ turned off.
	 * Since the mount flags do not indicate otherwise, this
3269 * must mean that quota is on, and the dquot needs to be replayed.
3270 * Remember that we may not have fully recovered the superblock yet,
3271 * so we can't do the usual trick of looking at the SB quota bits.
3272 *
3273 * The other possibility, of course, is that the quota subsystem was
3274 * removed since the last mount - ENOSYS.
3275 */
3276 dq_f = item->ri_buf[0].i_addr;
3277 ASSERT(dq_f);
3278 fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3279 if (fa) {
3280 xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3281 dq_f->qlf_id, fa);
3282 return -EIO;
3283 }
3284 ASSERT(dq_f->qlf_len == 1);
3285
3286 /*
3287 * At this point we are assuming that the dquots have been allocated
3288 * and hence the buffer has valid dquots stamped in it. It should,
	 * therefore, pass verifier validation. If the dquot is bad, then
	 * we'll return an error here, so we don't need to specifically check
3291 * the dquot in the buffer after the verifier has run.
3292 */
3293 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3294 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3295 &xfs_dquot_buf_ops);
3296 if (error)
3297 return error;
3298
3299 ASSERT(bp);
3300 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3301
3302 /*
3303 * If the dquot has an LSN in it, recover the dquot only if it's less
3304 * than the lsn of the transaction we are replaying.
3305 */
3306 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3307 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3308 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3309
3310 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3311 goto out_release;
3312 }
3313 }
3314
3315 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3316 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3317 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3318 XFS_DQUOT_CRC_OFF);
3319 }
3320
3321 ASSERT(dq_f->qlf_size == 2);
3322 ASSERT(bp->b_mount == mp);
3323 bp->b_iodone = xlog_recover_iodone;
3324 xfs_buf_delwri_queue(bp, buffer_list);
3325
3326out_release:
3327 xfs_buf_relse(bp);
3328 return 0;
3329}
3330
3331/*
3332 * This routine is called to create an in-core extent free intent
3333 * item from the efi format structure which was logged on disk.
3334 * It allocates an in-core efi, copies the extents from the format
3335 * structure into it, and adds the efi to the AIL with the given
3336 * LSN.
3337 */
3338STATIC int
3339xlog_recover_efi_pass2(
3340 struct xlog *log,
3341 struct xlog_recover_item *item,
3342 xfs_lsn_t lsn)
3343{
3344 int error;
3345 struct xfs_mount *mp = log->l_mp;
3346 struct xfs_efi_log_item *efip;
3347 struct xfs_efi_log_format *efi_formatp;
3348
3349 efi_formatp = item->ri_buf[0].i_addr;
3350
3351 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3352 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3353 if (error) {
3354 xfs_efi_item_free(efip);
3355 return error;
3356 }
3357 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3358
3359 spin_lock(&log->l_ailp->ail_lock);
3360 /*
3361 * The EFI has two references. One for the EFD and one for EFI to ensure
3362 * it makes it into the AIL. Insert the EFI into the AIL directly and
3363 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3364 * AIL lock.
3365 */
3366 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3367 xfs_efi_release(efip);
3368 return 0;
3369}
3371
3372/*
3373 * This routine is called when an EFD format structure is found in a committed
3374 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3375 * was still in the log. To do this it searches the AIL for the EFI with an id
3376 * equal to that in the EFD format structure. If we find it we drop the EFD
3377 * reference, which removes the EFI from the AIL and frees it.
3378 */
3379STATIC int
3380xlog_recover_efd_pass2(
3381 struct xlog *log,
3382 struct xlog_recover_item *item)
3383{
3384 xfs_efd_log_format_t *efd_formatp;
3385 xfs_efi_log_item_t *efip = NULL;
3386 struct xfs_log_item *lip;
3387 uint64_t efi_id;
3388 struct xfs_ail_cursor cur;
3389 struct xfs_ail *ailp = log->l_ailp;
3390
3391 efd_formatp = item->ri_buf[0].i_addr;
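	/*
	 * The EFD may have been written by either a 32 bit or a 64 bit
	 * kernel; the two layouts pad the extent array differently, which
	 * is why either size is accepted below.
	 */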
3392 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3393 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3394 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3395 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3396 efi_id = efd_formatp->efd_efi_id;
3397
3398 /*
3399 * Search for the EFI with the id in the EFD format structure in the
3400 * AIL.
3401 */
3402 spin_lock(&ailp->ail_lock);
3403 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3404 while (lip != NULL) {
3405 if (lip->li_type == XFS_LI_EFI) {
3406 efip = (xfs_efi_log_item_t *)lip;
3407 if (efip->efi_format.efi_id == efi_id) {
3408 /*
3409 * Drop the EFD reference to the EFI. This
3410 * removes the EFI from the AIL and frees it.
3411 */
3412 spin_unlock(&ailp->ail_lock);
3413 xfs_efi_release(efip);
3414 spin_lock(&ailp->ail_lock);
3415 break;
3416 }
3417 }
3418 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3419 }
3420
3421 xfs_trans_ail_cursor_done(&cur);
3422 spin_unlock(&ailp->ail_lock);
3423
3424 return 0;
3425}
3426
3427/*
3428 * This routine is called to create an in-core extent rmap update
3429 * item from the rui format structure which was logged on disk.
3430 * It allocates an in-core rui, copies the extents from the format
3431 * structure into it, and adds the rui to the AIL with the given
3432 * LSN.
3433 */
3434STATIC int
3435xlog_recover_rui_pass2(
3436 struct xlog *log,
3437 struct xlog_recover_item *item,
3438 xfs_lsn_t lsn)
3439{
3440 int error;
3441 struct xfs_mount *mp = log->l_mp;
3442 struct xfs_rui_log_item *ruip;
3443 struct xfs_rui_log_format *rui_formatp;
3444
3445 rui_formatp = item->ri_buf[0].i_addr;
3446
3447 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3448 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3449 if (error) {
3450 xfs_rui_item_free(ruip);
3451 return error;
3452 }
3453 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3454
3455 spin_lock(&log->l_ailp->ail_lock);
3456 /*
3457 * The RUI has two references. One for the RUD and one for RUI to ensure
3458 * it makes it into the AIL. Insert the RUI into the AIL directly and
3459 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3460 * AIL lock.
3461 */
3462 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3463 xfs_rui_release(ruip);
3464 return 0;
3465}
3467
3468/*
3469 * This routine is called when an RUD format structure is found in a committed
3470 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3471 * was still in the log. To do this it searches the AIL for the RUI with an id
3472 * equal to that in the RUD format structure. If we find it we drop the RUD
3473 * reference, which removes the RUI from the AIL and frees it.
3474 */
3475STATIC int
3476xlog_recover_rud_pass2(
3477 struct xlog *log,
3478 struct xlog_recover_item *item)
3479{
3480 struct xfs_rud_log_format *rud_formatp;
3481 struct xfs_rui_log_item *ruip = NULL;
3482 struct xfs_log_item *lip;
3483 uint64_t rui_id;
3484 struct xfs_ail_cursor cur;
3485 struct xfs_ail *ailp = log->l_ailp;
3486
3487 rud_formatp = item->ri_buf[0].i_addr;
3488 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3489 rui_id = rud_formatp->rud_rui_id;
3490
3491 /*
3492 * Search for the RUI with the id in the RUD format structure in the
3493 * AIL.
3494 */
3495 spin_lock(&ailp->ail_lock);
3496 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3497 while (lip != NULL) {
3498 if (lip->li_type == XFS_LI_RUI) {
3499 ruip = (struct xfs_rui_log_item *)lip;
3500 if (ruip->rui_format.rui_id == rui_id) {
3501 /*
3502 * Drop the RUD reference to the RUI. This
3503 * removes the RUI from the AIL and frees it.
3504 */
3505 spin_unlock(&ailp->ail_lock);
3506 xfs_rui_release(ruip);
3507 spin_lock(&ailp->ail_lock);
3508 break;
3509 }
3510 }
3511 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3512 }
3513
3514 xfs_trans_ail_cursor_done(&cur);
3515 spin_unlock(&ailp->ail_lock);
3516
3517 return 0;
3518}
3519
3520/*
 * Copy a CUI format buffer from the given buf, and into the destination
3522 * CUI format structure. The CUI/CUD items were designed not to need any
3523 * special alignment handling.
3524 */
3525static int
3526xfs_cui_copy_format(
3527 struct xfs_log_iovec *buf,
3528 struct xfs_cui_log_format *dst_cui_fmt)
3529{
3530 struct xfs_cui_log_format *src_cui_fmt;
3531 uint len;
3532
3533 src_cui_fmt = buf->i_addr;
3534 len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3535
3536 if (buf->i_len == len) {
3537 memcpy(dst_cui_fmt, src_cui_fmt, len);
3538 return 0;
3539 }
3540 return -EFSCORRUPTED;
3541}
3542
3543/*
3544 * This routine is called to create an in-core extent refcount update
3545 * item from the cui format structure which was logged on disk.
3546 * It allocates an in-core cui, copies the extents from the format
3547 * structure into it, and adds the cui to the AIL with the given
3548 * LSN.
3549 */
3550STATIC int
3551xlog_recover_cui_pass2(
3552 struct xlog *log,
3553 struct xlog_recover_item *item,
3554 xfs_lsn_t lsn)
3555{
3556 int error;
3557 struct xfs_mount *mp = log->l_mp;
3558 struct xfs_cui_log_item *cuip;
3559 struct xfs_cui_log_format *cui_formatp;
3560
3561 cui_formatp = item->ri_buf[0].i_addr;
3562
3563 cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3564 error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3565 if (error) {
3566 xfs_cui_item_free(cuip);
3567 return error;
3568 }
3569 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3570
3571 spin_lock(&log->l_ailp->ail_lock);
3572 /*
3573 * The CUI has two references. One for the CUD and one for CUI to ensure
3574 * it makes it into the AIL. Insert the CUI into the AIL directly and
3575 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3576 * AIL lock.
3577 */
3578 xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3579 xfs_cui_release(cuip);
3580 return 0;
3581}
3583
3584/*
 * This routine is called when a CUD format structure is found in a committed
3586 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3587 * was still in the log. To do this it searches the AIL for the CUI with an id
3588 * equal to that in the CUD format structure. If we find it we drop the CUD
3589 * reference, which removes the CUI from the AIL and frees it.
3590 */
3591STATIC int
3592xlog_recover_cud_pass2(
3593 struct xlog *log,
3594 struct xlog_recover_item *item)
3595{
3596 struct xfs_cud_log_format *cud_formatp;
3597 struct xfs_cui_log_item *cuip = NULL;
3598 struct xfs_log_item *lip;
3599 uint64_t cui_id;
3600 struct xfs_ail_cursor cur;
3601 struct xfs_ail *ailp = log->l_ailp;
3602
3603 cud_formatp = item->ri_buf[0].i_addr;
3604 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3605 return -EFSCORRUPTED;
3606 cui_id = cud_formatp->cud_cui_id;
3607
3608 /*
3609 * Search for the CUI with the id in the CUD format structure in the
3610 * AIL.
3611 */
3612 spin_lock(&ailp->ail_lock);
3613 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3614 while (lip != NULL) {
3615 if (lip->li_type == XFS_LI_CUI) {
3616 cuip = (struct xfs_cui_log_item *)lip;
3617 if (cuip->cui_format.cui_id == cui_id) {
3618 /*
3619 * Drop the CUD reference to the CUI. This
3620 * removes the CUI from the AIL and frees it.
3621 */
3622 spin_unlock(&ailp->ail_lock);
3623 xfs_cui_release(cuip);
3624 spin_lock(&ailp->ail_lock);
3625 break;
3626 }
3627 }
3628 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3629 }
3630
3631 xfs_trans_ail_cursor_done(&cur);
3632 spin_unlock(&ailp->ail_lock);
3633
3634 return 0;
3635}
3636
3637/*
 * Copy a BUI format buffer from the given buf, and into the destination
3639 * BUI format structure. The BUI/BUD items were designed not to need any
3640 * special alignment handling.
3641 */
3642static int
3643xfs_bui_copy_format(
3644 struct xfs_log_iovec *buf,
3645 struct xfs_bui_log_format *dst_bui_fmt)
3646{
3647 struct xfs_bui_log_format *src_bui_fmt;
3648 uint len;
3649
3650 src_bui_fmt = buf->i_addr;
3651 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3652
3653 if (buf->i_len == len) {
3654 memcpy(dst_bui_fmt, src_bui_fmt, len);
3655 return 0;
3656 }
3657 return -EFSCORRUPTED;
3658}
3659
3660/*
3661 * This routine is called to create an in-core extent bmap update
3662 * item from the bui format structure which was logged on disk.
3663 * It allocates an in-core bui, copies the extents from the format
3664 * structure into it, and adds the bui to the AIL with the given
3665 * LSN.
3666 */
3667STATIC int
3668xlog_recover_bui_pass2(
3669 struct xlog *log,
3670 struct xlog_recover_item *item,
3671 xfs_lsn_t lsn)
3672{
3673 int error;
3674 struct xfs_mount *mp = log->l_mp;
3675 struct xfs_bui_log_item *buip;
3676 struct xfs_bui_log_format *bui_formatp;
3677
3678 bui_formatp = item->ri_buf[0].i_addr;
3679
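	/*
	 * bmap intents always log exactly one extent
	 * (XFS_BUI_MAX_FAST_EXTENTS), so any other count here means the
	 * recovered format structure is corrupt.
	 */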
3680 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3681 return -EFSCORRUPTED;
3682 buip = xfs_bui_init(mp);
3683 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3684 if (error) {
3685 xfs_bui_item_free(buip);
3686 return error;
3687 }
3688 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3689
3690 spin_lock(&log->l_ailp->ail_lock);
3691 /*
	 * The BUI has two references. One for the BUD and one for the BUI to
	 * ensure it makes it into the AIL. Insert the BUI into the AIL directly
	 * and drop the BUI reference. Note that xfs_trans_ail_update() drops the
3695 * AIL lock.
3696 */
3697 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3698 xfs_bui_release(buip);
3699 return 0;
3700}
3701
3703/*
3704 * This routine is called when a BUD format structure is found in a committed
3705 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3706 * was still in the log. To do this it searches the AIL for the BUI with an id
3707 * equal to that in the BUD format structure. If we find it, we drop the BUD
3708 * reference, which removes the BUI from the AIL and frees it.
3709 */
3710STATIC int
3711xlog_recover_bud_pass2(
3712 struct xlog *log,
3713 struct xlog_recover_item *item)
3714{
3715 struct xfs_bud_log_format *bud_formatp;
3716 struct xfs_bui_log_item *buip = NULL;
3717 struct xfs_log_item *lip;
3718 uint64_t bui_id;
3719 struct xfs_ail_cursor cur;
3720 struct xfs_ail *ailp = log->l_ailp;
3721
3722 bud_formatp = item->ri_buf[0].i_addr;
3723 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3724 return -EFSCORRUPTED;
3725 bui_id = bud_formatp->bud_bui_id;
3726
3727 /*
3728 * Search for the BUI with the id in the BUD format structure in the
3729 * AIL.
3730 */
3731 spin_lock(&ailp->ail_lock);
3732 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3733 while (lip != NULL) {
3734 if (lip->li_type == XFS_LI_BUI) {
3735 buip = (struct xfs_bui_log_item *)lip;
3736 if (buip->bui_format.bui_id == bui_id) {
3737 /*
3738 * Drop the BUD reference to the BUI. This
3739 * removes the BUI from the AIL and frees it.
3740 */
3741 spin_unlock(&ailp->ail_lock);
3742 xfs_bui_release(buip);
3743 spin_lock(&ailp->ail_lock);
3744 break;
3745 }
3746 }
3747 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3748 }
3749
3750 xfs_trans_ail_cursor_done(&cur);
3751 spin_unlock(&ailp->ail_lock);
3752
3753 return 0;
3754}
3755
3756/*
3757 * This routine is called when an inode create format structure is found in a
3758 * committed transaction in the log. Its purpose is to initialise the inodes
3759 * being allocated on disk. This requires us to get inode cluster buffers that
3760 * match the range to be initialised, stamped with inode templates and written
3761 * by delayed write so that subsequent modifications will hit the cached buffer
3762 * and only need writing out at the end of recovery.
3763 */
3764STATIC int
3765xlog_recover_do_icreate_pass2(
3766 struct xlog *log,
3767 struct list_head *buffer_list,
3768 xlog_recover_item_t *item)
3769{
3770 struct xfs_mount *mp = log->l_mp;
3771 struct xfs_icreate_log *icl;
3772 struct xfs_ino_geometry *igeo = M_IGEO(mp);
3773 xfs_agnumber_t agno;
3774 xfs_agblock_t agbno;
3775 unsigned int count;
3776 unsigned int isize;
3777 xfs_agblock_t length;
3778 int bb_per_cluster;
3779 int cancel_count;
3780 int nbufs;
3781 int i;
3782
3783 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3784 if (icl->icl_type != XFS_LI_ICREATE) {
3785 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3786 return -EINVAL;
3787 }
3788
3789 if (icl->icl_size != 1) {
3790 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3791 return -EINVAL;
3792 }
3793
3794 agno = be32_to_cpu(icl->icl_ag);
3795 if (agno >= mp->m_sb.sb_agcount) {
3796 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3797 return -EINVAL;
3798 }
3799 agbno = be32_to_cpu(icl->icl_agbno);
3800 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3801 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3802 return -EINVAL;
3803 }
3804 isize = be32_to_cpu(icl->icl_isize);
3805 if (isize != mp->m_sb.sb_inodesize) {
3806 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3807 return -EINVAL;
3808 }
3809 count = be32_to_cpu(icl->icl_count);
3810 if (!count) {
3811 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3812 return -EINVAL;
3813 }
3814 length = be32_to_cpu(icl->icl_length);
3815 if (!length || length >= mp->m_sb.sb_agblocks) {
3816 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3817 return -EINVAL;
3818 }
3819
3820 /*
3821 * The inode chunk is either full or sparse and we only support
3822 * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
3823 */
3824 if (length != igeo->ialloc_blks &&
3825 length != igeo->ialloc_min_blks) {
3826 xfs_warn(log->l_mp,
3827			"%s: unsupported chunk length", __func__);
3828 return -EINVAL;
3829 }
3830
3831 /* verify inode count is consistent with extent length */
3832 if ((count >> mp->m_sb.sb_inopblog) != length) {
3833 xfs_warn(log->l_mp,
3834 "%s: inconsistent inode count and chunk length",
3835			__func__);
3836 return -EINVAL;
3837 }
3838
3839 /*
3840 * The icreate transaction can cover multiple cluster buffers and these
3841 * buffers could have been freed and reused. Check the individual
3842 * buffers for cancellation so we don't overwrite anything written after
3843 * a cancellation.
3844 */
3845 bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
3846 nbufs = length / igeo->blocks_per_cluster;
3847 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3848 xfs_daddr_t daddr;
3849
3850 daddr = XFS_AGB_TO_DADDR(mp, agno,
3851 agbno + i * igeo->blocks_per_cluster);
3852 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3853 cancel_count++;
3854 }
3855
3856 /*
3857 * We currently only use icreate for a single allocation at a time. This
3858 * means we should expect either all or none of the buffers to be
3859 * cancelled. Be conservative and skip replay if at least one buffer is
3860 * cancelled, but warn the user that something is awry if the buffers
3861 * are not consistent.
3862 *
3863 * XXX: This must be refined to only skip cancelled clusters once we use
3864 * icreate for multiple chunk allocations.
3865 */
3866 ASSERT(!cancel_count || cancel_count == nbufs);
3867 if (cancel_count) {
3868 if (cancel_count != nbufs)
3869 xfs_warn(mp,
3870 "WARNING: partial inode chunk cancellation, skipped icreate.");
3871 trace_xfs_log_recover_icreate_cancel(log, icl);
3872 return 0;
3873 }
3874
3875 trace_xfs_log_recover_icreate_recover(log, icl);
3876 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3877 length, be32_to_cpu(icl->icl_gen));
3878}
3879
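/*
 * Queue readahead for the buffer targeted by a buffer log item so that the
 * buffer is already cached when the item is replayed. Buffers with matching
 * cancellation entries are skipped since they will never be replayed.
 */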
3880STATIC void
3881xlog_recover_buffer_ra_pass2(
3882 struct xlog *log,
3883 struct xlog_recover_item *item)
3884{
3885 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3886 struct xfs_mount *mp = log->l_mp;
3887
3888 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3889 buf_f->blf_len, buf_f->blf_flags)) {
3890 return;
3891 }
3892
3893 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3894 buf_f->blf_len, NULL);
3895}
3896
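/*
 * Queue readahead for the inode cluster buffer referenced by an inode log
 * item. Older on-disk log item formats are converted to the current in-core
 * format first so the buffer location can be read from a single layout.
 */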
3897STATIC void
3898xlog_recover_inode_ra_pass2(
3899 struct xlog *log,
3900 struct xlog_recover_item *item)
3901{
3902 struct xfs_inode_log_format ilf_buf;
3903 struct xfs_inode_log_format *ilfp;
3904 struct xfs_mount *mp = log->l_mp;
3905 int error;
3906
3907 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3908 ilfp = item->ri_buf[0].i_addr;
3909 } else {
3910 ilfp = &ilf_buf;
3911 memset(ilfp, 0, sizeof(*ilfp));
3912 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3913 if (error)
3914 return;
3915 }
3916
3917 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3918 return;
3919
3920 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3921 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3922}
3923
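/*
 * Queue readahead for the dquot buffer referenced by a dquot log item,
 * unless quotas are turned off or a quotaoff of this quota type was found
 * in the log, either of which makes replaying the dquot unnecessary.
 */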
3924STATIC void
3925xlog_recover_dquot_ra_pass2(
3926 struct xlog *log,
3927 struct xlog_recover_item *item)
3928{
3929 struct xfs_mount *mp = log->l_mp;
3930 struct xfs_disk_dquot *recddq;
3931 struct xfs_dq_logformat *dq_f;
3932 uint type;
3933 int len;
3934
3936 if (mp->m_qflags == 0)
3937 return;
3938
3939 recddq = item->ri_buf[1].i_addr;
3940 if (recddq == NULL)
3941 return;
3942 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3943 return;
3944
3945 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3946 ASSERT(type);
3947 if (log->l_quotaoffs_flag & type)
3948 return;
3949
3950 dq_f = item->ri_buf[0].i_addr;
3951 ASSERT(dq_f);
3952 ASSERT(dq_f->qlf_len == 1);
3953
3954 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3955 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3956 return;
3957
3958 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3959 &xfs_dquot_buf_ra_ops);
3960}
3961
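/*
 * Dispatch pass 2 readahead by log item type. Intent and intent-done items
 * (EFI/EFD, RUI/RUD, CUI/CUD, BUI/BUD) and quotaoff items carry no buffer
 * to read ahead, so they fall through to the no-op default.
 */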
3962STATIC void
3963xlog_recover_ra_pass2(
3964 struct xlog *log,
3965 struct xlog_recover_item *item)
3966{
3967 switch (ITEM_TYPE(item)) {
3968 case XFS_LI_BUF:
3969 xlog_recover_buffer_ra_pass2(log, item);
3970 break;
3971 case XFS_LI_INODE:
3972 xlog_recover_inode_ra_pass2(log, item);
3973 break;
3974 case XFS_LI_DQUOT:
3975 xlog_recover_dquot_ra_pass2(log, item);
3976 break;
3977 case XFS_LI_EFI:
3978 case XFS_LI_EFD:
3979 case XFS_LI_QUOTAOFF:
3980 case XFS_LI_RUI:
3981 case XFS_LI_RUD:
3982 case XFS_LI_CUI:
3983 case XFS_LI_CUD:
3984 case XFS_LI_BUI:
3985 case XFS_LI_BUD:
3986 default:
3987 break;
3988 }
3989}
3990
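/*
 * Pass 1 dispatch. Only buffer and quotaoff items need pass 1 processing,
 * to build the tables of cancelled buffers and quotaoff flags that pass 2
 * consults; all other item types are handled entirely in pass 2.
 */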
3991STATIC int
3992xlog_recover_commit_pass1(
3993 struct xlog *log,
3994 struct xlog_recover *trans,
3995 struct xlog_recover_item *item)
3996{
3997 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3998
3999 switch (ITEM_TYPE(item)) {
4000 case XFS_LI_BUF:
4001 return xlog_recover_buffer_pass1(log, item);
4002 case XFS_LI_QUOTAOFF:
4003 return xlog_recover_quotaoff_pass1(log, item);
4004 case XFS_LI_INODE:
4005 case XFS_LI_EFI:
4006 case XFS_LI_EFD:
4007 case XFS_LI_DQUOT:
4008 case XFS_LI_ICREATE:
4009 case XFS_LI_RUI:
4010 case XFS_LI_RUD:
4011 case XFS_LI_CUI:
4012 case XFS_LI_CUD:
4013 case XFS_LI_BUI:
4014 case XFS_LI_BUD:
4015 /* nothing to do in pass 1 */
4016 return 0;
4017 default:
4018 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4019 __func__, ITEM_TYPE(item));
4020 ASSERT(0);
4021 return -EIO;
4022 }
4023}
4024
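/*
 * Pass 2 dispatch: replay each item type against the filesystem. Items that
 * modify metadata directly take the transaction commit LSN so that replay
 * can be skipped if the on-disk copy is already up to date.
 */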
4025STATIC int
4026xlog_recover_commit_pass2(
4027 struct xlog *log,
4028 struct xlog_recover *trans,
4029 struct list_head *buffer_list,
4030 struct xlog_recover_item *item)
4031{
4032 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4033
4034 switch (ITEM_TYPE(item)) {
4035 case XFS_LI_BUF:
4036 return xlog_recover_buffer_pass2(log, buffer_list, item,
4037 trans->r_lsn);
4038 case XFS_LI_INODE:
4039 return xlog_recover_inode_pass2(log, buffer_list, item,
4040 trans->r_lsn);
4041 case XFS_LI_EFI:
4042 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4043 case XFS_LI_EFD:
4044 return xlog_recover_efd_pass2(log, item);
4045 case XFS_LI_RUI:
4046 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4047 case XFS_LI_RUD:
4048 return xlog_recover_rud_pass2(log, item);
4049 case XFS_LI_CUI:
4050 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4051 case XFS_LI_CUD:
4052 return xlog_recover_cud_pass2(log, item);
4053 case XFS_LI_BUI:
4054 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4055 case XFS_LI_BUD:
4056 return xlog_recover_bud_pass2(log, item);
4057 case XFS_LI_DQUOT:
4058 return xlog_recover_dquot_pass2(log, buffer_list, item,
4059 trans->r_lsn);
4060 case XFS_LI_ICREATE:
4061 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4062 case XFS_LI_QUOTAOFF:
4063		/* nothing to do in pass 2 */
4064 return 0;
4065 default:
4066 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4067 __func__, ITEM_TYPE(item));
4068 ASSERT(0);
4069 return -EIO;
4070 }
4071}
4072
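/*
 * Replay a batch of items for which readahead has already been issued,
 * stopping at the first item that fails to recover.
 */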
4073STATIC int
4074xlog_recover_items_pass2(
4075 struct xlog *log,
4076 struct xlog_recover *trans,
4077 struct list_head *buffer_list,
4078 struct list_head *item_list)
4079{
4080 struct xlog_recover_item *item;
4081 int error = 0;
4082
4083 list_for_each_entry(item, item_list, ri_list) {
4084 error = xlog_recover_commit_pass2(log, trans,
4085 buffer_list, item);
4086 if (error)
4087 return error;
4088 }
4089
4090 return error;
4091}
4092
4093/*
4094 * Perform the transaction.
4095 *
4096 * If the transaction modifies a buffer or inode, do it now. Otherwise,
4097 * EFIs and EFDs get queued up by adding entries into the AIL for them.
4098 */
4099STATIC int
4100xlog_recover_commit_trans(
4101 struct xlog *log,
4102 struct xlog_recover *trans,
4103 int pass,
4104 struct list_head *buffer_list)
4105{
4106 int error = 0;
4107 int items_queued = 0;
4108 struct xlog_recover_item *item;
4109 struct xlog_recover_item *next;
4110	LIST_HEAD(ra_list);
4111	LIST_HEAD(done_list);
4112
4113 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4114
4115 hlist_del_init(&trans->r_list);
4116
4117 error = xlog_recover_reorder_trans(log, trans, pass);
4118 if (error)
4119 return error;
4120
4121 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4122 switch (pass) {
4123 case XLOG_RECOVER_PASS1:
4124 error = xlog_recover_commit_pass1(log, trans, item);
4125 break;
4126 case XLOG_RECOVER_PASS2:
4127 xlog_recover_ra_pass2(log, item);
4128 list_move_tail(&item->ri_list, &ra_list);
4129 items_queued++;
4130 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4131 error = xlog_recover_items_pass2(log, trans,
4132 buffer_list, &ra_list);
4133 list_splice_tail_init(&ra_list, &done_list);
4134 items_queued = 0;
4135 }
4136
4137 break;
4138 default:
4139 ASSERT(0);
4140 }
4141
4142 if (error)
4143 goto out;
4144 }
4145
4146out:
4147 if (!list_empty(&ra_list)) {
4148 if (!error)
4149 error = xlog_recover_items_pass2(log, trans,
4150 buffer_list, &ra_list);
4151 list_splice_tail_init(&ra_list, &done_list);
4152 }
4153
4154 if (!list_empty(&done_list))
4155 list_splice_init(&done_list, &trans->r_itemq);
4156
4157 return error;
4158}
4159
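/*
 * Allocate an empty recovery item and append it to the transaction's item
 * list. The caller fills in the region buffers afterwards.
 */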
4160STATIC void
4161xlog_recover_add_item(
4162 struct list_head *head)
4163{
4164 xlog_recover_item_t *item;
4165
4166 item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
4167 INIT_LIST_HEAD(&item->ri_list);
4168 list_add_tail(&item->ri_list, head);
4169}
4170
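/*
 * Add continuation data to the last region of the last item in the
 * transaction, reallocating that region's buffer to make room.
 */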
4171STATIC int
4172xlog_recover_add_to_cont_trans(
4173 struct xlog *log,
4174 struct xlog_recover *trans,
4175 char *dp,
4176 int len)
4177{
4178 xlog_recover_item_t *item;
4179 char *ptr, *old_ptr;
4180 int old_len;
4181
4182 /*
4183 * If the transaction is empty, the header was split across this and the
4184 * previous record. Copy the rest of the header.
4185 */
4186 if (list_empty(&trans->r_itemq)) {
4187 ASSERT(len <= sizeof(struct xfs_trans_header));
4188 if (len > sizeof(struct xfs_trans_header)) {
4189 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4190 return -EIO;
4191 }
4192
4193 xlog_recover_add_item(&trans->r_itemq);
4194 ptr = (char *)&trans->r_theader +
4195 sizeof(struct xfs_trans_header) - len;
4196 memcpy(ptr, dp, len);
4197 return 0;
4198 }
4199
4200 /* take the tail entry */
4201 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4202
4203 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4204 old_len = item->ri_buf[item->ri_cnt-1].i_len;
4205
4206 ptr = kmem_realloc(old_ptr, len + old_len, 0);
4207 memcpy(&ptr[old_len], dp, len);
4208 item->ri_buf[item->ri_cnt-1].i_len += len;
4209 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4210 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4211 return 0;
4212}
4213
4214/*
4215 * The next region to add is the start of a new region. It could be
4216 * a whole region or just the first part of one. Because
4217 * of this, the assumption here is that the type and size fields of all
4218 * format structures fit into the first 32 bits of the structure.
4219 *
4220 * This works because all regions must be 32 bit aligned. Therefore, we
4221 * either have both fields or neither. If we have neither, the data part
4222 * of the region is zero length, so we only have
4223 * a log_op_header and can throw away the header since a new one will appear
4224 * later. If we have at least 4 bytes, then we can determine how many regions
4225 * will appear in the current log item.
4226 */
4227STATIC int
4228xlog_recover_add_to_trans(
4229 struct xlog *log,
4230 struct xlog_recover *trans,
4231 char *dp,
4232 int len)
4233{
4234 struct xfs_inode_log_format *in_f; /* any will do */
4235 xlog_recover_item_t *item;
4236 char *ptr;
4237
4238 if (!len)
4239 return 0;
4240 if (list_empty(&trans->r_itemq)) {
4241 /* we need to catch log corruptions here */
4242 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4243 xfs_warn(log->l_mp, "%s: bad header magic number",
4244 __func__);
4245 ASSERT(0);
4246 return -EIO;
4247 }
4248
4249 if (len > sizeof(struct xfs_trans_header)) {
4250 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4251 ASSERT(0);
4252 return -EIO;
4253 }
4254
4255 /*
4256 * The transaction header can be arbitrarily split across op
4257 * records. If we don't have the whole thing here, copy what we
4258 * do have and handle the rest in the next record.
4259 */
4260 if (len == sizeof(struct xfs_trans_header))
4261 xlog_recover_add_item(&trans->r_itemq);
4262 memcpy(&trans->r_theader, dp, len);
4263 return 0;
4264 }
4265
4266 ptr = kmem_alloc(len, 0);
4267 memcpy(ptr, dp, len);
4268 in_f = (struct xfs_inode_log_format *)ptr;
4269
4270 /* take the tail entry */
4271 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4272 if (item->ri_total != 0 &&
4273 item->ri_total == item->ri_cnt) {
4274 /* tail item is in use, get a new one */
4275 xlog_recover_add_item(&trans->r_itemq);
4276 item = list_entry(trans->r_itemq.prev,
4277 xlog_recover_item_t, ri_list);
4278 }
4279
4280 if (item->ri_total == 0) { /* first region to be added */
4281 if (in_f->ilf_size == 0 ||
4282 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4283 xfs_warn(log->l_mp,
4284 "bad number of regions (%d) in inode log format",
4285 in_f->ilf_size);
4286 ASSERT(0);
4287 kmem_free(ptr);
4288 return -EIO;
4289 }
4290
4291 item->ri_total = in_f->ilf_size;
4292 item->ri_buf =
4293 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4294 0);
4295 }
4296 ASSERT(item->ri_total > item->ri_cnt);
4297 /* Description region is ri_buf[0] */
4298 item->ri_buf[item->ri_cnt].i_addr = ptr;
4299 item->ri_buf[item->ri_cnt].i_len = len;
4300 item->ri_cnt++;
4301 trace_xfs_log_recover_item_add(log, trans, item, 0);
4302 return 0;
4303}
4304
4305/*
4306 * Free up any resources allocated by the transaction
4307 *
4308 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4309 */
4310STATIC void
4311xlog_recover_free_trans(
4312 struct xlog_recover *trans)
4313{
4314 xlog_recover_item_t *item, *n;
4315 int i;
4316
4317 hlist_del_init(&trans->r_list);
4318
4319 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4320 /* Free the regions in the item. */
4321 list_del(&item->ri_list);
4322 for (i = 0; i < item->ri_cnt; i++)
4323 kmem_free(item->ri_buf[i].i_addr);
4324 /* Free the item itself */
4325 kmem_free(item->ri_buf);
4326 kmem_free(item);
4327 }
4328 /* Free the transaction recover structure */
4329 kmem_free(trans);
4330}
4331
4332/*
4333 * On error or completion, trans is freed.
4334 */
4335STATIC int
4336xlog_recovery_process_trans(
4337 struct xlog *log,
4338 struct xlog_recover *trans,
4339 char *dp,
4340 unsigned int len,
4341 unsigned int flags,
4342 int pass,
4343 struct list_head *buffer_list)
4344{
4345 int error = 0;
4346 bool freeit = false;
4347
4348 /* mask off ophdr transaction container flags */
4349 flags &= ~XLOG_END_TRANS;
4350 if (flags & XLOG_WAS_CONT_TRANS)
4351 flags &= ~XLOG_CONTINUE_TRANS;
4352
4353 /*
4354 * Callees must not free the trans structure. We'll decide if we need to
4355	 * free it or not based on the operation being done and its result.
4356 */
4357 switch (flags) {
4358 /* expected flag values */
4359 case 0:
4360 case XLOG_CONTINUE_TRANS:
4361 error = xlog_recover_add_to_trans(log, trans, dp, len);
4362 break;
4363 case XLOG_WAS_CONT_TRANS:
4364 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4365 break;
4366 case XLOG_COMMIT_TRANS:
4367 error = xlog_recover_commit_trans(log, trans, pass,
4368 buffer_list);
4369 /* success or fail, we are now done with this transaction. */
4370 freeit = true;
4371 break;
4372
4373 /* unexpected flag values */
4374 case XLOG_UNMOUNT_TRANS:
4375 /* just skip trans */
4376 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4377 freeit = true;
4378 break;
4379 case XLOG_START_TRANS:
4380 default:
4381 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4382 ASSERT(0);
4383 error = -EIO;
4384 break;
4385 }
4386 if (error || freeit)
4387 xlog_recover_free_trans(trans);
4388 return error;
4389}
4390
4391/*
4392 * Lookup the transaction recovery structure associated with the ID in the
4393 * current ophdr. If the transaction doesn't exist and the start flag is set in
4394 * the ophdr, then allocate a new transaction for future ID matches to find.
4395 * Either way, return what we found during the lookup - an existing transaction
4396 * or nothing.
4397 */
4398STATIC struct xlog_recover *
4399xlog_recover_ophdr_to_trans(
4400 struct hlist_head rhash[],
4401 struct xlog_rec_header *rhead,
4402 struct xlog_op_header *ohead)
4403{
4404 struct xlog_recover *trans;
4405 xlog_tid_t tid;
4406 struct hlist_head *rhp;
4407
4408 tid = be32_to_cpu(ohead->oh_tid);
4409 rhp = &rhash[XLOG_RHASH(tid)];
4410 hlist_for_each_entry(trans, rhp, r_list) {
4411 if (trans->r_log_tid == tid)
4412 return trans;
4413 }
4414
4415 /*
4416 * skip over non-start transaction headers - we could be
4417 * processing slack space before the next transaction starts
4418 */
4419 if (!(ohead->oh_flags & XLOG_START_TRANS))
4420 return NULL;
4421
4422 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4423
4424 /*
4425 * This is a new transaction so allocate a new recovery container to
4426 * hold the recovery ops that will follow.
4427 */
4428 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
4429 trans->r_log_tid = tid;
4430 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4431 INIT_LIST_HEAD(&trans->r_itemq);
4432 INIT_HLIST_NODE(&trans->r_list);
4433 hlist_add_head(&trans->r_list, rhp);
4434
4435 /*
4436 * Nothing more to do for this ophdr. Items to be added to this new
4437 * transaction will be in subsequent ophdr containers.
4438 */
4439 return NULL;
4440}
4441
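/*
 * Process a single op header within a log record: validate the writer and
 * payload length, find or create the transaction it belongs to, drain the
 * delwri buffer queue when the commit record LSN changes, and then feed the
 * payload to the transaction state machine.
 */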
4442STATIC int
4443xlog_recover_process_ophdr(
4444 struct xlog *log,
4445 struct hlist_head rhash[],
4446 struct xlog_rec_header *rhead,
4447 struct xlog_op_header *ohead,
4448 char *dp,
4449 char *end,
4450 int pass,
4451 struct list_head *buffer_list)
4452{
4453 struct xlog_recover *trans;
4454 unsigned int len;
4455 int error;
4456
4457 /* Do we understand who wrote this op? */
4458 if (ohead->oh_clientid != XFS_TRANSACTION &&
4459 ohead->oh_clientid != XFS_LOG) {
4460 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4461 __func__, ohead->oh_clientid);
4462 ASSERT(0);
4463 return -EIO;
4464 }
4465
4466 /*
4467 * Check the ophdr contains all the data it is supposed to contain.
4468 */
4469 len = be32_to_cpu(ohead->oh_len);
4470 if (dp + len > end) {
4471 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4472 WARN_ON(1);
4473 return -EIO;
4474 }
4475
4476 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4477 if (!trans) {
4478 /* nothing to do, so skip over this ophdr */
4479 return 0;
4480 }
4481
4482 /*
4483 * The recovered buffer queue is drained only once we know that all
4484 * recovery items for the current LSN have been processed. This is
4485 * required because:
4486 *
4487 * - Buffer write submission updates the metadata LSN of the buffer.
4488 * - Log recovery skips items with a metadata LSN >= the current LSN of
4489 * the recovery item.
4490 * - Separate recovery items against the same metadata buffer can share
4491 * a current LSN. I.e., consider that the LSN of a recovery item is
4492 * defined as the starting LSN of the first record in which its
4493 * transaction appears, that a record can hold multiple transactions,
4494 * and/or that a transaction can span multiple records.
4495 *
4496 * In other words, we are allowed to submit a buffer from log recovery
4497 * once per current LSN. Otherwise, we may incorrectly skip recovery
4498 * items and cause corruption.
4499 *
4500 * We don't know up front whether buffers are updated multiple times per
4501 * LSN. Therefore, track the current LSN of each commit log record as it
4502 * is processed and drain the queue when it changes. Use commit records
4503 * because they are ordered correctly by the logging code.
4504 */
4505 if (log->l_recovery_lsn != trans->r_lsn &&
4506 ohead->oh_flags & XLOG_COMMIT_TRANS) {
4507 error = xfs_buf_delwri_submit(buffer_list);
4508 if (error)
4509 return error;
4510 log->l_recovery_lsn = trans->r_lsn;
4511 }
4512
4513 return xlog_recovery_process_trans(log, trans, dp, len,
4514 ohead->oh_flags, pass, buffer_list);
4515}
4516
4517/*
4518 * There are two valid states of the r_state field. 0 indicates that the
4519 * transaction structure is in a normal state. We have either seen the
4520 * start of the transaction or the last operation we added was not a partial
4521 * operation. If the last operation we added to the transaction was a
4522 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4523 *
4524 * NOTE: skip LRs with 0 data length.
4525 */
4526STATIC int
4527xlog_recover_process_data(
4528 struct xlog *log,
4529 struct hlist_head rhash[],
4530 struct xlog_rec_header *rhead,
4531 char *dp,
4532 int pass,
4533 struct list_head *buffer_list)
4534{
4535 struct xlog_op_header *ohead;
4536 char *end;
4537 int num_logops;
4538 int error;
4539
4540 end = dp + be32_to_cpu(rhead->h_len);
4541 num_logops = be32_to_cpu(rhead->h_num_logops);
4542
4543 /* check the log format matches our own - else we can't recover */
4544 if (xlog_header_check_recover(log->l_mp, rhead))
4545 return -EIO;
4546
4547 trace_xfs_log_recover_record(log, rhead, pass);
4548 while ((dp < end) && num_logops) {
4549
4550 ohead = (struct xlog_op_header *)dp;
4551 dp += sizeof(*ohead);
4552 ASSERT(dp <= end);
4553
4554 /* errors will abort recovery */
4555 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4556 dp, end, pass, buffer_list);
4557 if (error)
4558 return error;
4559
4560 dp += be32_to_cpu(ohead->oh_len);
4561 num_logops--;
4562 }
4563 return 0;
4564}
4565
4566/* Recover the EFI if necessary. */
4567STATIC int
4568xlog_recover_process_efi(
4569 struct xfs_mount *mp,
4570 struct xfs_ail *ailp,
4571 struct xfs_log_item *lip)
4572{
4573 struct xfs_efi_log_item *efip;
4574 int error;
4575
4576 /*
4577 * Skip EFIs that we've already processed.
4578 */
4579 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4580 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4581 return 0;
4582
4583 spin_unlock(&ailp->ail_lock);
4584 error = xfs_efi_recover(mp, efip);
4585 spin_lock(&ailp->ail_lock);
4586
4587 return error;
4588}
4589
4590/* Release the EFI since we're cancelling everything. */
4591STATIC void
4592xlog_recover_cancel_efi(
4593 struct xfs_mount *mp,
4594 struct xfs_ail *ailp,
4595 struct xfs_log_item *lip)
4596{
4597 struct xfs_efi_log_item *efip;
4598
4599 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4600
4601 spin_unlock(&ailp->ail_lock);
4602 xfs_efi_release(efip);
4603 spin_lock(&ailp->ail_lock);
4604}
4605
4606/* Recover the RUI if necessary. */
4607STATIC int
4608xlog_recover_process_rui(
4609 struct xfs_mount *mp,
4610 struct xfs_ail *ailp,
4611 struct xfs_log_item *lip)
4612{
4613 struct xfs_rui_log_item *ruip;
4614 int error;
4615
4616 /*
4617 * Skip RUIs that we've already processed.
4618 */
4619 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4620 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4621 return 0;
4622
4623 spin_unlock(&ailp->ail_lock);
4624 error = xfs_rui_recover(mp, ruip);
4625 spin_lock(&ailp->ail_lock);
4626
4627 return error;
4628}
4629
4630/* Release the RUI since we're cancelling everything. */
4631STATIC void
4632xlog_recover_cancel_rui(
4633 struct xfs_mount *mp,
4634 struct xfs_ail *ailp,
4635 struct xfs_log_item *lip)
4636{
4637 struct xfs_rui_log_item *ruip;
4638
4639 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4640
4641 spin_unlock(&ailp->ail_lock);
4642 xfs_rui_release(ruip);
4643 spin_lock(&ailp->ail_lock);
4644}
4645
4646/* Recover the CUI if necessary. */
4647STATIC int
4648xlog_recover_process_cui(
4649 struct xfs_trans *parent_tp,
4650 struct xfs_ail *ailp,
4651 struct xfs_log_item *lip)
4652{
4653 struct xfs_cui_log_item *cuip;
4654 int error;
4655
4656 /*
4657 * Skip CUIs that we've already processed.
4658 */
4659 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4660 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4661 return 0;
4662
4663 spin_unlock(&ailp->ail_lock);
4664 error = xfs_cui_recover(parent_tp, cuip);
4665 spin_lock(&ailp->ail_lock);
4666
4667 return error;
4668}
4669
4670/* Release the CUI since we're cancelling everything. */
4671STATIC void
4672xlog_recover_cancel_cui(
4673 struct xfs_mount *mp,
4674 struct xfs_ail *ailp,
4675 struct xfs_log_item *lip)
4676{
4677 struct xfs_cui_log_item *cuip;
4678
4679 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4680
4681 spin_unlock(&ailp->ail_lock);
4682 xfs_cui_release(cuip);
4683 spin_lock(&ailp->ail_lock);
4684}
4685
4686/* Recover the BUI if necessary. */
4687STATIC int
4688xlog_recover_process_bui(
4689 struct xfs_trans *parent_tp,
4690 struct xfs_ail *ailp,
4691 struct xfs_log_item *lip)
4692{
4693 struct xfs_bui_log_item *buip;
4694 int error;
4695
4696 /*
4697 * Skip BUIs that we've already processed.
4698 */
4699 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4700 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4701 return 0;
4702
4703 spin_unlock(&ailp->ail_lock);
4704 error = xfs_bui_recover(parent_tp, buip);
4705 spin_lock(&ailp->ail_lock);
4706
4707 return error;
4708}
4709
4710/* Release the BUI since we're cancelling everything. */
4711STATIC void
4712xlog_recover_cancel_bui(
4713 struct xfs_mount *mp,
4714 struct xfs_ail *ailp,
4715 struct xfs_log_item *lip)
4716{
4717 struct xfs_bui_log_item *buip;
4718
4719 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4720
4721 spin_unlock(&ailp->ail_lock);
4722 xfs_bui_release(buip);
4723 spin_lock(&ailp->ail_lock);
4724}
4725
4726/* Is this log item a deferred action intent? */
4727static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4728{
4729 switch (lip->li_type) {
4730 case XFS_LI_EFI:
4731 case XFS_LI_RUI:
4732 case XFS_LI_CUI:
4733 case XFS_LI_BUI:
4734 return true;
4735 default:
4736 return false;
4737 }
4738}
4739
4740/* Take all the collected deferred ops and finish them in order. */
4741static int
4742xlog_finish_defer_ops(
4743 struct xfs_trans *parent_tp)
4744{
4745 struct xfs_mount *mp = parent_tp->t_mountp;
4746 struct xfs_trans *tp;
4747 int64_t freeblks;
4748 uint resblks;
4749 int error;
4750
4751 /*
4752 * We're finishing the defer_ops that accumulated as a result of
4753 * recovering unfinished intent items during log recovery. We
4754 * reserve an itruncate transaction because it is the largest
4755 * permanent transaction type. Since we're the only user of the fs
4756 * right now, take 93% (15/16) of the available free blocks. Use
4757 * weird math to avoid a 64-bit division.
4758 */
4759 freeblks = percpu_counter_sum(&mp->m_fdblocks);
4760 if (freeblks <= 0)
4761 return -ENOSPC;
4762 resblks = min_t(int64_t, UINT_MAX, freeblks);
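	/* (x * 15) >> 4 == x * 15 / 16, i.e. ~93.75% of x, with no 64-bit division */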
4763 resblks = (resblks * 15) >> 4;
4764 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4765 0, XFS_TRANS_RESERVE, &tp);
4766 if (error)
4767 return error;
4768 /* transfer all collected dfops to this transaction */
4769 xfs_defer_move(tp, parent_tp);
4770
4771 return xfs_trans_commit(tp);
4772}
4773
4774/*
4775 * When this is called, all of the log intent items which did not have
4776 * corresponding log done items should be in the AIL. What we do now
4777 * is update the data structures associated with each one.
4778 *
4779 * Since we process the log intent items in normal transactions, they
4780 * will be removed at some point after the commit. This prevents us
4781 * from just walking down the list processing each one. We'll use a
4782 * flag in the intent item to skip those that we've already processed
4783 * and use the AIL iteration mechanism's generation count to try to
4784 * speed this up at least a bit.
4785 *
4786 * When we start, we know that the intents are the only things in the
4787 * AIL. As we process them, however, other items are added to the
4788 * AIL.
4789 */
4790STATIC int
4791xlog_recover_process_intents(
4792 struct xlog *log)
4793{
4794 struct xfs_trans *parent_tp;
4795 struct xfs_ail_cursor cur;
4796 struct xfs_log_item *lip;
4797 struct xfs_ail *ailp;
4798 int error;
4799#if defined(DEBUG) || defined(XFS_WARN)
4800 xfs_lsn_t last_lsn;
4801#endif
4802
4803 /*
4804 * The intent recovery handlers commit transactions to complete recovery
4805 * for individual intents, but any new deferred operations that are
4806 * queued during that process are held off until the very end. The
4807 * purpose of this transaction is to serve as a container for deferred
4808 * operations. Each intent recovery handler must transfer dfops here
4809 * before its local transaction commits, and we'll finish the entire
4810 * list below.
4811 */
4812 error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
4813 if (error)
4814 return error;
4815
4816 ailp = log->l_ailp;
4817 spin_lock(&ailp->ail_lock);
4818 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4819#if defined(DEBUG) || defined(XFS_WARN)
4820 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4821#endif
4822 while (lip != NULL) {
4823 /*
4824 * We're done when we see something other than an intent.
4825 * There should be no intents left in the AIL now.
4826 */
4827 if (!xlog_item_is_intent(lip)) {
4828#ifdef DEBUG
4829 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4830 ASSERT(!xlog_item_is_intent(lip));
4831#endif
4832 break;
4833 }
4834
4835 /*
4836		 * We should never see a redo item with an LSN higher than
4837 * the last transaction we found in the log at the start
4838 * of recovery.
4839 */
4840 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4841
4842 /*
4843 * NOTE: If your intent processing routine can create more
4844 * deferred ops, you /must/ attach them to the dfops in this
4845 * routine or else those subsequent intents will get
4846 * replayed in the wrong order!
4847 */
4848 switch (lip->li_type) {
4849 case XFS_LI_EFI:
4850 error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4851 break;
4852 case XFS_LI_RUI:
4853 error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4854 break;
4855 case XFS_LI_CUI:
4856 error = xlog_recover_process_cui(parent_tp, ailp, lip);
4857 break;
4858 case XFS_LI_BUI:
4859 error = xlog_recover_process_bui(parent_tp, ailp, lip);
4860 break;
4861 }
4862 if (error)
4863 goto out;
4864 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4865 }
4866out:
4867 xfs_trans_ail_cursor_done(&cur);
4868 spin_unlock(&ailp->ail_lock);
4869 if (!error)
4870 error = xlog_finish_defer_ops(parent_tp);
4871 xfs_trans_cancel(parent_tp);
4872
4873 return error;
4874}
4875
4876/*
4877 * A cancel occurs when the mount has failed and we're bailing out.
4878 * Release all pending log intent items so they don't pin the AIL.
4879 */
4880STATIC void
4881xlog_recover_cancel_intents(
4882 struct xlog *log)
4883{
4884 struct xfs_log_item *lip;
4885 struct xfs_ail_cursor cur;
4886 struct xfs_ail *ailp;
4887
4888 ailp = log->l_ailp;
4889 spin_lock(&ailp->ail_lock);
4890 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4891 while (lip != NULL) {
4892 /*
4893 * We're done when we see something other than an intent.
4894 * There should be no intents left in the AIL now.
4895 */
4896 if (!xlog_item_is_intent(lip)) {
4897#ifdef DEBUG
4898 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4899 ASSERT(!xlog_item_is_intent(lip));
4900#endif
4901 break;
4902 }
4903
4904 switch (lip->li_type) {
4905 case XFS_LI_EFI:
4906 xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4907 break;
4908 case XFS_LI_RUI:
4909 xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4910 break;
4911 case XFS_LI_CUI:
4912 xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4913 break;
4914 case XFS_LI_BUI:
4915 xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4916 break;
4917 }
4918
4919 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4920 }
4921
4922 xfs_trans_ail_cursor_done(&cur);
4923 spin_unlock(&ailp->ail_lock);
4924}
4925
4926/*
4927 * This routine performs a transaction to null out a bad inode pointer
4928 * in an agi unlinked inode hash bucket.
4929 */
4930STATIC void
4931xlog_recover_clear_agi_bucket(
4932 xfs_mount_t *mp,
4933 xfs_agnumber_t agno,
4934 int bucket)
4935{
4936 xfs_trans_t *tp;
4937 xfs_agi_t *agi;
4938 xfs_buf_t *agibp;
4939 int offset;
4940 int error;
4941
4942 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4943 if (error)
4944 goto out_error;
4945
4946 error = xfs_read_agi(mp, tp, agno, &agibp);
4947 if (error)
4948 goto out_abort;
4949
4950 agi = XFS_BUF_TO_AGI(agibp);
4951 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4952 offset = offsetof(xfs_agi_t, agi_unlinked) +
4953 (sizeof(xfs_agino_t) * bucket);
4954 xfs_trans_log_buf(tp, agibp, offset,
4955 (offset + sizeof(xfs_agino_t) - 1));
4956
4957 error = xfs_trans_commit(tp);
4958 if (error)
4959 goto out_error;
4960 return;
4961
4962out_abort:
4963 xfs_trans_cancel(tp);
4964out_error:
4965 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4966 return;
4967}
4968
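/*
 * Process one inode on an AGI unlinked list: grab a reference with
 * xfs_iget() so that the final xfs_irele() inactivates and frees it, and
 * read the on-disk inode to find the next entry in the bucket. Returns the
 * next agino, or NULLAGINO after ditching the bucket on error.
 */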
4969STATIC xfs_agino_t
4970xlog_recover_process_one_iunlink(
4971 struct xfs_mount *mp,
4972 xfs_agnumber_t agno,
4973 xfs_agino_t agino,
4974 int bucket)
4975{
4976 struct xfs_buf *ibp;
4977 struct xfs_dinode *dip;
4978 struct xfs_inode *ip;
4979 xfs_ino_t ino;
4980 int error;
4981
4982 ino = XFS_AGINO_TO_INO(mp, agno, agino);
4983 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4984 if (error)
4985 goto fail;
4986
4987 /*
4988 * Get the on disk inode to find the next inode in the bucket.
4989 */
4990 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4991 if (error)
4992 goto fail_iput;
4993
4994 xfs_iflags_clear(ip, XFS_IRECOVERY);
4995 ASSERT(VFS_I(ip)->i_nlink == 0);
4996 ASSERT(VFS_I(ip)->i_mode != 0);
4997
4998 /* setup for the next pass */
4999 agino = be32_to_cpu(dip->di_next_unlinked);
5000 xfs_buf_relse(ibp);
5001
5002 /*
5003 * Prevent any DMAPI event from being sent when the reference on
5004 * the inode is dropped.
5005 */
5006 ip->i_d.di_dmevmask = 0;
5007
5008 xfs_irele(ip);
5009 return agino;
5010
5011 fail_iput:
5012 xfs_irele(ip);
5013 fail:
5014 /*
5015 * We can't read in the inode this bucket points to, or this inode
5016 * is messed up. Just ditch this bucket of inodes. We will lose
5017 * some inodes and space, but at least we won't hang.
5018 *
5019 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5020 * clear the inode pointer in the bucket.
5021 */
5022 xlog_recover_clear_agi_bucket(mp, agno, bucket);
5023 return NULLAGINO;
5024}
5025
5026/*
5027 * Recover AGI unlinked lists
5028 *
5029 * This is called during recovery to process any inodes which we unlinked but
5030 * not freed when the system crashed. These inodes will be on the lists in the
5031 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
5032 * any inodes found on the lists. Each inode is removed from the lists when it
5033 * has been fully truncated and is freed. The freeing of the inode and its
5034 * removal from the list must be atomic.
5035 *
5036 * If everything we touch in the agi processing loop is already in memory, this
5037 * loop can hold the cpu for a long time. It runs without lock contention,
5038 * memory allocation contention, the need to wait for IO, etc., and so will
5039 * run until we either run out of inodes to process, run low on memory or run
5040 * out of log space.
5041 *
5042 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
5043 * and can prevent other filesystem work (such as CIL pushes) from running. This
5044 * can lead to deadlocks if the recovery process runs out of log reservation
5045 * space. Hence we need to yield the CPU when there is other kernel work
5046 * scheduled on this CPU to ensure other scheduled work can run without undue
5047 * latency.
5048 */
5049STATIC void
5050xlog_recover_process_iunlinks(
5051 struct xlog *log)
5052{
5053 xfs_mount_t *mp;
5054 xfs_agnumber_t agno;
5055 xfs_agi_t *agi;
5056 xfs_buf_t *agibp;
5057 xfs_agino_t agino;
5058 int bucket;
5059 int error;
5060
5061 mp = log->l_mp;
5062
5063 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5064 /*
5065 * Find the agi for this ag.
5066 */
5067 error = xfs_read_agi(mp, NULL, agno, &agibp);
5068 if (error) {
5069 /*
5070 * AGI is b0rked. Don't process it.
5071 *
5072 * We should probably mark the filesystem as corrupt
5073			 * after we've recovered all the AGs we can...
5074 */
5075 continue;
5076 }
5077 /*
5078 * Unlock the buffer so that it can be acquired in the normal
5079 * course of the transaction to truncate and free each inode.
5080 * Because we are not racing with anyone else here for the AGI
5081 * buffer, we don't even need to hold it locked to read the
5082		 * initial unlinked bucket entries out of the buffer. We keep a
5083		 * buffer reference, though, so that it stays pinned in memory
5084 * while we need the buffer.
5085 */
5086 agi = XFS_BUF_TO_AGI(agibp);
5087 xfs_buf_unlock(agibp);
5088
5089 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5090 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5091 while (agino != NULLAGINO) {
5092 agino = xlog_recover_process_one_iunlink(mp,
5093 agno, agino, bucket);
5094 cond_resched();
5095 }
5096 }
5097 xfs_buf_rele(agibp);
5098 }
5099}
5100
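/*
 * Undo the cycle number stamping that was applied when the record was
 * written: the first word of each basic block in the record body was
 * replaced with the record's cycle number, and the original words were
 * saved in the record header (and extended headers for v2 logs). Put the
 * original words back.
 */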
5101STATIC void
5102xlog_unpack_data(
5103 struct xlog_rec_header *rhead,
5104 char *dp,
5105 struct xlog *log)
5106{
5107 int i, j, k;
5108
5109 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5110 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5111 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5112 dp += BBSIZE;
5113 }
5114
5115 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5116 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5117 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5118 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5119 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5120 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5121 dp += BBSIZE;
5122 }
5123 }
5124}
5125
5126/*
5127 * CRC check, unpack and process a log record.
5128 */
5129STATIC int
5130xlog_recover_process(
5131 struct xlog *log,
5132 struct hlist_head rhash[],
5133 struct xlog_rec_header *rhead,
5134 char *dp,
5135 int pass,
5136 struct list_head *buffer_list)
5137{
5138 __le32 old_crc = rhead->h_crc;
5139 __le32 crc;
5140
5141 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5142
5143 /*
5144 * Nothing else to do if this is a CRC verification pass. Just return
5145	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
5146 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5147 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5148 * know precisely what failed.
5149 */
5150 if (pass == XLOG_RECOVER_CRCPASS) {
5151 if (old_crc && crc != old_crc)
5152 return -EFSBADCRC;
5153 return 0;
5154 }
5155
5156 /*
5157	 * We're in the normal recovery path. On a CRC mismatch, warn if the
5158	 * on-disk CRC is non-zero or the filesystem has CRCs enabled. The
5159	 * zero CRC check prevents warnings from being emitted when upgrading
5160	 * the kernel from one that does not add CRCs by default.
5161 */
5162 if (crc != old_crc) {
5163 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5164 xfs_alert(log->l_mp,
5165 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5166 le32_to_cpu(old_crc),
5167 le32_to_cpu(crc));
5168 xfs_hex_dump(dp, 32);
5169 }
5170
5171 /*
5172 * If the filesystem is CRC enabled, this mismatch becomes a
5173 * fatal log corruption failure.
5174 */
5175 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5176 return -EFSCORRUPTED;
5177 }
5178
5179 xlog_unpack_data(rhead, dp, log);
5180
5181 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5182 buffer_list);
5183}
5184
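/*
 * Sanity check a log record header before it is used: the magic number,
 * version, record length and block address must all be plausible.
 */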
5185STATIC int
5186xlog_valid_rec_header(
5187 struct xlog *log,
5188 struct xlog_rec_header *rhead,
5189 xfs_daddr_t blkno)
5190{
5191 int hlen;
5192
5193 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5194 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5195 XFS_ERRLEVEL_LOW, log->l_mp);
5196 return -EFSCORRUPTED;
5197 }
5198 if (unlikely(
5199 (!rhead->h_version ||
5200 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5201 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5202 __func__, be32_to_cpu(rhead->h_version));
5203 return -EIO;
5204 }
5205
5206 /* LR body must have data or it wouldn't have been written */
5207 hlen = be32_to_cpu(rhead->h_len);
5208	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5209 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5210 XFS_ERRLEVEL_LOW, log->l_mp);
5211 return -EFSCORRUPTED;
5212 }
5213	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5214 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5215 XFS_ERRLEVEL_LOW, log->l_mp);
5216 return -EFSCORRUPTED;
5217 }
5218 return 0;
5219}
5220
5221/*
5222 * Read the log from tail to head and process the log records found.
5223 * Handle the two cases where the tail and head are in the same cycle
5224 * and where the active portion of the log wraps around the end of
5225 * the physical log separately. The pass parameter is passed through
5226 * to the routines called to process the data and is not looked at
5227 * here.
5228 */
5229STATIC int
5230xlog_do_recovery_pass(
5231 struct xlog *log,
5232 xfs_daddr_t head_blk,
5233 xfs_daddr_t tail_blk,
5234 int pass,
5235 xfs_daddr_t *first_bad) /* out: first bad log rec */
5236{
5237 xlog_rec_header_t *rhead;
5238 xfs_daddr_t blk_no, rblk_no;
5239 xfs_daddr_t rhead_blk;
5240 char *offset;
5241 char *hbp, *dbp;
5242 int error = 0, h_size, h_len;
5243 int error2 = 0;
5244 int bblks, split_bblks;
5245 int hblks, split_hblks, wrapped_hblks;
5246 int i;
5247 struct hlist_head rhash[XLOG_RHASH_SIZE];
5248	LIST_HEAD(buffer_list);
5249
5250 ASSERT(head_blk != tail_blk);
5251 blk_no = rhead_blk = tail_blk;
5252
5253 for (i = 0; i < XLOG_RHASH_SIZE; i++)
5254 INIT_HLIST_HEAD(&rhash[i]);
5255
5256 /*
5257 * Read the header of the tail block and get the iclog buffer size from
5258 * h_size. Use this to tell how many sectors make up the log header.
5259 */
5260 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5261 /*
5262 * When using variable length iclogs, read first sector of
5263 * iclog header and extract the header size from it. Get a
5264 * new hbp that is the correct size.
5265 */
5266 hbp = xlog_alloc_buffer(log, 1);
5267 if (!hbp)
5268 return -ENOMEM;
5269
5270 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5271 if (error)
5272 goto bread_err1;
5273
5274 rhead = (xlog_rec_header_t *)offset;
5275 error = xlog_valid_rec_header(log, rhead, tail_blk);
5276 if (error)
5277 goto bread_err1;
5278
5279 /*
5280 * xfsprogs has a bug where record length is based on lsunit but
5281 * h_size (iclog size) is hardcoded to 32k. Now that we
5282 * unconditionally CRC verify the unmount record, this means the
5283 * log buffer can be too small for the record and cause an
5284 * overrun.
5285 *
5286 * Detect this condition here. Use lsunit for the buffer size as
5287 * long as this looks like the mkfs case. Otherwise, return an
5288 * error to avoid a buffer overrun.
5289 */
5290 h_size = be32_to_cpu(rhead->h_size);
5291 h_len = be32_to_cpu(rhead->h_len);
5292 if (h_len > h_size) {
5293 if (h_len <= log->l_mp->m_logbsize &&
5294 be32_to_cpu(rhead->h_num_logops) == 1) {
5295 xfs_warn(log->l_mp,
5296 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5297 h_size, log->l_mp->m_logbsize);
5298 h_size = log->l_mp->m_logbsize;
5299			} else {
5300				error = -EFSCORRUPTED;
				goto bread_err1;
			}
5301 }
5302
5303 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5304 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5305 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5306 if (h_size % XLOG_HEADER_CYCLE_SIZE)
5307 hblks++;
5308 kmem_free(hbp);
5309 hbp = xlog_alloc_buffer(log, hblks);
5310 } else {
5311 hblks = 1;
5312 }
5313 } else {
5314 ASSERT(log->l_sectBBsize == 1);
5315 hblks = 1;
5316 hbp = xlog_alloc_buffer(log, 1);
5317 h_size = XLOG_BIG_RECORD_BSIZE;
5318 }
5319
5320 if (!hbp)
5321 return -ENOMEM;
5322 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
5323 if (!dbp) {
5324 kmem_free(hbp);
5325 return -ENOMEM;
5326 }
5327
5328 memset(rhash, 0, sizeof(rhash));
5329 if (tail_blk > head_blk) {
5330 /*
5331 * Perform recovery around the end of the physical log.
5332 * When the head is not on the same cycle number as the tail,
5333 * we can't do a sequential recovery.
5334 */
5335 while (blk_no < log->l_logBBsize) {
5336 /*
5337 * Check for header wrapping around physical end-of-log
5338 */
5339 offset = hbp;
5340 split_hblks = 0;
5341 wrapped_hblks = 0;
5342 if (blk_no + hblks <= log->l_logBBsize) {
5343 /* Read header in one read */
5344 error = xlog_bread(log, blk_no, hblks, hbp,
5345 &offset);
5346 if (error)
5347 goto bread_err2;
5348 } else {
5349 /* This LR is split across physical log end */
5350 if (blk_no != log->l_logBBsize) {
5351 /* some data before physical log end */
5352 ASSERT(blk_no <= INT_MAX);
5353 split_hblks = log->l_logBBsize - (int)blk_no;
5354 ASSERT(split_hblks > 0);
5355 error = xlog_bread(log, blk_no,
5356 split_hblks, hbp,
5357 &offset);
5358 if (error)
5359 goto bread_err2;
5360 }
5361
5362 /*
5363 * Note: this black magic still works with
5364 * large sector sizes (non-512) only because:
5365 * - we increased the buffer size originally
5366 * by 1 sector giving us enough extra space
5367 * for the second read;
5368 * - the log start is guaranteed to be sector
5369 * aligned;
5370 * - we read the log end (LR header start)
5371 * _first_, then the log start (LR header end)
5372 * - order is important.
5373 */
5374 wrapped_hblks = hblks - split_hblks;
5375 error = xlog_bread_noalign(log, 0,
5376 wrapped_hblks,
5377 offset + BBTOB(split_hblks));
5378 if (error)
5379 goto bread_err2;
5380 }
5381 rhead = (xlog_rec_header_t *)offset;
5382 error = xlog_valid_rec_header(log, rhead,
5383 split_hblks ? blk_no : 0);
5384 if (error)
5385 goto bread_err2;
5386
5387 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5388 blk_no += hblks;
5389
5390 /*
5391 * Read the log record data in multiple reads if it
5392 * wraps around the end of the log. Note that if the
5393 * header already wrapped, blk_no could point past the
5394 * end of the log. The record data is contiguous in
5395 * that case.
5396 */
5397 if (blk_no + bblks <= log->l_logBBsize ||
5398 blk_no >= log->l_logBBsize) {
5399 rblk_no = xlog_wrap_logbno(log, blk_no);
5400 error = xlog_bread(log, rblk_no, bblks, dbp,
5401 &offset);
5402 if (error)
5403 goto bread_err2;
5404 } else {
5405 /* This log record is split across the
5406 * physical end of log */
5407 offset = dbp;
5408 split_bblks = 0;
5409 if (blk_no != log->l_logBBsize) {
5410 /* some data is before the physical
5411 * end of log */
5412 ASSERT(!wrapped_hblks);
5413 ASSERT(blk_no <= INT_MAX);
5414 split_bblks =
5415 log->l_logBBsize - (int)blk_no;
5416 ASSERT(split_bblks > 0);
5417 error = xlog_bread(log, blk_no,
5418 split_bblks, dbp,
5419 &offset);
5420 if (error)
5421 goto bread_err2;
5422 }
5423
5424 /*
5425 * Note: this black magic still works with
5426 * large sector sizes (non-512) only because:
5427 * - we increased the buffer size originally
5428 * by 1 sector giving us enough extra space
5429 * for the second read;
5430 * - the log start is guaranteed to be sector
5431 * aligned;
5432 * - we read the log end (LR header start)
5433 * _first_, then the log start (LR header end)
5434 * - order is important.
5435 */
5436 error = xlog_bread_noalign(log, 0,
5437 bblks - split_bblks,
5438 offset + BBTOB(split_bblks));
5439 if (error)
5440 goto bread_err2;
5441 }
5442
5443 error = xlog_recover_process(log, rhash, rhead, offset,
5444 pass, &buffer_list);
5445 if (error)
5446 goto bread_err2;
5447
5448 blk_no += bblks;
5449 rhead_blk = blk_no;
5450 }
5451
5452 ASSERT(blk_no >= log->l_logBBsize);
5453 blk_no -= log->l_logBBsize;
5454 rhead_blk = blk_no;
5455 }
5456
5457 /* read first part of physical log */
5458 while (blk_no < head_blk) {
5459 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5460 if (error)
5461 goto bread_err2;
5462
5463 rhead = (xlog_rec_header_t *)offset;
5464 error = xlog_valid_rec_header(log, rhead, blk_no);
5465 if (error)
5466 goto bread_err2;
5467
5468 /* blocks in data section */
5469 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5470 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5471 &offset);
5472 if (error)
5473 goto bread_err2;
5474
5475 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5476 &buffer_list);
5477 if (error)
5478 goto bread_err2;
5479
5480 blk_no += bblks + hblks;
5481 rhead_blk = blk_no;
5482 }
5483
5484 bread_err2:
5485 kmem_free(dbp);
5486 bread_err1:
5487 kmem_free(hbp);
5488
5489 /*
5490 * Submit buffers that have been added from the last record processed,
5491 * regardless of error status.
5492 */
5493 if (!list_empty(&buffer_list))
5494 error2 = xfs_buf_delwri_submit(&buffer_list);
5495
5496 if (error && first_bad)
5497 *first_bad = rhead_blk;
5498
5499 /*
5500 * Transactions are freed at commit time but transactions without commit
5501 * records on disk are never committed. Free any that may be left in the
5502 * hash table.
5503 */
5504 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5505 struct hlist_node *tmp;
5506 struct xlog_recover *trans;
5507
5508 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5509 xlog_recover_free_trans(trans);
5510 }
5511
5512 return error ? error : error2;
5513}
5514
5515/*
5516 * Do the recovery of the log. We actually do this in two phases.
5517 * The two passes are necessary in order to implement the function
5518 * of cancelling a record written into the log. The first pass
5519 * determines those things which have been cancelled, and the
5520 * second pass replays log items normally except for those which
5521 * have been cancelled. The handling of the replay and cancellations
5522 * takes place in the log item type specific routines.
5523 *
5524 * The table of items which have cancel records in the log is allocated
5525 * and freed at this level, since only here do we know when all of
5526 * the log recovery has been completed.
5527 */
5528STATIC int
5529xlog_do_log_recovery(
5530 struct xlog *log,
5531 xfs_daddr_t head_blk,
5532 xfs_daddr_t tail_blk)
5533{
5534 int error, i;
5535
5536 ASSERT(head_blk != tail_blk);
5537
5538 /*
5539 * First do a pass to find all of the cancelled buf log items.
5540 * Store them in the buf_cancel_table for use in the second pass.
5541 */
5542 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5543 sizeof(struct list_head),
5544 0);
5545 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5546 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5547
5548 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5549 XLOG_RECOVER_PASS1, NULL);
5550 if (error != 0) {
5551 kmem_free(log->l_buf_cancel_table);
5552 log->l_buf_cancel_table = NULL;
5553 return error;
5554 }
5555 /*
5556 * Then do a second pass to actually recover the items in the log.
5557 * When it is complete free the table of buf cancel items.
5558 */
5559 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5560 XLOG_RECOVER_PASS2, NULL);
5561#ifdef DEBUG
5562 if (!error) {
5563 int i;
5564
5565 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5566 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5567 }
5568#endif /* DEBUG */
5569
5570 kmem_free(log->l_buf_cancel_table);
5571 log->l_buf_cancel_table = NULL;
5572
5573 return error;
5574}
5575
5576/*
5577 * Do the actual recovery
5578 */
5579STATIC int
5580xlog_do_recover(
5581 struct xlog *log,
5582 xfs_daddr_t head_blk,
5583 xfs_daddr_t tail_blk)
5584{
5585 struct xfs_mount *mp = log->l_mp;
5586 int error;
5587 xfs_buf_t *bp;
5588 xfs_sb_t *sbp;
5589
5590 trace_xfs_log_recover(log, head_blk, tail_blk);
5591
5592 /*
5593 * First replay the images in the log.
5594 */
5595 error = xlog_do_log_recovery(log, head_blk, tail_blk);
5596 if (error)
5597 return error;
5598
5599 /*
5600 * If IO errors happened during recovery, bail out.
5601 */
5602	if (XFS_FORCED_SHUTDOWN(mp))
5603		return -EIO;
5605
5606 /*
5607 * We now update the tail_lsn since much of the recovery has completed
5608	 * and there may be space available to use. If there were no extent frees
5609	 * or iunlinks, we can free up the entire log and set the tail_lsn to
5610 * be the last_sync_lsn. This was set in xlog_find_tail to be the
5611 * lsn of the last known good LR on disk. If there are extent frees
5612 * or iunlinks they will have some entries in the AIL; so we look at
5613 * the AIL to determine how to set the tail_lsn.
5614 */
5615 xlog_assign_tail_lsn(mp);
5616
5617 /*
5618 * Now that we've finished replaying all buffer and inode
5619 * updates, re-read in the superblock and reverify it.
5620 */
5621 bp = xfs_getsb(mp);
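	/* reissue the cached sb buffer as a synchronous, verified read */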
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
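
/*
 * A compiled-out sketch of the tail LSN policy described above
 * xlog_assign_tail_lsn(): if the AIL still holds recovered items, the
 * tail stays pinned at the oldest of them; otherwise the whole log is
 * free back to the last synced LSN.  example_pick_tail_lsn() is
 * hypothetical; the real logic lives in xlog_assign_tail_lsn_locked().
 */
#if 0
static xfs_lsn_t
example_pick_tail_lsn(struct xlog *log)
{
	xfs_lsn_t	tail_lsn = xfs_ail_min_lsn(log->l_ailp);

	if (!tail_lsn)	/* AIL empty: all recovered changes are stable */
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	return tail_lsn;
}
#endif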

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the
	 * LSN could not be verified.  Check the superblock LSN against the
	 * current LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/*
		 * There used to be a comment here:
		 *
		 *   disallow recovery on read-only mounts.  note -- mount
		 *   checks for ENOSPC and turns it into an intelligent
		 *   error message.
		 *
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		error = xfs_dev_is_read_only(log->l_mp, "recovery");
		if (error)
			return error;

		/*
		 * Version 5 superblock log feature mask validation.  We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover.  If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 * Note that XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN covers every
		 * bit outside the set this kernel implements.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log cannot be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set.  This is debug
		 * instrumentation to coordinate simulation of I/O failures
		 * with log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build up
 * the list of intents (e.g. extent free items) which need to be processed.
 * Here we process those intents and clean up the on-disk unlinked inode
 * lists.  This is separated from the first part of recovery so that the
 * root and real-time bitmap inodes can be read in from disk in between
 * the two stages.  This is necessary so that we can free space in the
 * real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the rest of
	 * recovery.  Start with completing all the extent free intent
	 * records and then process the unlinked inode lists.  At this
	 * point, we essentially run in normal mode except that we're
	 * still performing recovery actions rather than accepting new
	 * requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_intents(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL.  This
		 * isn't absolutely necessary, but it helps in case the
		 * unlinked inode processing would otherwise have to push
		 * those intents out of the way itself.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
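
/*
 * A minimal sketch of the intended call sequence, assuming a simplified
 * mount path with error handling elided; the real callers are the log
 * mount and mount-finish paths, and example_mount_time_recovery() is
 * hypothetical.
 */
#if 0
static int
example_mount_time_recovery(struct xlog *log)
{
	int	error;

	/* phase 1: replay buffer/inode changes, collect intents */
	error = xlog_recover(log);
	if (error)
		return error;

	/* ... read in the root and realtime bitmap inodes here ... */

	/* phase 2: process intents and the unlinked inode lists */
	return xlog_recover_finish(log);
}
#endif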

void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		xlog_recover_cancel_intents(log);
}

#if defined(DEBUG)
/*
 * Read all of the AGF and AGI counters and accumulate summary totals.
 * Note that the totals are not actually compared against the superblock
 * counters below; reading the headers still exercises their verifiers.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_agf		*agfp;
	struct xfs_buf		*agfbp;
	struct xfs_buf		*agibp;
	xfs_agnumber_t		agno;
	uint64_t		freeblks;
	uint64_t		itotal;
	uint64_t		ifree;
	int			error;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
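
	/*
	 * A hypothetical cross-check of the kind the function name
	 * suggests (the superblock field names are real, the comparison
	 * is illustrative only):
	 *
	 *	ASSERT(freeblks == mp->m_sb.sb_fdblocks);
	 *	ASSERT(itotal == mp->m_sb.sb_icount);
	 *	ASSERT(ifree == mp->m_sb.sb_ifree);
	 *
	 * With lazy superblock counters these totals can legitimately
	 * differ from the on-disk superblock, which is presumably why
	 * no comparison is made here.
	 */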
}
#endif	/* DEBUG */