// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
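
/*
 * Worked example (for illustration): BLK_AVG(3, 8) == (3 + 8) >> 1 == 5,
 * rounding the midpoint down.  This is the midpoint the binary search in
 * xlog_find_cycle_start() below iterates on.
 */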

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer.  If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue.  Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1).  But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
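
/*
 * Worked example of the sizing above (derived from the code, for
 * illustration): with 4k log sectors on 512 byte basic blocks,
 * l_sectBBsize == 8.  A request for nbblks == 10 first grows to 18 to
 * cover a misaligned start, then rounds up to 24 basic blocks, i.e.
 * three whole log sectors.
 */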

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
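
/*
 * Example: with l_sectBBsize == 8, block 21 sits 5 basic blocks into its
 * log sector (21 & 7 == 5), so xlog_align() returns a byte offset of
 * 5 * 512 == 2560 into the buffer.
 */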

static int
xlog_do_io(
	struct xlog		*log,
	xfs_daddr_t		blk_no,
	unsigned int		nbblks,
	char			*data,
	enum req_op		op)
{
	int			error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !xlog_is_shutdown(log)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}

STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}
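
/*
 * Usage pattern (illustrative): callers allocate one sector-padded buffer
 * with xlog_alloc_buffer() and read through xlog_bread(), which hands back
 * *offset pointing at the requested block inside that buffer.  E.g. a
 * single-block read of block 21 with 4k sectors actually reads blocks
 * 16-23 and sets *offset to data + 2560.
 */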

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
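
/*
 * Worked example of the search above (illustrative): eight log blocks
 * stamped with cycles { 9, 9, 9, 8, 8, 8, 8, 8 }, first_blk = 0,
 * *last_blk = 7 and cycle == 8 (the last half cycle).  The window
 * narrows (0,7) -> (0,3) -> (1,3) -> (2,3) and the routine returns
 * *last_blk = 3, the first block stamped with cycle 8.
 */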

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kvfree(buffer);
	return error;
}

static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_has_logv2(log->l_mp)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}
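
/*
 * Example: a v2 log using 64k iclogs needs DIV_ROUND_UP(65536,
 * XLOG_HEADER_CYCLE_SIZE) == 2 header blocks per record, since one
 * header block only carries cycle data for 32k of log space.  v1 logs
 * and v2 logs with iclogs of 32k or less always use a single header
 * block.
 */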

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kvfree(buffer);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kvfree(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kvfree(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
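
/*
 * Example of the wrap handling above: with head_blk == tail_blk == 100
 * on a 1000 block log (the "tail not yet known" case), the first loop
 * scans blocks 99..0 and the wrapped loop then scans 999..100, so every
 * block is inspected exactly once.
 */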

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
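
/*
 * Example: on a 1000 block log with head_blk == 900 and tail_blk == 100,
 * the head has wrapped past the tail, so the unused distance is
 * 100 + (1000 - 900) == 200 blocks.
 */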

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kvfree(buffer);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kvfree(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}
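
/*
 * Example: on a 1000 block log, a computed block number of 1005 (e.g. a
 * record header near the end of the log plus its payload length) wraps
 * to block 5.
 */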

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for an unmount record. If we find it, then we know there was a
	 * clean unmount. Since the block after the unmount record could sit
	 * past the end of the physical log, we wrap it to a log block before
	 * comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	hblks = xlog_logrec_hblks(log, rhead);
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			log->l_ailp->ail_head_lsn =
					atomic64_read(&log->l_tail_lsn);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
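
/*
 * Example layout for a typical clean unmount on a v1 log (one header
 * block, payload that fits in one basic block): umount_data_blk ==
 * rhead_blk + 1 and after_umount_blk == rhead_blk + 2, which is exactly
 * where head_blk must point for the log to be considered clean.
 */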

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		xfs_set_clean(log->l_mp);

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kvfree(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1 => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;
	int		ret = 1;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		goto out_free_buffer;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		ret = 0;
		goto out_free_buffer;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kvfree(buffer);
	if (error)
		return error;
	return ret;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
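
/*
 * Note that the memset() above leaves h_len zero, so each stamped block
 * is an empty, header-only record: recovery sees a valid header with the
 * old cycle number but no operations to replay (the "zero-length
 * records" mentioned in xlog_verify_tail() above).
 */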

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;

		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kvfree(buffer);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
1715
1716/*
1717 * Release the recovered intent item in the AIL that matches the given intent
1718 * type and intent id.
1719 */
1720void
1721xlog_recover_release_intent(
1722	struct xlog			*log,
1723	unsigned short			intent_type,
1724	uint64_t			intent_id)
1725{
1726	struct xfs_defer_pending	*dfp, *n;
1727
1728	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
1729		struct xfs_log_item	*lip = dfp->dfp_intent;
1730
1731		if (lip->li_type != intent_type)
1732			continue;
1733		if (!lip->li_ops->iop_match(lip, intent_id))
1734			continue;
1735
1736		ASSERT(xlog_item_is_intent(lip));
1737
1738		xfs_defer_cancel_recovery(log->l_mp, dfp);
1739	}
1740}
1741
1742int
1743xlog_recover_iget(
1744	struct xfs_mount	*mp,
1745	xfs_ino_t		ino,
1746	struct xfs_inode	**ipp)
1747{
1748	int			error;
1749
1750	error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
1751	if (error)
1752		return error;
1753
1754	error = xfs_qm_dqattach(*ipp);
1755	if (error) {
1756		xfs_irele(*ipp);
1757		return error;
1758	}
1759
1760	if (VFS_I(*ipp)->i_nlink == 0)
1761		xfs_iflags_set(*ipp, XFS_IRECOVERY);
1762
1763	return 0;
1764}
1765
1766/*
1767 * Get an inode so that we can recover a log operation.
1768 *
1769 * Log intent items that target inodes effectively contain a file handle.
1770 * Check that the generation number matches the intent item like we do for
1771 * other file handles.  Log intent items defined after this validation weakness
1772 * was identified must use this function.
1773 */
1774int
1775xlog_recover_iget_handle(
1776	struct xfs_mount	*mp,
1777	xfs_ino_t		ino,
1778	uint32_t		gen,
1779	struct xfs_inode	**ipp)
1780{
1781	struct xfs_inode	*ip;
1782	int			error;
1783
1784	error = xlog_recover_iget(mp, ino, &ip);
1785	if (error)
1786		return error;
1787
1788	if (VFS_I(ip)->i_generation != gen) {
1789		xfs_irele(ip);
1790		return -EFSCORRUPTED;
1791	}
1792
1793	*ipp = ip;
1794	return 0;
1795}
1796
1797/******************************************************************************
1798 *
1799 *		Log recover routines
1800 *
1801 ******************************************************************************
1802 */
1803static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1804	&xlog_buf_item_ops,
1805	&xlog_inode_item_ops,
1806	&xlog_dquot_item_ops,
1807	&xlog_quotaoff_item_ops,
1808	&xlog_icreate_item_ops,
1809	&xlog_efi_item_ops,
1810	&xlog_efd_item_ops,
1811	&xlog_rui_item_ops,
1812	&xlog_rud_item_ops,
1813	&xlog_cui_item_ops,
1814	&xlog_cud_item_ops,
1815	&xlog_bui_item_ops,
1816	&xlog_bud_item_ops,
1817	&xlog_attri_item_ops,
1818	&xlog_attrd_item_ops,
1819	&xlog_xmi_item_ops,
1820	&xlog_xmd_item_ops,
1821	&xlog_rtefi_item_ops,
1822	&xlog_rtefd_item_ops,
1823};
1824
1825static const struct xlog_recover_item_ops *
1826xlog_find_item_ops(
1827	struct xlog_recover_item		*item)
1828{
1829	unsigned int				i;
1830
1831	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1832		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1833			return xlog_recover_item_ops[i];
1834
1835	return NULL;
1836}
1837
1838/*
1839 * Sort the log items in the transaction.
1840 *
1841 * The ordering constraints are defined by the inode allocation and unlink
1842 * behaviour. The rules are:
1843 *
1844 *	1. Every item is only logged once in a given transaction. Hence it
1845 *	   represents the last logged state of the item. Hence ordering is
1846 *	   dependent on the order in which operations need to be performed so
1847 *	   required initial conditions are always met.
1848 *
1849 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1850 *	   there's nothing to replay from them so we can simply cull them
1851 *	   from the transaction. However, we can't do that until after we've
1852 *	   replayed all the other items because they may be dependent on the
1853 *	   cancelled buffer and replaying the cancelled buffer can remove it
1854	 *	   from the cancelled buffer table. Hence they have to be done last.
1855 *
1856 *	3. Inode allocation buffers must be replayed before inode items that
1857 *	   read the buffer and replay changes into it. For filesystems using the
1858 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1859 *	   treated the same as inode allocation buffers as they create and
1860 *	   initialise the buffers directly.
1861 *
1862 *	4. Inode unlink buffers must be replayed after inode items are replayed.
1863 *	   This ensures that inodes are completely flushed to the inode buffer
1864 *	   in a "free" state before we remove the unlinked inode list pointer.
1865 *
1866 * Hence the ordering needs to be inode allocation buffers first, inode items
1867 * second, inode unlink buffers third and cancelled buffers last.
1868 *
1869 * But there's a problem with that - we can't tell an inode allocation buffer
1870 * apart from a regular buffer, so we can't separate them. We can, however,
1871 * tell an inode unlink buffer from the others, and so we can separate them out
1872 * from all the other buffers and move them to last.
1873 *
1874 * Hence, 4 lists, in order from head to tail:
1875 *	- buffer_list for all buffers except cancelled/inode unlink buffers
1876 *	- item_list for all non-buffer items
1877 *	- inode_buffer_list for inode unlink buffers
1878 *	- cancel_list for the cancelled buffers
1879 *
1880 * Note that we add objects to the tail of the lists so that first-to-last
1881 * ordering is preserved within the lists. Adding objects to the head of the
1882 * list means when we traverse from the head we walk them in last-to-first
1883 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1884 * but for all other items there may be specific ordering that we need to
1885 * preserve.
1886 */
1887STATIC int
1888xlog_recover_reorder_trans(
1889	struct xlog		*log,
1890	struct xlog_recover	*trans,
1891	int			pass)
1892{
1893	struct xlog_recover_item *item, *n;
1894	int			error = 0;
1895	LIST_HEAD(sort_list);
1896	LIST_HEAD(cancel_list);
1897	LIST_HEAD(buffer_list);
1898	LIST_HEAD(inode_buffer_list);
1899	LIST_HEAD(item_list);
1900
1901	list_splice_init(&trans->r_itemq, &sort_list);
1902	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1903		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;
1904
1905		item->ri_ops = xlog_find_item_ops(item);
1906		if (!item->ri_ops) {
1907			xfs_warn(log->l_mp,
1908				"%s: unrecognized type of log operation (%d)",
1909				__func__, ITEM_TYPE(item));
1910			ASSERT(0);
1911			/*
1912			 * return the remaining items back to the transaction
1913			 * item list so they can be freed in caller.
1914			 */
1915			if (!list_empty(&sort_list))
1916				list_splice_init(&sort_list, &trans->r_itemq);
1917			error = -EFSCORRUPTED;
1918			break;
1919		}
1920
1921		if (item->ri_ops->reorder)
1922			fate = item->ri_ops->reorder(item);
1923
1924		switch (fate) {
1925		case XLOG_REORDER_BUFFER_LIST:
1926			list_move_tail(&item->ri_list, &buffer_list);
1927			break;
1928		case XLOG_REORDER_CANCEL_LIST:
1929			trace_xfs_log_recover_item_reorder_head(log,
1930					trans, item, pass);
1931			list_move(&item->ri_list, &cancel_list);
1932			break;
1933		case XLOG_REORDER_INODE_BUFFER_LIST:
1934			list_move(&item->ri_list, &inode_buffer_list);
1935			break;
1936		case XLOG_REORDER_ITEM_LIST:
1937			trace_xfs_log_recover_item_reorder_tail(log,
1938							trans, item, pass);
1939			list_move_tail(&item->ri_list, &item_list);
1940			break;
1941		}
1942	}
1943
1944	ASSERT(list_empty(&sort_list));
1945	if (!list_empty(&buffer_list))
1946		list_splice(&buffer_list, &trans->r_itemq);
1947	if (!list_empty(&item_list))
1948		list_splice_tail(&item_list, &trans->r_itemq);
1949	if (!list_empty(&inode_buffer_list))
1950		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1951	if (!list_empty(&cancel_list))
1952		list_splice_tail(&cancel_list, &trans->r_itemq);
1953	return error;
1954}
1955
1956void
1957xlog_buf_readahead(
1958	struct xlog		*log,
1959	xfs_daddr_t		blkno,
1960	uint			len,
1961	const struct xfs_buf_ops *ops)
1962{
1963	if (!xlog_is_buffer_cancelled(log, blkno, len))
1964		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1965}
1966
1967/*
1968 * Create a deferred work structure for resuming and tracking the progress of a
1969 * log intent item that was found during recovery.
1970 */
1971void
1972xlog_recover_intent_item(
1973	struct xlog			*log,
1974	struct xfs_log_item		*lip,
1975	xfs_lsn_t			lsn,
1976	const struct xfs_defer_op_type	*ops)
1977{
1978	ASSERT(xlog_item_is_intent(lip));
1979
1980	xfs_defer_start_recovery(lip, &log->r_dfops, ops);
1981
1982	/*
1983	 * Insert the intent into the AIL directly and drop one reference so
1984	 * that finishing or canceling the work will drop the other.
1985	 */
1986	xfs_trans_ail_insert(log->l_ailp, lip, lsn);
1987	lip->li_ops->iop_unpin(lip, 0);
1988}
1989
1990STATIC int
1991xlog_recover_items_pass2(
1992	struct xlog                     *log,
1993	struct xlog_recover             *trans,
1994	struct list_head                *buffer_list,
1995	struct list_head                *item_list)
1996{
1997	struct xlog_recover_item	*item;
1998	int				error = 0;
1999
2000	list_for_each_entry(item, item_list, ri_list) {
2001		trace_xfs_log_recover_item_recover(log, trans, item,
2002				XLOG_RECOVER_PASS2);
2003
2004		if (item->ri_ops->commit_pass2)
2005			error = item->ri_ops->commit_pass2(log, buffer_list,
2006					item, trans->r_lsn);
2007		if (error)
2008			return error;
2009	}
2010
2011	return error;
2012}
2013
2014/*
2015 * Perform the transaction.
2016 *
2017 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2018 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2019 */
2020STATIC int
2021xlog_recover_commit_trans(
2022	struct xlog		*log,
2023	struct xlog_recover	*trans,
2024	int			pass,
2025	struct list_head	*buffer_list)
2026{
2027	int				error = 0;
2028	int				items_queued = 0;
2029	struct xlog_recover_item	*item;
2030	struct xlog_recover_item	*next;
2031	LIST_HEAD			(ra_list);
2032	LIST_HEAD			(done_list);
2033
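	/*
	 * Batching threshold (a heuristic): issue readahead for up to this
	 * many items before recovering the batch.
	 */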
2034	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
2035
2036	hlist_del_init(&trans->r_list);
2037
2038	error = xlog_recover_reorder_trans(log, trans, pass);
2039	if (error)
2040		return error;
2041
2042	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2043		trace_xfs_log_recover_item_recover(log, trans, item, pass);
2044
2045		switch (pass) {
2046		case XLOG_RECOVER_PASS1:
2047			if (item->ri_ops->commit_pass1)
2048				error = item->ri_ops->commit_pass1(log, item);
2049			break;
2050		case XLOG_RECOVER_PASS2:
2051			if (item->ri_ops->ra_pass2)
2052				item->ri_ops->ra_pass2(log, item);
2053			list_move_tail(&item->ri_list, &ra_list);
2054			items_queued++;
2055			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2056				error = xlog_recover_items_pass2(log, trans,
2057						buffer_list, &ra_list);
2058				list_splice_tail_init(&ra_list, &done_list);
2059				items_queued = 0;
2060			}
2061
2062			break;
2063		default:
2064			ASSERT(0);
2065		}
2066
2067		if (error)
2068			goto out;
2069	}
2070
2071out:
2072	if (!list_empty(&ra_list)) {
2073		if (!error)
2074			error = xlog_recover_items_pass2(log, trans,
2075					buffer_list, &ra_list);
2076		list_splice_tail_init(&ra_list, &done_list);
2077	}
2078
2079	if (!list_empty(&done_list))
2080		list_splice_init(&done_list, &trans->r_itemq);
2081
2082	return error;
2083}
2084
2085STATIC void
2086xlog_recover_add_item(
2087	struct list_head	*head)
2088{
2089	struct xlog_recover_item *item;
2090
2091	item = kzalloc(sizeof(struct xlog_recover_item),
2092			GFP_KERNEL | __GFP_NOFAIL);
2093	INIT_LIST_HEAD(&item->ri_list);
2094	list_add_tail(&item->ri_list, head);
2095}
2096
2097STATIC int
2098xlog_recover_add_to_cont_trans(
2099	struct xlog		*log,
2100	struct xlog_recover	*trans,
2101	char			*dp,
2102	int			len)
2103{
2104	struct xlog_recover_item *item;
2105	char			*ptr, *old_ptr;
2106	int			old_len;
2107
2108	/*
2109	 * If the transaction is empty, the header was split across this and the
2110	 * previous record. Copy the rest of the header.
2111	 */
2112	if (list_empty(&trans->r_itemq)) {
2113		ASSERT(len <= sizeof(struct xfs_trans_header));
2114		if (len > sizeof(struct xfs_trans_header)) {
2115			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2116			return -EFSCORRUPTED;
2117		}
2118
2119		xlog_recover_add_item(&trans->r_itemq);
2120		ptr = (char *)&trans->r_theader +
2121				sizeof(struct xfs_trans_header) - len;
2122		memcpy(ptr, dp, len);
2123		return 0;
2124	}
2125
2126	/* take the tail entry */
2127	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2128			  ri_list);
2129
2130	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2131	old_len = item->ri_buf[item->ri_cnt-1].i_len;
2132
2133	ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
2134	if (!ptr)
2135		return -ENOMEM;
2136	memcpy(&ptr[old_len], dp, len);
2137	item->ri_buf[item->ri_cnt-1].i_len += len;
2138	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2139	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2140	return 0;
2141}
2142
2143/*
2144 * The next region to add is the start of a new region.  It could be
2145 * a whole region or it could be the first part of a new region.  Because
2146 * of this, the assumption here is that the type and size fields of all
2147 * format structures fit into the first 32 bits of the structure.
2148 *
2149 * This works because all regions must be 32 bit aligned.  Therefore, we
2150 * either have both fields or we have neither field.  In the case we have
2151 * neither field, the data part of the region is zero length.  We only have
2152 * a log_op_header and can throw away the header since a new one will appear
2153 * later.  If we have at least 4 bytes, then we can determine how many regions
2154 * will appear in the current log item.
2155 */
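/*
 * For instance (any log item format would do equally well here): struct
 * xfs_inode_log_format begins with a 16-bit ilf_type followed by a 16-bit
 * ilf_size, so the first four bytes of a region are enough to identify the
 * item type and the number of regions making up the item.
 */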
2156STATIC int
2157xlog_recover_add_to_trans(
2158	struct xlog		*log,
2159	struct xlog_recover	*trans,
2160	char			*dp,
2161	int			len)
2162{
2163	struct xfs_inode_log_format	*in_f;			/* any will do */
2164	struct xlog_recover_item *item;
2165	char			*ptr;
2166
2167	if (!len)
2168		return 0;
2169	if (list_empty(&trans->r_itemq)) {
2170		/* we need to catch log corruptions here */
2171		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2172			xfs_warn(log->l_mp, "%s: bad header magic number",
2173				__func__);
2174			ASSERT(0);
2175			return -EFSCORRUPTED;
2176		}
2177
2178		if (len > sizeof(struct xfs_trans_header)) {
2179			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2180			ASSERT(0);
2181			return -EFSCORRUPTED;
2182		}
2183
2184		/*
2185		 * The transaction header can be arbitrarily split across op
2186		 * records. If we don't have the whole thing here, copy what we
2187		 * do have and handle the rest in the next record.
2188		 */
2189		if (len == sizeof(struct xfs_trans_header))
2190			xlog_recover_add_item(&trans->r_itemq);
2191		memcpy(&trans->r_theader, dp, len);
2192		return 0;
2193	}
2194
2195	ptr = xlog_kvmalloc(len);
2196	memcpy(ptr, dp, len);
2197	in_f = (struct xfs_inode_log_format *)ptr;
2198
2199	/* take the tail entry */
2200	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2201			  ri_list);
2202	if (item->ri_total != 0 &&
2203	     item->ri_total == item->ri_cnt) {
2204		/* tail item is in use, get a new one */
2205		xlog_recover_add_item(&trans->r_itemq);
2206		item = list_entry(trans->r_itemq.prev,
2207					struct xlog_recover_item, ri_list);
2208	}
2209
2210	if (item->ri_total == 0) {		/* first region to be added */
2211		if (in_f->ilf_size == 0 ||
2212		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2213			xfs_warn(log->l_mp,
2214		"bad number of regions (%d) in inode log format",
2215				  in_f->ilf_size);
2216			ASSERT(0);
2217			kvfree(ptr);
2218			return -EFSCORRUPTED;
2219		}
2220
2221		item->ri_total = in_f->ilf_size;
2222		item->ri_buf = kzalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2223				GFP_KERNEL | __GFP_NOFAIL);
2224	}
2225
2226	if (item->ri_total <= item->ri_cnt) {
2227		xfs_warn(log->l_mp,
2228	"log item region count (%d) overflowed size (%d)",
2229				item->ri_cnt, item->ri_total);
2230		ASSERT(0);
2231		kvfree(ptr);
2232		return -EFSCORRUPTED;
2233	}
2234
2235	/* Description region is ri_buf[0] */
2236	item->ri_buf[item->ri_cnt].i_addr = ptr;
2237	item->ri_buf[item->ri_cnt].i_len  = len;
2238	item->ri_cnt++;
2239	trace_xfs_log_recover_item_add(log, trans, item, 0);
2240	return 0;
2241}
2242
2243/*
2244 * Free up any resources allocated by the transaction
2245 *
2246 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2247 */
2248STATIC void
2249xlog_recover_free_trans(
2250	struct xlog_recover	*trans)
2251{
2252	struct xlog_recover_item *item, *n;
2253	int			i;
2254
2255	hlist_del_init(&trans->r_list);
2256
2257	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2258		/* Free the regions in the item. */
2259		list_del(&item->ri_list);
2260		for (i = 0; i < item->ri_cnt; i++)
2261			kvfree(item->ri_buf[i].i_addr);
2262		/* Free the item itself */
2263		kfree(item->ri_buf);
2264		kfree(item);
2265	}
2266	/* Free the transaction recover structure */
2267	kfree(trans);
2268}
2269
2270/*
2271 * On error or completion, trans is freed.
2272 */
2273STATIC int
2274xlog_recovery_process_trans(
2275	struct xlog		*log,
2276	struct xlog_recover	*trans,
2277	char			*dp,
2278	unsigned int		len,
2279	unsigned int		flags,
2280	int			pass,
2281	struct list_head	*buffer_list)
2282{
2283	int			error = 0;
2284	bool			freeit = false;
2285
2286	/* mask off ophdr transaction container flags */
2287	flags &= ~XLOG_END_TRANS;
2288	if (flags & XLOG_WAS_CONT_TRANS)
2289		flags &= ~XLOG_CONTINUE_TRANS;
2290
2291	/*
2292	 * Callees must not free the trans structure. We'll decide if we need to
2293	 * free it or not based on the operation being done and its result.
2294	 */
2295	switch (flags) {
2296	/* expected flag values */
2297	case 0:
2298	case XLOG_CONTINUE_TRANS:
2299		error = xlog_recover_add_to_trans(log, trans, dp, len);
2300		break;
2301	case XLOG_WAS_CONT_TRANS:
2302		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2303		break;
2304	case XLOG_COMMIT_TRANS:
2305		error = xlog_recover_commit_trans(log, trans, pass,
2306						  buffer_list);
2307		/* success or fail, we are now done with this transaction. */
2308		freeit = true;
2309		break;
2310
2311	/* unexpected flag values */
2312	case XLOG_UNMOUNT_TRANS:
2313		/* just skip trans */
2314		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2315		freeit = true;
2316		break;
2317	case XLOG_START_TRANS:
2318	default:
2319		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2320		ASSERT(0);
2321		error = -EFSCORRUPTED;
2322		break;
2323	}
2324	if (error || freeit)
2325		xlog_recover_free_trans(trans);
2326	return error;
2327}
2328
2329/*
2330 * Lookup the transaction recovery structure associated with the ID in the
2331 * current ophdr. If the transaction doesn't exist and the start flag is set in
2332 * the ophdr, then allocate a new transaction for future ID matches to find.
2333 * Either way, return what we found during the lookup - an existing transaction
2334 * or nothing.
2335 */
2336STATIC struct xlog_recover *
2337xlog_recover_ophdr_to_trans(
2338	struct hlist_head	rhash[],
2339	struct xlog_rec_header	*rhead,
2340	struct xlog_op_header	*ohead)
2341{
2342	struct xlog_recover	*trans;
2343	xlog_tid_t		tid;
2344	struct hlist_head	*rhp;
2345
2346	tid = be32_to_cpu(ohead->oh_tid);
2347	rhp = &rhash[XLOG_RHASH(tid)];
2348	hlist_for_each_entry(trans, rhp, r_list) {
2349		if (trans->r_log_tid == tid)
2350			return trans;
2351	}
2352
2353	/*
2354	 * skip over non-start transaction headers - we could be
2355	 * processing slack space before the next transaction starts
2356	 */
2357	if (!(ohead->oh_flags & XLOG_START_TRANS))
2358		return NULL;
2359
2360	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2361
2362	/*
2363	 * This is a new transaction so allocate a new recovery container to
2364	 * hold the recovery ops that will follow.
2365	 */
2366	trans = kzalloc(sizeof(struct xlog_recover), GFP_KERNEL | __GFP_NOFAIL);
2367	trans->r_log_tid = tid;
2368	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2369	INIT_LIST_HEAD(&trans->r_itemq);
2370	INIT_HLIST_NODE(&trans->r_list);
2371	hlist_add_head(&trans->r_list, rhp);
2372
2373	/*
2374	 * Nothing more to do for this ophdr. Items to be added to this new
2375	 * transaction will be in subsequent ophdr containers.
2376	 */
2377	return NULL;
2378}
2379
2380STATIC int
2381xlog_recover_process_ophdr(
2382	struct xlog		*log,
2383	struct hlist_head	rhash[],
2384	struct xlog_rec_header	*rhead,
2385	struct xlog_op_header	*ohead,
2386	char			*dp,
2387	char			*end,
2388	int			pass,
2389	struct list_head	*buffer_list)
2390{
2391	struct xlog_recover	*trans;
2392	unsigned int		len;
2393	int			error;
2394
2395	/* Do we understand who wrote this op? */
2396	if (ohead->oh_clientid != XFS_TRANSACTION &&
2397	    ohead->oh_clientid != XFS_LOG) {
2398		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2399			__func__, ohead->oh_clientid);
2400		ASSERT(0);
2401		return -EFSCORRUPTED;
2402	}
2403
2404	/*
2405	 * Check the ophdr contains all the data it is supposed to contain.
2406	 */
2407	len = be32_to_cpu(ohead->oh_len);
2408	if (dp + len > end) {
2409		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2410		WARN_ON(1);
2411		return -EFSCORRUPTED;
2412	}
2413
2414	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2415	if (!trans) {
2416		/* nothing to do, so skip over this ophdr */
2417		return 0;
2418	}
2419
2420	/*
2421	 * The recovered buffer queue is drained only once we know that all
2422	 * recovery items for the current LSN have been processed. This is
2423	 * required because:
2424	 *
2425	 * - Buffer write submission updates the metadata LSN of the buffer.
2426	 * - Log recovery skips items with a metadata LSN >= the current LSN of
2427	 *   the recovery item.
2428	 * - Separate recovery items against the same metadata buffer can share
2429	 *   a current LSN. I.e., consider that the LSN of a recovery item is
2430	 *   defined as the starting LSN of the first record in which its
2431	 *   transaction appears, that a record can hold multiple transactions,
2432	 *   and/or that a transaction can span multiple records.
2433	 *
2434	 * In other words, we are allowed to submit a buffer from log recovery
2435	 * once per current LSN. Otherwise, we may incorrectly skip recovery
2436	 * items and cause corruption.
2437	 *
2438	 * We don't know up front whether buffers are updated multiple times per
2439	 * LSN. Therefore, track the current LSN of each commit log record as it
2440	 * is processed and drain the queue when it changes. Use commit records
2441	 * because they are ordered correctly by the logging code.
2442	 */
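	/*
	 * Illustrative failure mode: two transactions committed in the same
	 * record share r_lsn.  If we drained the buffer list between them,
	 * the buffer's metadata LSN would already equal r_lsn when the second
	 * transaction was recovered, and its updates to the same buffer would
	 * be incorrectly skipped.
	 */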
2443	if (log->l_recovery_lsn != trans->r_lsn &&
2444	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
2445		error = xfs_buf_delwri_submit(buffer_list);
2446		if (error)
2447			return error;
2448		log->l_recovery_lsn = trans->r_lsn;
2449	}
2450
2451	return xlog_recovery_process_trans(log, trans, dp, len,
2452					   ohead->oh_flags, pass, buffer_list);
2453}
2454
2455/*
2456 * There are two valid states of the r_state field.  0 indicates that the
2457 * transaction structure is in a normal state.  We have either seen the
2458 * start of the transaction or the last operation we added was not a partial
2459 * operation.  If the last operation we added to the transaction was a
2460 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2461 *
2462 * NOTE: skip LRs with 0 data length.
2463 */
2464STATIC int
2465xlog_recover_process_data(
2466	struct xlog		*log,
2467	struct hlist_head	rhash[],
2468	struct xlog_rec_header	*rhead,
2469	char			*dp,
2470	int			pass,
2471	struct list_head	*buffer_list)
2472{
2473	struct xlog_op_header	*ohead;
2474	char			*end;
2475	int			num_logops;
2476	int			error;
2477
2478	end = dp + be32_to_cpu(rhead->h_len);
2479	num_logops = be32_to_cpu(rhead->h_num_logops);
2480
2481	/* check the log format matches our own - else we can't recover */
2482	if (xlog_header_check_recover(log->l_mp, rhead))
2483		return -EIO;
2484
2485	trace_xfs_log_recover_record(log, rhead, pass);
2486	while ((dp < end) && num_logops) {
2487
2488		ohead = (struct xlog_op_header *)dp;
2489		dp += sizeof(*ohead);
2490		if (dp > end) {
2491			xfs_warn(log->l_mp, "%s: op header overrun", __func__);
2492			return -EFSCORRUPTED;
2493		}
2494
2495		/* errors will abort recovery */
2496		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2497						   dp, end, pass, buffer_list);
2498		if (error)
2499			return error;
2500
2501		dp += be32_to_cpu(ohead->oh_len);
2502		num_logops--;
2503	}
2504	return 0;
2505}
2506
2507/* Take all the collected deferred ops and finish them in order. */
2508static int
2509xlog_finish_defer_ops(
2510	struct xfs_mount	*mp,
2511	struct list_head	*capture_list)
2512{
2513	struct xfs_defer_capture *dfc, *next;
2514	struct xfs_trans	*tp;
2515	int			error = 0;
2516
2517	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2518		struct xfs_trans_res	resv;
2519		struct xfs_defer_resources dres;
2520
2521		/*
2522		 * Create a new transaction reservation from the captured
2523		 * information.  Set logcount to 1 to force the new transaction
2524		 * to regrant every roll so that we can make forward progress
2525		 * in recovery no matter how full the log might be.
2526		 */
2527		resv.tr_logres = dfc->dfc_logres;
2528		resv.tr_logcount = 1;
2529		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2530
2531		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2532				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2533		if (error) {
2534			xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
2535			return error;
2536		}
2537
2538		/*
2539		 * Transfer to this new transaction all the dfops we captured
2540		 * from recovering a single intent item.
2541		 */
2542		list_del_init(&dfc->dfc_list);
2543		xfs_defer_ops_continue(dfc, tp, &dres);
2544		error = xfs_trans_commit(tp);
2545		xfs_defer_resources_rele(&dres);
2546		if (error)
2547			return error;
2548	}
2549
2550	ASSERT(list_empty(capture_list));
2551	return 0;
2552}
2553
2554/* Release all the captured defer ops and capture structures in this list. */
2555static void
2556xlog_abort_defer_ops(
2557	struct xfs_mount		*mp,
2558	struct list_head		*capture_list)
2559{
2560	struct xfs_defer_capture	*dfc;
2561	struct xfs_defer_capture	*next;
2562
2563	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2564		list_del_init(&dfc->dfc_list);
2565		xfs_defer_ops_capture_abort(mp, dfc);
2566	}
2567}
2568
2569/*
2570 * When this is called, all of the log intent items which did not have
2571 * corresponding log done items should be in the AIL.  What we do now is update
2572 * the data structures associated with each one.
2573 *
2574 * Since we process the log intent items in normal transactions, they will be
2575 * removed at some point after the commit.  This prevents us from just walking
2576 * down the list processing each one.  We'll use a flag in the intent item to
2577 * skip those that we've already processed and use the AIL iteration mechanism's
2578 * generation count to try to speed this up at least a bit.
2579 *
2580 * When we start, we know that the intents are the only things in the AIL. As we
2581 * process them, however, other items are added to the AIL. Hence we know we
2582 * have started recovery on all the pending intents when we find a non-intent
2583 * item in the AIL.
2584 */
2585STATIC int
2586xlog_recover_process_intents(
2587	struct xlog			*log)
2588{
2589	LIST_HEAD(capture_list);
2590	struct xfs_defer_pending	*dfp, *n;
2591	int				error = 0;
2592#if defined(DEBUG) || defined(XFS_WARN)
2593	xfs_lsn_t			last_lsn;
2594
2595	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2596#endif
2597
2598	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2599		ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2600
2601		/*
2602		 * We should never see a redo item with a LSN higher than
2603		 * the last transaction we found in the log at the start
2604		 * of recovery.
2605		 */
2606		ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0);
2607
2608		/*
2609		 * NOTE: If your intent processing routine can create more
2610		 * deferred ops, you /must/ attach them to the capture list in
2611		 * the recover routine or else those subsequent intents will be
2612		 * replayed in the wrong order!
2613		 *
2614		 * The recovery function can free the log item, so we must not
2615		 * access dfp->dfp_intent after it returns.  It must dispose of
2616		 * @dfp if it returns 0.
2617		 */
2618		error = xfs_defer_finish_recovery(log->l_mp, dfp,
2619				&capture_list);
2620		if (error)
2621			break;
2622	}
2623	if (error)
2624		goto err;
2625
2626	error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2627	if (error)
2628		goto err;
2629
2630	return 0;
2631err:
2632	xlog_abort_defer_ops(log->l_mp, &capture_list);
2633	return error;
2634}
2635
2636/*
2637 * A cancel occurs when the mount has failed and we're bailing out.  Release all
2638 * pending log intent items that we haven't started recovery on so they don't
2639 * pin the AIL.
2640 */
2641STATIC void
2642xlog_recover_cancel_intents(
2643	struct xlog			*log)
2644{
2645	struct xfs_defer_pending	*dfp, *n;
2646
2647	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2648		ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2649
2650		xfs_defer_cancel_recovery(log->l_mp, dfp);
2651	}
2652}
2653
2654/*
2655 * Transfer ownership of the recovered pending work to the recovery transaction
2656 * and try to finish the work.  If there is more work to be done, the dfp will
2657 * remain attached to the transaction.  If not, the dfp is freed.
2658 */
2659int
2660xlog_recover_finish_intent(
2661	struct xfs_trans		*tp,
2662	struct xfs_defer_pending	*dfp)
2663{
2664	int				error;
2665
2666	list_move(&dfp->dfp_list, &tp->t_dfops);
2667	error = xfs_defer_finish_one(tp, dfp);
2668	if (error == -EAGAIN)
2669		return 0;
2670	return error;
2671}
2672
2673/*
2674 * This routine performs a transaction to null out a bad inode pointer
2675 * in an agi unlinked inode hash bucket.
2676 */
2677STATIC void
2678xlog_recover_clear_agi_bucket(
2679	struct xfs_perag	*pag,
2680	int			bucket)
2681{
2682	struct xfs_mount	*mp = pag_mount(pag);
2683	struct xfs_trans	*tp;
2684	struct xfs_agi		*agi;
2685	struct xfs_buf		*agibp;
2686	int			offset;
2687	int			error;
2688
2689	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2690	if (error)
2691		goto out_error;
2692
2693	error = xfs_read_agi(pag, tp, 0, &agibp);
2694	if (error)
2695		goto out_abort;
2696
2697	agi = agibp->b_addr;
2698	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2699	offset = offsetof(xfs_agi_t, agi_unlinked) +
2700		 (sizeof(xfs_agino_t) * bucket);
2701	xfs_trans_log_buf(tp, agibp, offset,
2702			  (offset + sizeof(xfs_agino_t) - 1));
2703
2704	error = xfs_trans_commit(tp);
2705	if (error)
2706		goto out_error;
2707	return;
2708
2709out_abort:
2710	xfs_trans_cancel(tp);
2711out_error:
2712	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
2713			pag_agno(pag));
2714	return;
2715}
2716
2717static int
2718xlog_recover_iunlink_bucket(
2719	struct xfs_perag	*pag,
2720	struct xfs_agi		*agi,
2721	int			bucket)
2722{
2723	struct xfs_mount	*mp = pag_mount(pag);
2724	struct xfs_inode	*prev_ip = NULL;
2725	struct xfs_inode	*ip;
2726	xfs_agino_t		prev_agino, agino;
2727	int			error = 0;
2728
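	/*
	 * Walk the on-disk singly linked bucket list via i_next_unlinked,
	 * stitching up the in-memory back pointers (i_prev_unlinked) as we go.
	 */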
2729	agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2730	while (agino != NULLAGINO) {
2731		error = xfs_iget(mp, NULL, xfs_agino_to_ino(pag, agino), 0, 0,
2732				&ip);
2733		if (error)
2734			break;
2735
2736		ASSERT(VFS_I(ip)->i_nlink == 0);
2737		ASSERT(VFS_I(ip)->i_mode != 0);
2738		xfs_iflags_clear(ip, XFS_IRECOVERY);
2739		agino = ip->i_next_unlinked;
2740
2741		if (prev_ip) {
2742			ip->i_prev_unlinked = prev_agino;
2743			xfs_irele(prev_ip);
2744
2745			/*
2746			 * Ensure the inode is removed from the unlinked list
2747			 * before we continue so that it won't race with
2748			 * building the in-memory list here. This could be
2749			 * serialised with the agibp lock, but that just
2750			 * serialises via lockstepping and it's much simpler
2751			 * just to flush the inodegc queue and wait for it to
2752			 * complete.
2753			 */
2754			error = xfs_inodegc_flush(mp);
2755			if (error)
2756				break;
2757		}
2758
2759		prev_agino = agino;
2760		prev_ip = ip;
2761	}
2762
2763	if (prev_ip) {
2764		int	error2;
2765
2766		ip->i_prev_unlinked = prev_agino;
2767		xfs_irele(prev_ip);
2768
2769		error2 = xfs_inodegc_flush(mp);
2770		if (error2 && !error)
2771			return error2;
2772	}
2773	return error;
2774}
2775
2776/*
2777 * Recover AGI unlinked lists
2778 *
2779 * This is called during recovery to process any inodes which we unlinked but
2780 * not freed when the system crashed.  These inodes will be on the lists in the
2781 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2782 * any inodes found on the lists. Each inode is removed from the lists when it
2783 * has been fully truncated and is freed. The freeing of the inode and its
2784 * removal from the list must be atomic.
2785 *
2786 * If everything we touch in the agi processing loop is already in memory, this
2787 * loop can hold the cpu for a long time. It runs without lock contention,
2788	 * memory allocation contention, the need to wait for IO, etc., and so will run
2789	 * until we either run out of inodes to process, run low on memory or run out
2790	 * of log space.
2791 *
2792 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2793 * and can prevent other filesystem work (such as CIL pushes) from running. This
2794 * can lead to deadlocks if the recovery process runs out of log reservation
2795 * space. Hence we need to yield the CPU when there is other kernel work
2796 * scheduled on this CPU to ensure other scheduled work can run without undue
2797 * latency.
2798 */
2799static void
2800xlog_recover_iunlink_ag(
2801	struct xfs_perag	*pag)
2802{
2803	struct xfs_agi		*agi;
2804	struct xfs_buf		*agibp;
2805	int			bucket;
2806	int			error;
2807
2808	error = xfs_read_agi(pag, NULL, 0, &agibp);
2809	if (error) {
2810		/*
2811		 * AGI is b0rked. Don't process it.
2812		 *
2813		 * We should probably mark the filesystem as corrupt after we've
2814		 * recovered all the ag's we can....
2815		 */
2816		return;
2817	}
2818
2819	/*
2820	 * Unlock the buffer so that it can be acquired in the normal course of
2821	 * the transaction to truncate and free each inode.  Because we are not
2822	 * racing with anyone else here for the AGI buffer, we don't even need
2823	 * to hold it locked to read the initial unlinked bucket entries out of
2824	 * the buffer. We keep a buffer reference, though, so that it stays pinned
2825	 * in memory while we need the buffer.
2826	 */
2827	agi = agibp->b_addr;
2828	xfs_buf_unlock(agibp);
2829
2830	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2831		error = xlog_recover_iunlink_bucket(pag, agi, bucket);
2832		if (error) {
2833			/*
2834			 * Bucket is unrecoverable, so only a repair scan can
2835			 * free the remaining unlinked inodes. Just empty the
2836			 * bucket and leave the remaining inodes on it
2837			 * unreferenced and unfreeable.
2838			 */
2839			xlog_recover_clear_agi_bucket(pag, bucket);
2840		}
2841	}
2842
2843	xfs_buf_rele(agibp);
2844}
2845
2846static void
2847xlog_recover_process_iunlinks(
2848	struct xlog	*log)
2849{
2850	struct xfs_perag	*pag = NULL;
2851
2852	while ((pag = xfs_perag_next(log->l_mp, pag)))
2853		xlog_recover_iunlink_ag(pag);
2854}
2855
2856STATIC void
2857xlog_unpack_data(
2858	struct xlog_rec_header	*rhead,
2859	char			*dp,
2860	struct xlog		*log)
2861{
2862	int			i, j, k;
2863
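	/*
	 * On disk, the first four bytes of every basic block in the record
	 * were replaced with the cycle number; the originals were saved in
	 * h_cycle_data (and in the extended headers for v2 logs).  Copy them
	 * back into place.
	 */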
2864	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2865		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2866		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2867		dp += BBSIZE;
2868	}
2869
2870	if (xfs_has_logv2(log->l_mp)) {
2871		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2872		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2873			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2874			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2875			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2876			dp += BBSIZE;
2877		}
2878	}
2879}
2880
2881/*
2882 * CRC check, unpack and process a log record.
2883 */
2884STATIC int
2885xlog_recover_process(
2886	struct xlog		*log,
2887	struct hlist_head	rhash[],
2888	struct xlog_rec_header	*rhead,
2889	char			*dp,
2890	int			pass,
2891	struct list_head	*buffer_list)
2892{
2893	__le32			old_crc = rhead->h_crc;
2894	__le32			crc;
2895
2896	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2897
2898	/*
2899	 * Nothing else to do if this is a CRC verification pass. Just return
2900	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2901	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
2902	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2903	 * know precisely what failed.
2904	 */
2905	if (pass == XLOG_RECOVER_CRCPASS) {
2906		if (old_crc && crc != old_crc)
2907			return -EFSBADCRC;
2908		return 0;
2909	}
2910
2911	/*
2912	 * We're in the normal recovery path. Issue a warning if and only if the
2913	 * CRC in the header is non-zero. This is an advisory warning and the
2914	 * zero CRC check prevents warnings from being emitted when upgrading
2915	 * the kernel from one that does not add CRCs by default.
2916	 */
2917	if (crc != old_crc) {
2918		if (old_crc || xfs_has_crc(log->l_mp)) {
2919			xfs_alert(log->l_mp,
2920		"log record CRC mismatch: found 0x%x, expected 0x%x.",
2921					le32_to_cpu(old_crc),
2922					le32_to_cpu(crc));
2923			xfs_hex_dump(dp, 32);
2924		}
2925
2926		/*
2927		 * If the filesystem is CRC enabled, this mismatch becomes a
2928		 * fatal log corruption failure.
2929		 */
2930		if (xfs_has_crc(log->l_mp)) {
2931			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2932			return -EFSCORRUPTED;
2933		}
2934	}
2935
2936	xlog_unpack_data(rhead, dp, log);
2937
2938	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2939					 buffer_list);
2940}
2941
2942STATIC int
2943xlog_valid_rec_header(
2944	struct xlog		*log,
2945	struct xlog_rec_header	*rhead,
2946	xfs_daddr_t		blkno,
2947	int			bufsize)
2948{
2949	int			hlen;
2950
2951	if (XFS_IS_CORRUPT(log->l_mp,
2952			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2953		return -EFSCORRUPTED;
2954	if (XFS_IS_CORRUPT(log->l_mp,
2955			   (!rhead->h_version ||
2956			   (be32_to_cpu(rhead->h_version) &
2957			    (~XLOG_VERSION_OKBITS))))) {
2958		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2959			__func__, be32_to_cpu(rhead->h_version));
2960		return -EFSCORRUPTED;
2961	}
2962
2963	/*
2964	 * LR body must have data (or it wouldn't have been written)
2965	 * and h_len must not be greater than LR buffer size.
2966	 */
2967	hlen = be32_to_cpu(rhead->h_len);
2968	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2969		return -EFSCORRUPTED;
2970
2971	if (XFS_IS_CORRUPT(log->l_mp,
2972			   blkno > log->l_logBBsize || blkno > INT_MAX))
2973		return -EFSCORRUPTED;
2974	return 0;
2975}
2976
2977/*
2978 * Read the log from tail to head and process the log records found.
2979 * Handle the two cases where the tail and head are in the same cycle
2980 * and where the active portion of the log wraps around the end of
2981 * the physical log separately.  The pass parameter is passed through
2982 * to the routines called to process the data and is not looked at
2983 * here.
2984 */
2985STATIC int
2986xlog_do_recovery_pass(
2987	struct xlog		*log,
2988	xfs_daddr_t		head_blk,
2989	xfs_daddr_t		tail_blk,
2990	int			pass,
2991	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
2992{
2993	xlog_rec_header_t	*rhead;
2994	xfs_daddr_t		blk_no, rblk_no;
2995	xfs_daddr_t		rhead_blk;
2996	char			*offset;
2997	char			*hbp, *dbp;
2998	int			error = 0, h_size, h_len;
2999	int			error2 = 0;
3000	int			bblks, split_bblks;
3001	int			hblks = 1, split_hblks, wrapped_hblks;
3002	int			i;
3003	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3004	LIST_HEAD		(buffer_list);
3005
3006	ASSERT(head_blk != tail_blk);
3007	blk_no = rhead_blk = tail_blk;
3008
3009	for (i = 0; i < XLOG_RHASH_SIZE; i++)
3010		INIT_HLIST_HEAD(&rhash[i]);
3011
3012	hbp = xlog_alloc_buffer(log, hblks);
3013	if (!hbp)
3014		return -ENOMEM;
3015
3016	/*
3017	 * Read the header of the tail block and get the iclog buffer size from
3018	 * h_size.  Use this to tell how many sectors make up the log header.
3019	 */
3020	if (xfs_has_logv2(log->l_mp)) {
3021		/*
3022		 * When using variable length iclogs, read first sector of
3023		 * iclog header and extract the header size from it.  Get a
3024		 * new hbp that is the correct size.
3025		 */
3026		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3027		if (error)
3028			goto bread_err1;
3029
3030		rhead = (xlog_rec_header_t *)offset;
3031
3032		/*
3033		 * xfsprogs has a bug where record length is based on lsunit but
3034		 * h_size (iclog size) is hardcoded to 32k. Now that we
3035		 * unconditionally CRC verify the unmount record, this means the
3036		 * log buffer can be too small for the record and cause an
3037		 * overrun.
3038		 *
3039		 * Detect this condition here. Use lsunit for the buffer size as
3040		 * long as this looks like the mkfs case. Otherwise, return an
3041		 * error to avoid a buffer overrun.
3042		 */
3043		h_size = be32_to_cpu(rhead->h_size);
3044		h_len = be32_to_cpu(rhead->h_len);
3045		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3046		    rhead->h_num_logops == cpu_to_be32(1)) {
3047			xfs_warn(log->l_mp,
3048		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
3049				 h_size, log->l_mp->m_logbsize);
3050			h_size = log->l_mp->m_logbsize;
3051		}
3052
3053		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3054		if (error)
3055			goto bread_err1;
3056
3057		/*
3058		 * This open codes xlog_logrec_hblks so that we can reuse the
3059		 * fixed up h_size value calculated above.  Without that we'd
3060		 * still allocate the buffer based on the incorrect on-disk
3061		 * size.
3062		 */
3063		if (h_size > XLOG_HEADER_CYCLE_SIZE &&
3064		    (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
3065			hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
3066			if (hblks > 1) {
3067				kvfree(hbp);
3068				hbp = xlog_alloc_buffer(log, hblks);
3069				if (!hbp)
3070					return -ENOMEM;
3071			}
3072		}
3073	} else {
3074		ASSERT(log->l_sectBBsize == 1);
3075		h_size = XLOG_BIG_RECORD_BSIZE;
3076	}
3077
3078	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3079	if (!dbp) {
3080		kvfree(hbp);
3081		return -ENOMEM;
3082	}
3083
3084	memset(rhash, 0, sizeof(rhash));
3085	if (tail_blk > head_blk) {
3086		/*
3087		 * Perform recovery around the end of the physical log.
3088		 * When the head is not on the same cycle number as the tail,
3089		 * we can't do a sequential recovery.
3090		 */
3091		while (blk_no < log->l_logBBsize) {
3092			/*
3093			 * Check for header wrapping around physical end-of-log
3094			 */
3095			offset = hbp;
3096			split_hblks = 0;
3097			wrapped_hblks = 0;
3098			if (blk_no + hblks <= log->l_logBBsize) {
3099				/* Read header in one read */
3100				error = xlog_bread(log, blk_no, hblks, hbp,
3101						   &offset);
3102				if (error)
3103					goto bread_err2;
3104			} else {
3105				/* This LR is split across physical log end */
3106				if (blk_no != log->l_logBBsize) {
3107					/* some data before physical log end */
3108					ASSERT(blk_no <= INT_MAX);
3109					split_hblks = log->l_logBBsize - (int)blk_no;
3110					ASSERT(split_hblks > 0);
3111					error = xlog_bread(log, blk_no,
3112							   split_hblks, hbp,
3113							   &offset);
3114					if (error)
3115						goto bread_err2;
3116				}
3117
3118				/*
3119				 * Note: this black magic still works with
3120				 * large sector sizes (non-512) only because:
3121				 * - we increased the buffer size originally
3122				 *   by 1 sector giving us enough extra space
3123				 *   for the second read;
3124				 * - the log start is guaranteed to be sector
3125				 *   aligned;
3126				 * - we read the log end (LR header start)
3127				 *   _first_, then the log start (LR header end)
3128				 *   - order is important.
3129				 */
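				/*
				 * Example with illustrative numbers: if
				 * l_logBBsize = 1000, blk_no = 998 and
				 * hblks = 4, the first read pulled blocks
				 * 998..999 into hbp and this read appends
				 * blocks 0..1 at offset + BBTOB(2).
				 */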
3130				wrapped_hblks = hblks - split_hblks;
3131				error = xlog_bread_noalign(log, 0,
3132						wrapped_hblks,
3133						offset + BBTOB(split_hblks));
3134				if (error)
3135					goto bread_err2;
3136			}
3137			rhead = (xlog_rec_header_t *)offset;
3138			error = xlog_valid_rec_header(log, rhead,
3139					split_hblks ? blk_no : 0, h_size);
3140			if (error)
3141				goto bread_err2;
3142
3143			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3144			blk_no += hblks;
3145
3146			/*
3147			 * Read the log record data in multiple reads if it
3148			 * wraps around the end of the log. Note that if the
3149			 * header already wrapped, blk_no could point past the
3150			 * end of the log. The record data is contiguous in
3151			 * that case.
3152			 */
3153			if (blk_no + bblks <= log->l_logBBsize ||
3154			    blk_no >= log->l_logBBsize) {
3155				rblk_no = xlog_wrap_logbno(log, blk_no);
3156				error = xlog_bread(log, rblk_no, bblks, dbp,
3157						   &offset);
3158				if (error)
3159					goto bread_err2;
3160			} else {
3161				/* This log record is split across the
3162				 * physical end of log */
3163				offset = dbp;
3164				split_bblks = 0;
3165				if (blk_no != log->l_logBBsize) {
3166					/* some data is before the physical
3167					 * end of log */
3168					ASSERT(!wrapped_hblks);
3169					ASSERT(blk_no <= INT_MAX);
3170					split_bblks =
3171						log->l_logBBsize - (int)blk_no;
3172					ASSERT(split_bblks > 0);
3173					error = xlog_bread(log, blk_no,
3174							split_bblks, dbp,
3175							&offset);
3176					if (error)
3177						goto bread_err2;
3178				}
3179
3180				/*
3181				 * Note: this black magic still works with
3182				 * large sector sizes (non-512) only because:
3183				 * - we increased the buffer size originally
3184				 *   by 1 sector giving us enough extra space
3185				 *   for the second read;
3186				 * - the log start is guaranteed to be sector
3187				 *   aligned;
3188				 * - we read the log end (LR header start)
3189				 *   _first_, then the log start (LR header end)
3190				 *   - order is important.
3191				 */
3192				error = xlog_bread_noalign(log, 0,
3193						bblks - split_bblks,
3194						offset + BBTOB(split_bblks));
3195				if (error)
3196					goto bread_err2;
3197			}
3198
3199			error = xlog_recover_process(log, rhash, rhead, offset,
3200						     pass, &buffer_list);
3201			if (error)
3202				goto bread_err2;
3203
3204			blk_no += bblks;
3205			rhead_blk = blk_no;
3206		}
3207
3208		ASSERT(blk_no >= log->l_logBBsize);
3209		blk_no -= log->l_logBBsize;
3210		rhead_blk = blk_no;
3211	}
3212
3213	/* read first part of physical log */
3214	while (blk_no < head_blk) {
3215		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3216		if (error)
3217			goto bread_err2;
3218
3219		rhead = (xlog_rec_header_t *)offset;
3220		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3221		if (error)
3222			goto bread_err2;
3223
3224		/* blocks in data section */
3225		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3226		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3227				   &offset);
3228		if (error)
3229			goto bread_err2;
3230
3231		error = xlog_recover_process(log, rhash, rhead, offset, pass,
3232					     &buffer_list);
3233		if (error)
3234			goto bread_err2;
3235
3236		blk_no += bblks + hblks;
3237		rhead_blk = blk_no;
3238	}
3239
3240 bread_err2:
3241	kvfree(dbp);
3242 bread_err1:
3243	kvfree(hbp);
3244
3245	/*
3246	 * Submit buffers that have been dirtied by the last record recovered.
3247	 */
3248	if (!list_empty(&buffer_list)) {
3249		if (error) {
3250			/*
3251			 * If there has been an item recovery error then we
3252			 * cannot allow partial checkpoint writeback to
3253			 * occur.  We might have multiple checkpoints with the
3254			 * same start LSN in this buffer list, and partial
3255			 * writeback of a checkpoint in this situation can
3256			 * prevent future recovery of all the changes in the
3257			 * checkpoints at this start LSN.
3258			 *
3259			 * Note: Shutting down the filesystem will result in the
3260			 * delwri submission marking all the buffers stale,
3261			 * completing them and cleaning up _XBF_LOGRECOVERY
3262			 * state without doing any IO.
3263			 */
3264			xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3265		}
3266		error2 = xfs_buf_delwri_submit(&buffer_list);
3267	}
3268
3269	if (error && first_bad)
3270		*first_bad = rhead_blk;
3271
3272	/*
3273	 * Transactions are freed at commit time but transactions without commit
3274	 * records on disk are never committed. Free any that may be left in the
3275	 * hash table.
3276	 */
3277	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3278		struct hlist_node	*tmp;
3279		struct xlog_recover	*trans;
3280
3281		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3282			xlog_recover_free_trans(trans);
3283	}
3284
3285	return error ? error : error2;
3286}
3287
3288/*
3289 * Do the recovery of the log.  We actually do this in two phases.
3290 * The two passes are necessary in order to implement the function
3291 * of cancelling a record written into the log.  The first pass
3292 * determines those things which have been cancelled, and the
3293 * second pass replays log items normally except for those which
3294 * have been cancelled.  The handling of the replay and cancellations
3295 * takes place in the log item type specific routines.
3296 *
3297 * The table of items which have cancel records in the log is allocated
3298 * and freed at this level, since only here do we know when all of
3299 * the log recovery has been completed.
3300 */
3301STATIC int
3302xlog_do_log_recovery(
3303	struct xlog	*log,
3304	xfs_daddr_t	head_blk,
3305	xfs_daddr_t	tail_blk)
3306{
3307	int		error;
3308
3309	ASSERT(head_blk != tail_blk);
3310
3311	/*
3312	 * First do a pass to find all of the cancelled buf log items.
3313	 * Store them in the buf_cancel_table for use in the second pass.
3314	 */
3315	error = xlog_alloc_buf_cancel_table(log);
3316	if (error)
3317		return error;
3318
3319	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3320				      XLOG_RECOVER_PASS1, NULL);
3321	if (error != 0)
3322		goto out_cancel;
3323
3324	/*
3325	 * Then do a second pass to actually recover the items in the log.
3326	 * When it is complete free the table of buf cancel items.
3327	 */
3328	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3329				      XLOG_RECOVER_PASS2, NULL);
3330	if (!error)
3331		xlog_check_buf_cancel_table(log);
3332out_cancel:
3333	xlog_free_buf_cancel_table(log);
3334	return error;
3335}
3336
3337/*
3338 * Do the actual recovery
3339 */
3340STATIC int
3341xlog_do_recover(
3342	struct xlog		*log,
3343	xfs_daddr_t		head_blk,
3344	xfs_daddr_t		tail_blk)
3345{
3346	struct xfs_mount	*mp = log->l_mp;
3347	struct xfs_buf		*bp = mp->m_sb_bp;
3348	struct xfs_sb		*sbp = &mp->m_sb;
3349	int			error;
3350
3351	trace_xfs_log_recover(log, head_blk, tail_blk);
3352
3353	/*
3354	 * First replay the images in the log.
3355	 */
3356	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3357	if (error)
3358		return error;
3359
3360	if (xlog_is_shutdown(log))
3361		return -EIO;
3362
3363	/*
3364	 * We now update the tail_lsn since much of the recovery has completed
3365	 * and there may be space available to use.  If there were no extent frees
3366	 * or iunlinks, we can free up the entire log.  This was set in
3367	 * xlog_find_tail to be the lsn of the last known good LR on disk.  If
3368	 * there are extent frees or iunlinks they will have some entries in the
3369	 * AIL; so we look at the AIL to determine how to set the tail_lsn.
3370	 */
3371	xfs_ail_assign_tail_lsn(log->l_ailp);
3372
3373	/*
3374	 * Now that we've finished replaying all buffer and inode updates,
3375	 * re-read the superblock and reverify it.
3376	 */
3377	xfs_buf_lock(bp);
3378	xfs_buf_hold(bp);
3379	error = _xfs_buf_read(bp, XBF_READ);
3380	if (error) {
3381		if (!xlog_is_shutdown(log)) {
3382			xfs_buf_ioerror_alert(bp, __this_address);
3383			ASSERT(0);
3384		}
3385		xfs_buf_relse(bp);
3386		return error;
3387	}
3388
3389	/* Convert superblock from on-disk format */
3390	xfs_sb_from_disk(sbp, bp->b_addr);
3391	xfs_buf_relse(bp);
3392
3393	/* re-initialise in-core superblock and geometry structures */
3394	mp->m_features |= xfs_sb_version_to_features(sbp);
3395	xfs_reinit_percpu_counters(mp);
3396
3397	/* Normal transactions can now occur */
3398	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3399	return 0;
3400}
3401
3402/*
3403 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3404 *
3405 * Return error or zero.
3406 */
3407int
3408xlog_recover(
3409	struct xlog	*log)
3410{
3411	xfs_daddr_t	head_blk, tail_blk;
3412	int		error;
3413
3414	/* find the tail of the log */
3415	error = xlog_find_tail(log, &head_blk, &tail_blk);
3416	if (error)
3417		return error;
3418
3419	/*
3420	 * The superblock was read before the log was available and thus the LSN
3421	 * could not be verified. Check the superblock LSN against the current
3422	 * LSN now that it's known.
3423	 */
3424	if (xfs_has_crc(log->l_mp) &&
3425	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3426		return -EINVAL;
3427
3428	if (tail_blk != head_blk) {
3429		/* There used to be a comment here:
3430		 *
3431		 * disallow recovery on read-only mounts.  note -- mount
3432		 * checks for ENOSPC and turns it into an intelligent
3433		 * error message.
3434		 * ...but this is no longer true.  Now, unless you specify
3435		 * NORECOVERY (in which case this function would never be
3436		 * called), we just go ahead and recover.  We do this all
3437		 * under the vfs layer, so we can get away with it unless
3438		 * the device itself is read-only, in which case we fail.
3439		 */
3440		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3441			return error;
3442		}
3443
3444		/*
3445		 * Version 5 superblock log feature mask validation. We know the
3446		 * log is dirty so check if there are any unknown log features
3447		 * in what we need to recover. If there are unknown features
3448		 * (e.g. unsupported transactions), then simply reject the
3449		 * attempt at recovery before touching anything.
3450		 */
3451		if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3452		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3453					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3454			xfs_warn(log->l_mp,
3455"Superblock has unknown incompatible log features (0x%x) enabled.",
3456				(log->l_mp->m_sb.sb_features_log_incompat &
3457					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3458			xfs_warn(log->l_mp,
3459"The log can not be fully and/or safely recovered by this kernel.");
3460			xfs_warn(log->l_mp,
3461"Please recover the log on a kernel that supports the unknown features.");
3462			return -EINVAL;
3463		}
3464
3465		/*
3466		 * Delay log recovery if the debug hook is set. This is debug
3467		 * instrumentation to coordinate simulation of I/O failures with
3468		 * log recovery.
3469		 */
3470		if (xfs_globals.log_recovery_delay) {
3471			xfs_notice(log->l_mp,
3472				"Delaying log recovery for %d seconds.",
3473				xfs_globals.log_recovery_delay);
3474			msleep(xfs_globals.log_recovery_delay * 1000);
3475		}
3476
3477		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3478				log->l_mp->m_logname ? log->l_mp->m_logname
3479						     : "internal");
3480
3481		error = xlog_do_recover(log, head_blk, tail_blk);
3482		set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3483	}
3484	return error;
3485}
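/*
 * Editor's sketch (not part of the original source): the unknown-feature
 * test above follows the usual incompat-bitmask pattern -- any bit set
 * outside the mask of log features this kernel understands blocks
 * recovery.  The mask value below is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_KNOWN_LOG_INCOMPAT	0x3u	/* hypothetical known bits */

static bool
example_has_unknown_log_features(uint32_t sb_features_log_incompat)
{
	return (sb_features_log_incompat & ~EXAMPLE_KNOWN_LOG_INCOMPAT) != 0;
}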
3486
3487/*
3488 * In the first part of recovery we replay inodes and buffers and build up the
3489 * list of intents which need to be processed. Here we process the intents and
3490 * clean up the on-disk unlinked inode lists. This is separated from the first
3491 * part of recovery so that the root and real-time bitmap inodes can be read in
3492 * from disk in between the two stages.  This is necessary so that we can free
3493 * space in the real-time portion of the file system.
3494 *
3495 * We run this whole process under GFP_NOFS allocation context. We do a
3496 * combination of non-transactional and transactional work, yet we really don't
3497 * want to recurse into the filesystem from direct reclaim during any of this
3498 * processing. This lets all the recovery code run here ignore the memory
3499 * allocation context it is running in.
3500 */
3501int
3502xlog_recover_finish(
3503	struct xlog	*log)
3504{
3505	unsigned int	nofs_flags = memalloc_nofs_save();
3506	int		error;
3507
3508	error = xlog_recover_process_intents(log);
3509	if (error) {
3510		/*
3511		 * Cancel all the unprocessed intent items now so that we don't
3512		 * leave them pinned in the AIL.  This can cause the AIL to
3513		 * livelock on the pinned item if anyone tries to push the AIL
3514		 * (inode reclaim does this) before we get around to
3515		 * xfs_log_mount_cancel.
3516		 */
3517		xlog_recover_cancel_intents(log);
3518		xfs_alert(log->l_mp, "Failed to recover intents");
3519		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3520		goto out_error;
3521	}
3522
3523	/*
3524	 * Sync the log to get all the intents out of the AIL.  This isn't
3525	 * absolutely necessary, but it helps in case the unlink transactions
3526	 * would have problems pushing the intents out of the way.
3527	 */
3528	xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3529
3530	xlog_recover_process_iunlinks(log);
3531
3532	/*
3533	 * Recover any CoW staging blocks that are still referenced by the
3534	 * ondisk refcount metadata.  During mount there cannot be any live
3535	 * staging extents as we have not permitted any user modifications.
3536	 * Therefore, it is safe to free them all right now, even on a
3537	 * read-only mount.
3538	 */
3539	error = xfs_reflink_recover_cow(log->l_mp);
3540	if (error) {
3541		xfs_alert(log->l_mp,
3542	"Failed to recover leftover CoW staging extents, err %d.",
3543				error);
3544		/*
3545		 * If we get an error here, make sure the log is shut down
3546		 * but return zero so that any log items committed since the
3547		 * end of intents processing can be pushed through the CIL
3548		 * and AIL.
3549		 */
3550		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3551		error = 0;
3552		goto out_error;
3553	}
3554
3555out_error:
3556	memalloc_nofs_restore(nofs_flags);
3557	return error;
3558}
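/*
 * Editor's sketch (not part of the original source): the NOFS scoping used
 * above is the standard kernel pattern.  Inside the save/restore pair,
 * every allocation implicitly behaves as GFP_NOFS, so direct reclaim
 * cannot recurse back into the filesystem.  do_recovery_work() is a
 * hypothetical stand-in for the work done in between.
 */
#include <linux/sched/mm.h>

int do_recovery_work(void);	/* hypothetical helper */

static int
example_nofs_scope(void)
{
	unsigned int	nofs_flags = memalloc_nofs_save();
	int		error;

	error = do_recovery_work();

	memalloc_nofs_restore(nofs_flags);
	return error;
}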
3559
3560void
3561xlog_recover_cancel(
3562	struct xlog	*log)
3563{
3564	if (xlog_recovery_needed(log))
3565		xlog_recover_cancel_intents(log);
3566}
3567
v3.5.6
   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_inum.h"
  24#include "xfs_trans.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_mount.h"
  28#include "xfs_error.h"
  29#include "xfs_bmap_btree.h"
  30#include "xfs_alloc_btree.h"
  31#include "xfs_ialloc_btree.h"
  32#include "xfs_dinode.h"
  33#include "xfs_inode.h"
  34#include "xfs_inode_item.h"
  35#include "xfs_alloc.h"
  36#include "xfs_ialloc.h"
  37#include "xfs_log_priv.h"
  38#include "xfs_buf_item.h"
  39#include "xfs_log_recover.h"
  40#include "xfs_extfree_item.h"
  41#include "xfs_trans_priv.h"
  42#include "xfs_quota.h"
  43#include "xfs_utils.h"
  44#include "xfs_trace.h"
  45
  46STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
  47STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
  48#if defined(DEBUG)
  49STATIC void	xlog_recover_check_summary(xlog_t *);
  50#else
  51#define	xlog_recover_check_summary(log)
  52#endif
  53
  54/*
  55 * This structure is used during recovery to record the buf log items which
  56 * have been canceled and should not be replayed.
  57 */
  58struct xfs_buf_cancel {
  59	xfs_daddr_t		bc_blkno;
  60	uint			bc_len;
  61	int			bc_refcount;
  62	struct list_head	bc_list;
  63};
  64
  65/*
  66 * Sector aligned buffer routines for buffer create/read/write/access
  67 */
  68
  69/*
  70 * Verify the given count of basic blocks is a valid number of blocks
  71 * to specify for an operation involving the given XFS log buffer.
  72 * Returns nonzero if the count is valid, 0 otherwise.
  73 */
  74
  75static inline int
  76xlog_buf_bbcount_valid(
  77	xlog_t		*log,
  78	int		bbcount)
  79{
  80	return bbcount > 0 && bbcount <= log->l_logBBsize;
  81}
  82
  83/*
  84 * Allocate a buffer to hold log data.  The buffer needs to be able
  85 * to map to a range of nbblks basic blocks at any valid (basic
  86 * block) offset within the log.
  87 */
  88STATIC xfs_buf_t *
  89xlog_get_bp(
  90	xlog_t		*log,
  91	int		nbblks)
  92{
  93	struct xfs_buf	*bp;
  94
  95	if (!xlog_buf_bbcount_valid(log, nbblks)) {
  96		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
  97			nbblks);
  98		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
  99		return NULL;
 100	}
 101
 102	/*
 103	 * We do log I/O in units of log sectors (a power-of-2
 104	 * multiple of the basic block size), so we round up the
 105	 * requested size to accommodate the basic blocks required
 106	 * for complete log sectors.
 107	 *
 108	 * In addition, the buffer may be used for a non-sector-
 109	 * aligned block offset, in which case an I/O of the
 110	 * requested size could extend beyond the end of the
 111	 * buffer.  If the requested size is only 1 basic block it
 112	 * will never straddle a sector boundary, so this won't be
 113	 * an issue.  Nor will this be a problem if the log I/O is
 114	 * done in basic blocks (sector size 1).  But otherwise we
 115	 * extend the buffer by one extra log sector to ensure
 116	 * there's space to accommodate this possibility.
 117	 */
 118	if (nbblks > 1 && log->l_sectBBsize > 1)
 119		nbblks += log->l_sectBBsize;
 120	nbblks = round_up(nbblks, log->l_sectBBsize);
 121
 122	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
 123	if (bp)
 124		xfs_buf_unlock(bp);
 125	return bp;
 126}
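/*
 * Editor's sketch (not part of the original source): the sizing arithmetic
 * above, worked in plain C.  With a log sector of 8 basic blocks, a
 * 5-block request first grows by one sector (5 + 8 = 13) to cover an
 * unaligned start, then rounds up to 16 blocks; a 1-block request never
 * straddles a sector, so it only rounds up to 8.
 */
#include <assert.h>

static int
example_size_log_buffer(int nbblks, int sectBBsize)
{
	if (nbblks > 1 && sectBBsize > 1)
		nbblks += sectBBsize;
	/* round_up() for a power-of-two sector size */
	return (nbblks + sectBBsize - 1) & ~(sectBBsize - 1);
}

int main(void)
{
	assert(example_size_log_buffer(5, 8) == 16);
	assert(example_size_log_buffer(1, 8) == 8);
	return 0;
}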
 127
 128STATIC void
 129xlog_put_bp(
 130	xfs_buf_t	*bp)
 131{
 132	xfs_buf_free(bp);
 133}
 134
 135/*
 136 * Return the address of the start of the given block number's data
 137 * in a log buffer.  The buffer covers a log sector-aligned region.
 138 */
 139STATIC xfs_caddr_t
 140xlog_align(
 141	xlog_t		*log,
 142	xfs_daddr_t	blk_no,
 143	int		nbblks,
 144	xfs_buf_t	*bp)
 145{
 146	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
 147
 148	ASSERT(offset + nbblks <= bp->b_length);
 149	return bp->b_addr + BBTOB(offset);
 150}
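/*
 * Editor's sketch (not part of the original source): xlog_align() reduced
 * to its arithmetic.  Reads are rounded down to a sector boundary, so the
 * data for block 13 in an 8-block sector sits (13 & 7) = 5 basic blocks,
 * i.e. 5 * 512 = 2560 bytes, into the buffer.
 */
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_BBSHIFT	9	/* BBSIZE == 512 */

static uint64_t
example_align_offset(uint64_t blk_no, int sectBBsize)
{
	return (blk_no & (uint64_t)(sectBBsize - 1)) << EXAMPLE_BBSHIFT;
}

int main(void)
{
	assert(example_align_offset(13, 8) == 2560);
	return 0;
}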
 151
 152
 153/*
 154 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 155 */
 156STATIC int
 157xlog_bread_noalign(
 158	xlog_t		*log,
 159	xfs_daddr_t	blk_no,
 160	int		nbblks,
 161	xfs_buf_t	*bp)
 162{
 163	int		error;
 164
 165	if (!xlog_buf_bbcount_valid(log, nbblks)) {
 166		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
 167			nbblks);
 168		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
 169		return EFSCORRUPTED;
 170	}
 171
 172	blk_no = round_down(blk_no, log->l_sectBBsize);
 173	nbblks = round_up(nbblks, log->l_sectBBsize);
 174
 175	ASSERT(nbblks > 0);
 176	ASSERT(nbblks <= bp->b_length);
 177
 178	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
 179	XFS_BUF_READ(bp);
 180	bp->b_io_length = nbblks;
 181	bp->b_error = 0;
 182
 183	xfsbdstrat(log->l_mp, bp);
 184	error = xfs_buf_iowait(bp);
 185	if (error)
 186		xfs_buf_ioerror_alert(bp, __func__);
 187	return error;
 188}
 189
 190STATIC int
 191xlog_bread(
 192	xlog_t		*log,
 193	xfs_daddr_t	blk_no,
 194	int		nbblks,
 195	xfs_buf_t	*bp,
 196	xfs_caddr_t	*offset)
 197{
 198	int		error;
 199
 200	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
 201	if (error)
 202		return error;
 203
 204	*offset = xlog_align(log, blk_no, nbblks, bp);
 205	return 0;
 206}
 207
 208/*
  209 * Read at an offset into the buffer. Returns with the buffer in its original
 210 * state regardless of the result of the read.
 211 */
 212STATIC int
 213xlog_bread_offset(
 214	xlog_t		*log,
 215	xfs_daddr_t	blk_no,		/* block to read from */
 216	int		nbblks,		/* blocks to read */
 217	xfs_buf_t	*bp,
 218	xfs_caddr_t	offset)
 219{
 220	xfs_caddr_t	orig_offset = bp->b_addr;
 221	int		orig_len = BBTOB(bp->b_length);
 222	int		error, error2;
 223
 224	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
 225	if (error)
 226		return error;
 227
 228	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
 229
 230	/* must reset buffer pointer even on error */
 231	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
 232	if (error)
 233		return error;
 234	return error2;
 235}
 236
 237/*
 238 * Write out the buffer at the given block for the given number of blocks.
 239 * The buffer is kept locked across the write and is returned locked.
 240 * This can only be used for synchronous log writes.
 241 */
 242STATIC int
 243xlog_bwrite(
 244	xlog_t		*log,
 245	xfs_daddr_t	blk_no,
 246	int		nbblks,
 247	xfs_buf_t	*bp)
 248{
 249	int		error;
 250
 251	if (!xlog_buf_bbcount_valid(log, nbblks)) {
 252		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
 253			nbblks);
 254		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
 255		return EFSCORRUPTED;
 256	}
 257
 258	blk_no = round_down(blk_no, log->l_sectBBsize);
 259	nbblks = round_up(nbblks, log->l_sectBBsize);
 260
 261	ASSERT(nbblks > 0);
 262	ASSERT(nbblks <= bp->b_length);
 263
 264	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
 265	XFS_BUF_ZEROFLAGS(bp);
 266	xfs_buf_hold(bp);
 267	xfs_buf_lock(bp);
 268	bp->b_io_length = nbblks;
 269	bp->b_error = 0;
 270
 271	error = xfs_bwrite(bp);
 272	if (error)
 273		xfs_buf_ioerror_alert(bp, __func__);
 274	xfs_buf_relse(bp);
 275	return error;
 276}
 277
 278#ifdef DEBUG
 279/*
 280 * dump debug superblock and log record information
 281 */
 282STATIC void
 283xlog_header_check_dump(
 284	xfs_mount_t		*mp,
 285	xlog_rec_header_t	*head)
 286{
 287	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
 288		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
 289	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
 290		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
 291}
 292#else
 293#define xlog_header_check_dump(mp, head)
 294#endif
 295
 296/*
 297 * check log record header for recovery
 298 */
 299STATIC int
 300xlog_header_check_recover(
 301	xfs_mount_t		*mp,
 302	xlog_rec_header_t	*head)
 303{
 304	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 305
 306	/*
 307	 * IRIX doesn't write the h_fmt field and leaves it zeroed
 308	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
 309	 * a dirty log created in IRIX.
 310	 */
 311	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
 312		xfs_warn(mp,
 313	"dirty log written in incompatible format - can't recover");
 314		xlog_header_check_dump(mp, head);
 315		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
 316				 XFS_ERRLEVEL_HIGH, mp);
 317		return XFS_ERROR(EFSCORRUPTED);
 318	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
 319		xfs_warn(mp,
 320	"dirty log entry has mismatched uuid - can't recover");
 321		xlog_header_check_dump(mp, head);
 322		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
 323				 XFS_ERRLEVEL_HIGH, mp);
 324		return XFS_ERROR(EFSCORRUPTED);
 325	}
 326	return 0;
 327}
 328
 329/*
 330 * read the head block of the log and check the header
 331 */
 332STATIC int
 333xlog_header_check_mount(
 334	xfs_mount_t		*mp,
 335	xlog_rec_header_t	*head)
 336{
 337	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 338
 339	if (uuid_is_nil(&head->h_fs_uuid)) {
 340		/*
 341		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
 342		 * h_fs_uuid is nil, we assume this log was last mounted
 343		 * by IRIX and continue.
 344		 */
 345		xfs_warn(mp, "nil uuid in log - IRIX style log");
 346	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
 347		xfs_warn(mp, "log has mismatched uuid - can't recover");
 348		xlog_header_check_dump(mp, head);
 349		XFS_ERROR_REPORT("xlog_header_check_mount",
 350				 XFS_ERRLEVEL_HIGH, mp);
 351		return XFS_ERROR(EFSCORRUPTED);
 352	}
 353	return 0;
 354}
 355
 356STATIC void
 357xlog_recover_iodone(
 358	struct xfs_buf	*bp)
 359{
 360	if (bp->b_error) {
 361		/*
 362		 * We're not going to bother about retrying
 363		 * this during recovery. One strike!
 364		 */
 365		xfs_buf_ioerror_alert(bp, __func__);
 366		xfs_force_shutdown(bp->b_target->bt_mount,
 367					SHUTDOWN_META_IO_ERROR);
 368	}
 369	bp->b_iodone = NULL;
 370	xfs_buf_ioend(bp, 0);
 371}
 372
 373/*
 374 * This routine finds (to an approximation) the first block in the physical
 375 * log which contains the given cycle.  It uses a binary search algorithm.
 376 * Note that the algorithm can not be perfect because the disk will not
 377 * necessarily be perfect.
 378 */
 379STATIC int
 380xlog_find_cycle_start(
 381	xlog_t		*log,
 382	xfs_buf_t	*bp,
 383	xfs_daddr_t	first_blk,
 384	xfs_daddr_t	*last_blk,
 385	uint		cycle)
 386{
 387	xfs_caddr_t	offset;
 388	xfs_daddr_t	mid_blk;
 389	xfs_daddr_t	end_blk;
 390	uint		mid_cycle;
 391	int		error;
 392
 393	end_blk = *last_blk;
 394	mid_blk = BLK_AVG(first_blk, end_blk);
 395	while (mid_blk != first_blk && mid_blk != end_blk) {
 396		error = xlog_bread(log, mid_blk, 1, bp, &offset);
 397		if (error)
 398			return error;
 399		mid_cycle = xlog_get_cycle(offset);
 400		if (mid_cycle == cycle)
 401			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
 402		else
 403			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
 404		mid_blk = BLK_AVG(first_blk, end_blk);
 405	}
 406	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
 407	       (mid_blk == end_blk && mid_blk-1 == first_blk));
 408
 409	*last_blk = end_blk;
 410
 411	return 0;
 412}
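/*
 * Editor's sketch (not part of the original source): the bisection above
 * over an in-memory array of cycle numbers.  The invariant matches the
 * caller's setup -- the cycle at first_blk differs from the target and
 * the cycle at last_blk matches it -- so the loop converges on the first
 * matching block.
 */
#include <assert.h>

static int
example_find_cycle_start(const unsigned *cycles, int first_blk, int last_blk,
			 unsigned cycle)
{
	int	mid_blk = (first_blk + last_blk) / 2;

	while (mid_blk != first_blk && mid_blk != last_blk) {
		if (cycles[mid_blk] == cycle)
			last_blk = mid_blk;	/* match is at or before mid */
		else
			first_blk = mid_blk;	/* match is after mid */
		mid_blk = (first_blk + last_blk) / 2;
	}
	return last_blk;
}

int main(void)
{
	unsigned	cycles[] = { 9, 9, 9, 8, 8 };

	/* the first block stamped with cycle 8 is block 3 */
	assert(example_find_cycle_start(cycles, 0, 4, 8) == 3);
	return 0;
}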
 413
 414/*
 415 * Check that a range of blocks does not contain stop_on_cycle_no.
 416 * Fill in *new_blk with the block offset where such a block is
 417 * found, or with -1 (an invalid block number) if there is no such
 418 * block in the range.  The scan needs to occur from front to back
 419 * and the pointer into the region must be updated since a later
 420 * routine will need to perform another test.
 421 */
 422STATIC int
 423xlog_find_verify_cycle(
 424	xlog_t		*log,
 425	xfs_daddr_t	start_blk,
 426	int		nbblks,
 427	uint		stop_on_cycle_no,
 428	xfs_daddr_t	*new_blk)
 429{
 430	xfs_daddr_t	i, j;
 431	uint		cycle;
 432	xfs_buf_t	*bp;
 433	xfs_daddr_t	bufblks;
 434	xfs_caddr_t	buf = NULL;
 435	int		error = 0;
 436
 437	/*
 438	 * Greedily allocate a buffer big enough to handle the full
 439	 * range of basic blocks we'll be examining.  If that fails,
 440	 * try a smaller size.  We need to be able to read at least
 441	 * a log sector, or we're out of luck.
 442	 */
 443	bufblks = 1 << ffs(nbblks);
 444	while (bufblks > log->l_logBBsize)
 445		bufblks >>= 1;
 446	while (!(bp = xlog_get_bp(log, bufblks))) {
 447		bufblks >>= 1;
 448		if (bufblks < log->l_sectBBsize)
 449			return ENOMEM;
 450	}
 451
 452	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
 453		int	bcount;
 454
 455		bcount = min(bufblks, (start_blk + nbblks - i));
 456
 457		error = xlog_bread(log, i, bcount, bp, &buf);
 458		if (error)
 459			goto out;
 460
 461		for (j = 0; j < bcount; j++) {
 462			cycle = xlog_get_cycle(buf);
 463			if (cycle == stop_on_cycle_no) {
 464				*new_blk = i+j;
 465				goto out;
 466			}
 467
 468			buf += BBSIZE;
 469		}
 470	}
 471
 472	*new_blk = -1;
 473
 474out:
 475	xlog_put_bp(bp);
 476	return error;
 477}
 478
 479/*
 480 * Potentially backup over partial log record write.
 481 *
 482 * In the typical case, last_blk is the number of the block directly after
 483 * a good log record.  Therefore, we subtract one to get the block number
 484 * of the last block in the given buffer.  extra_bblks contains the number
 485 * of blocks we would have read on a previous read.  This happens when the
 486 * last log record is split over the end of the physical log.
 487 *
 488 * extra_bblks is the number of blocks potentially verified on a previous
 489 * call to this routine.
 490 */
 491STATIC int
 492xlog_find_verify_log_record(
 493	xlog_t			*log,
 494	xfs_daddr_t		start_blk,
 495	xfs_daddr_t		*last_blk,
 496	int			extra_bblks)
 497{
 498	xfs_daddr_t		i;
 499	xfs_buf_t		*bp;
 500	xfs_caddr_t		offset = NULL;
 501	xlog_rec_header_t	*head = NULL;
 502	int			error = 0;
 503	int			smallmem = 0;
 504	int			num_blks = *last_blk - start_blk;
 505	int			xhdrs;
 506
 507	ASSERT(start_blk != 0 || *last_blk != start_blk);
 508
 509	if (!(bp = xlog_get_bp(log, num_blks))) {
 510		if (!(bp = xlog_get_bp(log, 1)))
 511			return ENOMEM;
 512		smallmem = 1;
 513	} else {
 514		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
 515		if (error)
 516			goto out;
 517		offset += ((num_blks - 1) << BBSHIFT);
 518	}
 519
 520	for (i = (*last_blk) - 1; i >= 0; i--) {
 521		if (i < start_blk) {
 522			/* valid log record not found */
 523			xfs_warn(log->l_mp,
 524		"Log inconsistent (didn't find previous header)");
 525			ASSERT(0);
 526			error = XFS_ERROR(EIO);
 527			goto out;
 528		}
 529
 530		if (smallmem) {
 531			error = xlog_bread(log, i, 1, bp, &offset);
 532			if (error)
 533				goto out;
 534		}
 535
 536		head = (xlog_rec_header_t *)offset;
 537
 538		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
 539			break;
 540
 541		if (!smallmem)
 542			offset -= BBSIZE;
 543	}
 544
 545	/*
 546	 * We hit the beginning of the physical log & still no header.  Return
 547	 * to caller.  If caller can handle a return of -1, then this routine
 548	 * will be called again for the end of the physical log.
 549	 */
 550	if (i == -1) {
 551		error = -1;
 552		goto out;
 553	}
 554
 555	/*
 556	 * We have the final block of the good log (the first block
  557 * of the log record _before_ the head). So we check the uuid.
 558	 */
 559	if ((error = xlog_header_check_mount(log->l_mp, head)))
 560		goto out;
 561
 562	/*
 563	 * We may have found a log record header before we expected one.
 564	 * last_blk will be the 1st block # with a given cycle #.  We may end
 565	 * up reading an entire log record.  In this case, we don't want to
 566	 * reset last_blk.  Only when last_blk points in the middle of a log
 567	 * record do we update last_blk.
 568	 */
 569	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
 570		uint	h_size = be32_to_cpu(head->h_size);
 571
 572		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
 573		if (h_size % XLOG_HEADER_CYCLE_SIZE)
 574			xhdrs++;
 575	} else {
 576		xhdrs = 1;
 577	}
 578
 579	if (*last_blk - i + extra_bblks !=
 580	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
 581		*last_blk = i;
 582
 583out:
 584	xlog_put_bp(bp);
 585	return error;
 586}
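/*
 * Editor's sketch (not part of the original source): the xhdrs calculation
 * above.  A v2 log record carries one header block per 32KB
 * (XLOG_HEADER_CYCLE_SIZE) of record data, rounded up, so a 64KB record
 * needs 2 header blocks and a 100KB record needs 4.
 */
#include <assert.h>

#define EXAMPLE_HEADER_CYCLE_SIZE	(32 * 1024)

static int
example_xhdrs(unsigned int h_size)
{
	return (h_size + EXAMPLE_HEADER_CYCLE_SIZE - 1) /
			EXAMPLE_HEADER_CYCLE_SIZE;
}

int main(void)
{
	assert(example_xhdrs(64 * 1024) == 2);
	assert(example_xhdrs(100 * 1024) == 4);
	return 0;
}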
 587
 588/*
 589 * Head is defined to be the point of the log where the next log write
  590 * could go.  This means that incomplete LR writes at the end are
  591 * eliminated when calculating the head.  We aren't guaranteed that previous
  592 * LRs have complete transactions.  We only know that a cycle number of
 593 * current cycle number -1 won't be present in the log if we start writing
 594 * from our current block number.
 595 *
 596 * last_blk contains the block number of the first block with a given
 597 * cycle number.
 598 *
 599 * Return: zero if normal, non-zero if error.
 600 */
 601STATIC int
 602xlog_find_head(
 603	xlog_t 		*log,
 604	xfs_daddr_t	*return_head_blk)
 605{
 606	xfs_buf_t	*bp;
 607	xfs_caddr_t	offset;
 608	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
 609	int		num_scan_bblks;
 610	uint		first_half_cycle, last_half_cycle;
 611	uint		stop_on_cycle;
 612	int		error, log_bbnum = log->l_logBBsize;
 613
 614	/* Is the end of the log device zeroed? */
 615	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
 616		*return_head_blk = first_blk;
 617
 618		/* Is the whole lot zeroed? */
 619		if (!first_blk) {
 620			/* Linux XFS shouldn't generate totally zeroed logs -
 621			 * mkfs etc write a dummy unmount record to a fresh
 622			 * log so we can store the uuid in there
 623			 */
 624			xfs_warn(log->l_mp, "totally zeroed log");
 625		}
 626
 627		return 0;
 628	} else if (error) {
 629		xfs_warn(log->l_mp, "empty log check failed");
 630		return error;
 631	}
 632
 633	first_blk = 0;			/* get cycle # of 1st block */
 634	bp = xlog_get_bp(log, 1);
 635	if (!bp)
 636		return ENOMEM;
 637
 638	error = xlog_bread(log, 0, 1, bp, &offset);
 639	if (error)
 640		goto bp_err;
 641
 642	first_half_cycle = xlog_get_cycle(offset);
 643
 644	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
 645	error = xlog_bread(log, last_blk, 1, bp, &offset);
 646	if (error)
 647		goto bp_err;
 648
 649	last_half_cycle = xlog_get_cycle(offset);
 650	ASSERT(last_half_cycle != 0);
 651
 652	/*
 653	 * If the 1st half cycle number is equal to the last half cycle number,
 654	 * then the entire log is stamped with the same cycle number.  In this
 655	 * case, head_blk can't be set to zero (which makes sense).  The below
 656	 * math doesn't work out properly with head_blk equal to zero.  Instead,
 657	 * we set it to log_bbnum which is an invalid block number, but this
  658 * value makes the math correct.  If head_blk doesn't change through
 659	 * all the tests below, *head_blk is set to zero at the very end rather
 660	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
 661	 * in a circular file.
 662	 */
 663	if (first_half_cycle == last_half_cycle) {
 664		/*
 665		 * In this case we believe that the entire log should have
 666		 * cycle number last_half_cycle.  We need to scan backwards
 667		 * from the end verifying that there are no holes still
 668		 * containing last_half_cycle - 1.  If we find such a hole,
 669		 * then the start of that hole will be the new head.  The
 670		 * simple case looks like
 671		 *        x | x ... | x - 1 | x
 672		 * Another case that fits this picture would be
 673		 *        x | x + 1 | x ... | x
 674		 * In this case the head really is somewhere at the end of the
 675		 * log, as one of the latest writes at the beginning was
 676		 * incomplete.
 677		 * One more case is
 678		 *        x | x + 1 | x ... | x - 1 | x
 679		 * This is really the combination of the above two cases, and
 680		 * the head has to end up at the start of the x-1 hole at the
 681		 * end of the log.
 682		 *
 683		 * In the 256k log case, we will read from the beginning to the
 684		 * end of the log and search for cycle numbers equal to x-1.
 685		 * We don't worry about the x+1 blocks that we encounter,
 686		 * because we know that they cannot be the head since the log
 687		 * started with x.
 688		 */
 689		head_blk = log_bbnum;
 690		stop_on_cycle = last_half_cycle - 1;
 691	} else {
 692		/*
 693		 * In this case we want to find the first block with cycle
 694		 * number matching last_half_cycle.  We expect the log to be
 695		 * some variation on
 696		 *        x + 1 ... | x ... | x
 697		 * The first block with cycle number x (last_half_cycle) will
 698		 * be where the new head belongs.  First we do a binary search
 699		 * for the first occurrence of last_half_cycle.  The binary
 700		 * search may not be totally accurate, so then we scan back
 701		 * from there looking for occurrences of last_half_cycle before
 702		 * us.  If that backwards scan wraps around the beginning of
 703		 * the log, then we look for occurrences of last_half_cycle - 1
 704		 * at the end of the log.  The cases we're looking for look
 705		 * like
 706		 *                               v binary search stopped here
 707		 *        x + 1 ... | x | x + 1 | x ... | x
 708		 *                   ^ but we want to locate this spot
 709		 * or
 710		 *        <---------> less than scan distance
 711		 *        x + 1 ... | x ... | x - 1 | x
 712		 *                           ^ we want to locate this spot
 713		 */
 714		stop_on_cycle = last_half_cycle;
 715		if ((error = xlog_find_cycle_start(log, bp, first_blk,
 716						&head_blk, last_half_cycle)))
 717			goto bp_err;
 718	}
 719
 720	/*
 721	 * Now validate the answer.  Scan back some number of maximum possible
 722	 * blocks and make sure each one has the expected cycle number.  The
 723	 * maximum is determined by the total possible amount of buffering
 724	 * in the in-core log.  The following number can be made tighter if
 725	 * we actually look at the block size of the filesystem.
 726	 */
 727	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
 728	if (head_blk >= num_scan_bblks) {
 729		/*
 730		 * We are guaranteed that the entire check can be performed
 731		 * in one buffer.
 732		 */
 733		start_blk = head_blk - num_scan_bblks;
 734		if ((error = xlog_find_verify_cycle(log,
 735						start_blk, num_scan_bblks,
 736						stop_on_cycle, &new_blk)))
 737			goto bp_err;
 738		if (new_blk != -1)
 739			head_blk = new_blk;
 740	} else {		/* need to read 2 parts of log */
 741		/*
 742		 * We are going to scan backwards in the log in two parts.
 743		 * First we scan the physical end of the log.  In this part
 744		 * of the log, we are looking for blocks with cycle number
 745		 * last_half_cycle - 1.
 746		 * If we find one, then we know that the log starts there, as
 747		 * we've found a hole that didn't get written in going around
 748		 * the end of the physical log.  The simple case for this is
 749		 *        x + 1 ... | x ... | x - 1 | x
 750		 *        <---------> less than scan distance
 751		 * If all of the blocks at the end of the log have cycle number
 752		 * last_half_cycle, then we check the blocks at the start of
 753		 * the log looking for occurrences of last_half_cycle.  If we
 754		 * find one, then our current estimate for the location of the
 755		 * first occurrence of last_half_cycle is wrong and we move
 756		 * back to the hole we've found.  This case looks like
 757		 *        x + 1 ... | x | x + 1 | x ...
 758		 *                               ^ binary search stopped here
 759		 * Another case we need to handle that only occurs in 256k
 760		 * logs is
 761		 *        x + 1 ... | x ... | x+1 | x ...
 762		 *                   ^ binary search stops here
 763		 * In a 256k log, the scan at the end of the log will see the
 764		 * x + 1 blocks.  We need to skip past those since that is
 765		 * certainly not the head of the log.  By searching for
 766		 * last_half_cycle-1 we accomplish that.
 767		 */
 768		ASSERT(head_blk <= INT_MAX &&
 769			(xfs_daddr_t) num_scan_bblks >= head_blk);
 770		start_blk = log_bbnum - (num_scan_bblks - head_blk);
 771		if ((error = xlog_find_verify_cycle(log, start_blk,
 772					num_scan_bblks - (int)head_blk,
 773					(stop_on_cycle - 1), &new_blk)))
 774			goto bp_err;
 775		if (new_blk != -1) {
 776			head_blk = new_blk;
 777			goto validate_head;
 778		}
 779
 780		/*
 781		 * Scan beginning of log now.  The last part of the physical
 782		 * log is good.  This scan needs to verify that it doesn't find
 783		 * the last_half_cycle.
 784		 */
 785		start_blk = 0;
 786		ASSERT(head_blk <= INT_MAX);
 787		if ((error = xlog_find_verify_cycle(log,
 788					start_blk, (int)head_blk,
 789					stop_on_cycle, &new_blk)))
 790			goto bp_err;
 791		if (new_blk != -1)
 792			head_blk = new_blk;
 793	}
 794
 795validate_head:
 796	/*
 797	 * Now we need to make sure head_blk is not pointing to a block in
 798	 * the middle of a log record.
 799	 */
 800	num_scan_bblks = XLOG_REC_SHIFT(log);
 801	if (head_blk >= num_scan_bblks) {
 802		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
 803
 804		/* start ptr at last block ptr before head_blk */
 805		if ((error = xlog_find_verify_log_record(log, start_blk,
 806							&head_blk, 0)) == -1) {
 807			error = XFS_ERROR(EIO);
 808			goto bp_err;
 809		} else if (error)
 810			goto bp_err;
 811	} else {
 812		start_blk = 0;
 813		ASSERT(head_blk <= INT_MAX);
 814		if ((error = xlog_find_verify_log_record(log, start_blk,
 815							&head_blk, 0)) == -1) {
 816			/* We hit the beginning of the log during our search */
 817			start_blk = log_bbnum - (num_scan_bblks - head_blk);
 818			new_blk = log_bbnum;
 819			ASSERT(start_blk <= INT_MAX &&
 820				(xfs_daddr_t) log_bbnum-start_blk >= 0);
 821			ASSERT(head_blk <= INT_MAX);
 822			if ((error = xlog_find_verify_log_record(log,
 823							start_blk, &new_blk,
 824							(int)head_blk)) == -1) {
 825				error = XFS_ERROR(EIO);
 826				goto bp_err;
 827			} else if (error)
 828				goto bp_err;
 829			if (new_blk != log_bbnum)
 830				head_blk = new_blk;
 831		} else if (error)
 832			goto bp_err;
 833	}
 834
 835	xlog_put_bp(bp);
 836	if (head_blk == log_bbnum)
 837		*return_head_blk = 0;
 838	else
 839		*return_head_blk = head_blk;
 840	/*
 841	 * When returning here, we have a good block number.  Bad block
 842	 * means that during a previous crash, we didn't have a clean break
 843	 * from cycle number N to cycle number N-1.  In this case, we need
 844	 * to find the first block with cycle number N-1.
 845	 */
 846	return 0;
 847
 848 bp_err:
 849	xlog_put_bp(bp);
 850
 851	if (error)
 852		xfs_warn(log->l_mp, "failed to find log head");
 853	return error;
 854}
 855
 856/*
 857 * Find the sync block number or the tail of the log.
 858 *
 859 * This will be the block number of the last record to have its
 860 * associated buffers synced to disk.  Every log record header has
 861 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 862 * to get a sync block number.  The only concern is to figure out which
 863 * log record header to believe.
 864 *
 865 * The following algorithm uses the log record header with the largest
 866 * lsn.  The entire log record does not need to be valid.  We only care
 867 * that the header is valid.
 868 *
 869 * We could speed up search by using current head_blk buffer, but it is not
 870 * available.
 871 */
 872STATIC int
 873xlog_find_tail(
 874	xlog_t			*log,
 875	xfs_daddr_t		*head_blk,
 876	xfs_daddr_t		*tail_blk)
 877{
 878	xlog_rec_header_t	*rhead;
 879	xlog_op_header_t	*op_head;
 880	xfs_caddr_t		offset = NULL;
 881	xfs_buf_t		*bp;
 882	int			error, i, found;
 883	xfs_daddr_t		umount_data_blk;
 884	xfs_daddr_t		after_umount_blk;
 885	xfs_lsn_t		tail_lsn;
 886	int			hblks;
 887
 888	found = 0;
 889
 890	/*
 891	 * Find previous log record
 892	 */
 893	if ((error = xlog_find_head(log, head_blk)))
 894		return error;
 895
 896	bp = xlog_get_bp(log, 1);
 897	if (!bp)
 898		return ENOMEM;
 899	if (*head_blk == 0) {				/* special case */
 900		error = xlog_bread(log, 0, 1, bp, &offset);
 901		if (error)
 902			goto done;
 903
 904		if (xlog_get_cycle(offset) == 0) {
 905			*tail_blk = 0;
 906			/* leave all other log inited values alone */
 907			goto done;
 908		}
 909	}
 910
 911	/*
 912	 * Search backwards looking for log record header block
 913	 */
 914	ASSERT(*head_blk < INT_MAX);
 915	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
 916		error = xlog_bread(log, i, 1, bp, &offset);
 917		if (error)
 918			goto done;
 919
 920		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 921			found = 1;
 922			break;
 923		}
 924	}
 925	/*
 926	 * If we haven't found the log record header block, start looking
 927	 * again from the end of the physical log.  XXXmiken: There should be
 928	 * a check here to make sure we didn't search more than N blocks in
 929	 * the previous code.
 930	 */
 931	if (!found) {
 932		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
 933			error = xlog_bread(log, i, 1, bp, &offset);
 934			if (error)
 935				goto done;
 936
 937			if (*(__be32 *)offset ==
 938			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 939				found = 2;
 940				break;
 941			}
 942		}
 943	}
 944	if (!found) {
 945		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
 946		ASSERT(0);
 947		return XFS_ERROR(EIO);
 948	}
 949
 950	/* find blk_no of tail of log */
 951	rhead = (xlog_rec_header_t *)offset;
 952	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
 953
 954	/*
 955	 * Reset log values according to the state of the log when we
 956	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
 957	 * one because the next write starts a new cycle rather than
 958	 * continuing the cycle of the last good log record.  At this
 959	 * point we have guaranteed that all partial log records have been
 960	 * accounted for.  Therefore, we know that the last good log record
 961	 * written was complete and ended exactly on the end boundary
 962	 * of the physical log.
 963	 */
 964	log->l_prev_block = i;
 965	log->l_curr_block = (int)*head_blk;
 966	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 967	if (found == 2)
 968		log->l_curr_cycle++;
 969	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
 970	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
 971	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
 972					BBTOB(log->l_curr_block));
 973	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
 974					BBTOB(log->l_curr_block));
 975
 976	/*
 977	 * Look for unmount record.  If we find it, then we know there
 978	 * was a clean unmount.  Since 'i' could be the last block in
 979	 * the physical log, we convert to a log block before comparing
 980	 * to the head_blk.
 981	 *
 982	 * Save the current tail lsn to use to pass to
 983	 * xlog_clear_stale_blocks() below.  We won't want to clear the
 984	 * unmount record if there is one, so we pass the lsn of the
 985	 * unmount record rather than the block after it.
 986	 */
 987	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
 988		int	h_size = be32_to_cpu(rhead->h_size);
 989		int	h_version = be32_to_cpu(rhead->h_version);
 990
 991		if ((h_version & XLOG_VERSION_2) &&
 992		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
 993			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
 994			if (h_size % XLOG_HEADER_CYCLE_SIZE)
 995				hblks++;
 996		} else {
 997			hblks = 1;
 998		}
 999	} else {
1000		hblks = 1;
1001	}
1002	after_umount_blk = (i + hblks + (int)
1003		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1004	tail_lsn = atomic64_read(&log->l_tail_lsn);
1005	if (*head_blk == after_umount_blk &&
1006	    be32_to_cpu(rhead->h_num_logops) == 1) {
1007		umount_data_blk = (i + hblks) % log->l_logBBsize;
1008		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1009		if (error)
1010			goto done;
1011
1012		op_head = (xlog_op_header_t *)offset;
1013		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1014			/*
1015			 * Set tail and last sync so that newly written
1016			 * log records will point recovery to after the
1017			 * current unmount record.
1018			 */
1019			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1020					log->l_curr_cycle, after_umount_blk);
1021			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1022					log->l_curr_cycle, after_umount_blk);
1023			*tail_blk = after_umount_blk;
1024
1025			/*
1026			 * Note that the unmount was clean. If the unmount
1027			 * was not clean, we need to know this to rebuild the
1028			 * superblock counters from the perag headers if we
1029			 * have a filesystem using non-persistent counters.
1030			 */
1031			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1032		}
1033	}
1034
1035	/*
1036	 * Make sure that there are no blocks in front of the head
1037	 * with the same cycle number as the head.  This can happen
1038	 * because we allow multiple outstanding log writes concurrently,
1039	 * and the later writes might make it out before earlier ones.
1040	 *
1041	 * We use the lsn from before modifying it so that we'll never
1042	 * overwrite the unmount record after a clean unmount.
1043	 *
1044	 * Do this only if we are going to recover the filesystem
1045	 *
1046	 * NOTE: This used to say "if (!readonly)"
1047	 * However on Linux, we can & do recover a read-only filesystem.
1048	 * We only skip recovery if NORECOVERY is specified on mount,
1049	 * in which case we would not be here.
1050	 *
1051	 * But... if the -device- itself is readonly, just skip this.
1052	 * We can't recover this device anyway, so it won't matter.
1053	 */
1054	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1055		error = xlog_clear_stale_blocks(log, tail_lsn);
1056
1057done:
1058	xlog_put_bp(bp);
1059
1060	if (error)
1061		xfs_warn(log->l_mp, "failed to locate log tail");
1062	return error;
1063}
1064
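/*
 * Editor's sketch (not part of the original source): the LSN encoding that
 * xlog_find_tail() relies on.  An LSN packs the cycle number into the high
 * 32 bits and the basic block number into the low 32 bits, which is why
 * BLOCK_LSN() of a record's h_tail_lsn yields the tail block directly.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
example_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

int main(void)
{
	uint64_t	lsn = example_assign_lsn(7, 1234);

	assert((uint32_t)(lsn >> 32) == 7);	/* CYCLE_LSN() */
	assert((uint32_t)lsn == 1234);		/* BLOCK_LSN() */
	return 0;
}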
1065/*
1066 * Is the log zeroed at all?
1067 *
1068 * The last binary search should be changed to perform an X block read
1069 * once X becomes small enough.  You can then search linearly through
1070 * the X blocks.  This will cut down on the number of reads we need to do.
1071 *
1072 * If the log is partially zeroed, this routine will pass back the blkno
1073 * of the first block with cycle number 0.  It won't have a complete LR
1074 * preceding it.
1075 *
1076 * Return:
1077 *	0  => the log is completely written to
1078 *	-1 => use *blk_no as the first block of the log
1079 *	>0 => error has occurred
1080 */
1081STATIC int
1082xlog_find_zeroed(
1083	xlog_t		*log,
1084	xfs_daddr_t	*blk_no)
1085{
1086	xfs_buf_t	*bp;
1087	xfs_caddr_t	offset;
1088	uint	        first_cycle, last_cycle;
1089	xfs_daddr_t	new_blk, last_blk, start_blk;
1090	xfs_daddr_t     num_scan_bblks;
1091	int	        error, log_bbnum = log->l_logBBsize;
1092
1093	*blk_no = 0;
1094
1095	/* check totally zeroed log */
1096	bp = xlog_get_bp(log, 1);
1097	if (!bp)
1098		return ENOMEM;
1099	error = xlog_bread(log, 0, 1, bp, &offset);
1100	if (error)
1101		goto bp_err;
1102
1103	first_cycle = xlog_get_cycle(offset);
1104	if (first_cycle == 0) {		/* completely zeroed log */
1105		*blk_no = 0;
1106		xlog_put_bp(bp);
1107		return -1;
1108	}
1109
1110	/* check partially zeroed log */
1111	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1112	if (error)
1113		goto bp_err;
1114
1115	last_cycle = xlog_get_cycle(offset);
1116	if (last_cycle != 0) {		/* log completely written to */
1117		xlog_put_bp(bp);
1118		return 0;
1119	} else if (first_cycle != 1) {
1120		/*
1121		 * If the cycle of the last block is zero, the cycle of
1122		 * the first block must be 1. If it's not, maybe we're
1123		 * not looking at a log... Bail out.
1124		 */
1125		xfs_warn(log->l_mp,
1126			"Log inconsistent or not a log (last==0, first!=1)");
1127		return XFS_ERROR(EINVAL);
1128	}
1129
1130	/* we have a partially zeroed log */
1131	last_blk = log_bbnum-1;
1132	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1133		goto bp_err;
1134
1135	/*
1136	 * Validate the answer.  Because there is no way to guarantee that
1137	 * the entire log is made up of log records which are the same size,
1138	 * we scan over the defined maximum blocks.  At this point, the maximum
1139	 * is not chosen to mean anything special.   XXXmiken
1140	 */
1141	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1142	ASSERT(num_scan_bblks <= INT_MAX);
1143
1144	if (last_blk < num_scan_bblks)
1145		num_scan_bblks = last_blk;
1146	start_blk = last_blk - num_scan_bblks;
1147
1148	/*
1149	 * We search for any instances of cycle number 0 that occur before
1150	 * our current estimate of the head.  What we're trying to detect is
1151	 *        1 ... | 0 | 1 | 0...
1152	 *                       ^ binary search ends here
1153	 */
1154	if ((error = xlog_find_verify_cycle(log, start_blk,
1155					 (int)num_scan_bblks, 0, &new_blk)))
1156		goto bp_err;
1157	if (new_blk != -1)
1158		last_blk = new_blk;
1159
1160	/*
1161	 * Potentially backup over partial log record write.  We don't need
1162	 * to search the end of the log because we know it is zero.
1163	 */
1164	if ((error = xlog_find_verify_log_record(log, start_blk,
1165				&last_blk, 0)) == -1) {
1166		error = XFS_ERROR(EIO);
1167		goto bp_err;
1168	} else if (error)
1169		goto bp_err;
1170
1171	*blk_no = last_blk;
1172bp_err:
1173	xlog_put_bp(bp);
1174	if (error)
1175		return error;
1176	return -1;
1177}
1178
1179/*
1180 * These are simple subroutines used by xlog_clear_stale_blocks() below
1181 * to initialize a buffer full of empty log record headers and write
1182 * them into the log.
1183 */
1184STATIC void
1185xlog_add_record(
1186	xlog_t			*log,
1187	xfs_caddr_t		buf,
1188	int			cycle,
1189	int			block,
1190	int			tail_cycle,
1191	int			tail_block)
1192{
1193	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1194
1195	memset(buf, 0, BBSIZE);
1196	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1197	recp->h_cycle = cpu_to_be32(cycle);
1198	recp->h_version = cpu_to_be32(
1199			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1200	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1201	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1202	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1203	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1204}
1205
1206STATIC int
1207xlog_write_log_records(
1208	xlog_t		*log,
1209	int		cycle,
1210	int		start_block,
1211	int		blocks,
1212	int		tail_cycle,
1213	int		tail_block)
1214{
1215	xfs_caddr_t	offset;
1216	xfs_buf_t	*bp;
1217	int		balign, ealign;
1218	int		sectbb = log->l_sectBBsize;
1219	int		end_block = start_block + blocks;
1220	int		bufblks;
1221	int		error = 0;
1222	int		i, j = 0;
1223
1224	/*
1225	 * Greedily allocate a buffer big enough to handle the full
1226	 * range of basic blocks to be written.  If that fails, try
1227	 * a smaller size.  We need to be able to write at least a
1228	 * log sector, or we're out of luck.
1229	 */
1230	bufblks = 1 << ffs(blocks);
1231	while (bufblks > log->l_logBBsize)
1232		bufblks >>= 1;
1233	while (!(bp = xlog_get_bp(log, bufblks))) {
1234		bufblks >>= 1;
1235		if (bufblks < sectbb)
1236			return ENOMEM;
1237	}
1238
1239	/* We may need to do a read at the start to fill in part of
1240	 * the buffer in the starting sector not covered by the first
1241	 * write below.
1242	 */
1243	balign = round_down(start_block, sectbb);
1244	if (balign != start_block) {
1245		error = xlog_bread_noalign(log, start_block, 1, bp);
1246		if (error)
1247			goto out_put_bp;
1248
1249		j = start_block - balign;
1250	}
1251
1252	for (i = start_block; i < end_block; i += bufblks) {
1253		int		bcount, endcount;
1254
1255		bcount = min(bufblks, end_block - start_block);
1256		endcount = bcount - j;
1257
1258		/* We may need to do a read at the end to fill in part of
1259		 * the buffer in the final sector not covered by the write.
1260		 * If this is the same sector as the above read, skip it.
1261		 */
1262		ealign = round_down(end_block, sectbb);
1263		if (j == 0 && (start_block + endcount > ealign)) {
1264			offset = bp->b_addr + BBTOB(ealign - start_block);
1265			error = xlog_bread_offset(log, ealign, sectbb,
1266							bp, offset);
1267			if (error)
1268				break;
1269
1270		}
1271
1272		offset = xlog_align(log, start_block, endcount, bp);
1273		for (; j < endcount; j++) {
1274			xlog_add_record(log, offset, cycle, i+j,
1275					tail_cycle, tail_block);
1276			offset += BBSIZE;
1277		}
1278		error = xlog_bwrite(log, start_block, endcount, bp);
1279		if (error)
1280			break;
1281		start_block += endcount;
1282		j = 0;
1283	}
1284
1285 out_put_bp:
1286	xlog_put_bp(bp);
1287	return error;
1288}
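/*
 * Editor's sketch (not part of the original source): the start-alignment
 * fixup above.  Writing from block 13 with an 8-block sector means the
 * buffer actually begins at the rounded-down block 8, so the first
 * j = 13 - 8 = 5 blocks of that sector must be read in first to avoid
 * clobbering them.
 */
#include <assert.h>

int main(void)
{
	int	start_block = 13, sectbb = 8;
	int	balign = start_block & ~(sectbb - 1);	/* round_down() */
	int	j = start_block - balign;

	assert(balign == 8);
	assert(j == 5);
	return 0;
}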
1289
1290/*
1291 * This routine is called to blow away any incomplete log writes out
1292 * in front of the log head.  We do this so that we won't become confused
1293 * if we come up, write only a little bit more, and then crash again.
1294 * If we leave the partial log records out there, this situation could
1295 * cause us to think those partial writes are valid blocks since they
1296 * have the current cycle number.  We get rid of them by overwriting them
1297 * with empty log records with the old cycle number rather than the
1298 * current one.
1299 *
1300 * The tail lsn is passed in rather than taken from
1301 * the log so that we will not write over the unmount record after a
1302 * clean unmount in a 512 block log.  Doing so would leave the log without
1303 * any valid log records in it until a new one was written.  If we crashed
1304 * during that time we would not be able to recover.
1305 */
1306STATIC int
1307xlog_clear_stale_blocks(
1308	xlog_t		*log,
1309	xfs_lsn_t	tail_lsn)
1310{
1311	int		tail_cycle, head_cycle;
1312	int		tail_block, head_block;
1313	int		tail_distance, max_distance;
1314	int		distance;
1315	int		error;
1316
1317	tail_cycle = CYCLE_LSN(tail_lsn);
1318	tail_block = BLOCK_LSN(tail_lsn);
1319	head_cycle = log->l_curr_cycle;
1320	head_block = log->l_curr_block;
1321
1322	/*
1323	 * Figure out the distance between the new head of the log
1324	 * and the tail.  We want to write over any blocks beyond the
1325	 * head that we may have written just before the crash, but
1326	 * we don't want to overwrite the tail of the log.
1327	 */
1328	if (head_cycle == tail_cycle) {
1329		/*
1330		 * The tail is behind the head in the physical log,
1331		 * so the distance from the head to the tail is the
1332		 * distance from the head to the end of the log plus
1333		 * the distance from the beginning of the log to the
1334		 * tail.
1335		 */
1336		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1337			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1338					 XFS_ERRLEVEL_LOW, log->l_mp);
1339			return XFS_ERROR(EFSCORRUPTED);
1340		}
1341		tail_distance = tail_block + (log->l_logBBsize - head_block);
1342	} else {
1343		/*
1344		 * The head is behind the tail in the physical log,
1345		 * so the distance from the head to the tail is just
1346		 * the tail block minus the head block.
1347		 */
1348		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1349			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1350					 XFS_ERRLEVEL_LOW, log->l_mp);
1351			return XFS_ERROR(EFSCORRUPTED);
1352		}
1353		tail_distance = tail_block - head_block;
1354	}
1355
1356	/*
1357	 * If the head is right up against the tail, we can't clear
1358	 * anything.
1359	 */
1360	if (tail_distance <= 0) {
1361		ASSERT(tail_distance == 0);
1362		return 0;
1363	}
1364
1365	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1366	/*
1367	 * Take the smaller of the maximum amount of outstanding I/O
1368	 * we could have and the distance to the tail to clear out.
1369	 * We take the smaller so that we don't overwrite the tail and
1370	 * we don't waste all day writing from the head to the tail
1371	 * for no reason.
1372	 */
1373	max_distance = MIN(max_distance, tail_distance);
1374
1375	if ((head_block + max_distance) <= log->l_logBBsize) {
1376		/*
1377		 * We can stomp all the blocks we need to without
1378		 * wrapping around the end of the log.  Just do it
1379		 * in a single write.  Use the cycle number of the
1380		 * current cycle minus one so that the log will look like:
1381		 *     n ... | n - 1 ...
1382		 */
1383		error = xlog_write_log_records(log, (head_cycle - 1),
1384				head_block, max_distance, tail_cycle,
1385				tail_block);
1386		if (error)
1387			return error;
1388	} else {
1389		/*
1390		 * We need to wrap around the end of the physical log in
1391		 * order to clear all the blocks.  Do it in two separate
1392		 * I/Os.  The first write should be from the head to the
1393		 * end of the physical log, and it should use the current
1394		 * cycle number minus one just like above.
1395		 */
1396		distance = log->l_logBBsize - head_block;
1397		error = xlog_write_log_records(log, (head_cycle - 1),
1398				head_block, distance, tail_cycle,
1399				tail_block);
1400
1401		if (error)
1402			return error;
1403
1404		/*
1405		 * Now write the blocks at the start of the physical log.
1406		 * This writes the remainder of the blocks we want to clear.
1407		 * It uses the current cycle number since we're now on the
1408		 * same cycle as the head so that we get:
1409		 *    n ... n ... | n - 1 ...
1410		 *    ^^^^^ blocks we're writing
1411		 */
1412		distance = max_distance - (log->l_logBBsize - head_block);
1413		error = xlog_write_log_records(log, head_cycle, 0, distance,
1414				tail_cycle, tail_block);
1415		if (error)
1416			return error;
1417	}
1418
1419	return 0;
1420}
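/*
 * Editor's sketch (not part of the original source): the two tail-distance
 * cases above, on a 100-block log.  With head and tail in the same cycle
 * the gap wraps around the end of the log; with the head one cycle ahead
 * it is a simple difference.
 */
#include <assert.h>

static int
example_tail_distance(int head_cycle, int head_block,
		      int tail_cycle, int tail_block, int logBBsize)
{
	if (head_cycle == tail_cycle)	/* tail physically behind the head */
		return tail_block + (logBBsize - head_block);
	return tail_block - head_block;	/* head_cycle == tail_cycle + 1 */
}

int main(void)
{
	assert(example_tail_distance(5, 90, 5, 10, 100) == 20);
	assert(example_tail_distance(6, 10, 5, 90, 100) == 80);
	return 0;
}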
1421
1422/******************************************************************************
1423 *
1424 *		Log recover routines
1425 *
1426 ******************************************************************************
1427 */
1428
1429STATIC xlog_recover_t *
1430xlog_recover_find_tid(
1431	struct hlist_head	*head,
1432	xlog_tid_t		tid)
1433{
1434	xlog_recover_t		*trans;
1435	struct hlist_node	*n;
1436
1437	hlist_for_each_entry(trans, n, head, r_list) {
1438		if (trans->r_log_tid == tid)
1439			return trans;
1440	}
1441	return NULL;
1442}
1443
1444STATIC void
1445xlog_recover_new_tid(
1446	struct hlist_head	*head,
1447	xlog_tid_t		tid,
1448	xfs_lsn_t		lsn)
1449{
1450	xlog_recover_t		*trans;
1451
1452	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1453	trans->r_log_tid   = tid;
1454	trans->r_lsn	   = lsn;
1455	INIT_LIST_HEAD(&trans->r_itemq);
1456
1457	INIT_HLIST_NODE(&trans->r_list);
1458	hlist_add_head(&trans->r_list, head);
1459}
1460
1461STATIC void
1462xlog_recover_add_item(
1463	struct list_head	*head)
1464{
1465	xlog_recover_item_t	*item;
1466
1467	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1468	INIT_LIST_HEAD(&item->ri_list);
1469	list_add_tail(&item->ri_list, head);
1470}
1471
1472STATIC int
1473xlog_recover_add_to_cont_trans(
1474	struct xlog		*log,
1475	struct xlog_recover	*trans,
1476	xfs_caddr_t		dp,
1477	int			len)
1478{
1479	xlog_recover_item_t	*item;
1480	xfs_caddr_t		ptr, old_ptr;
1481	int			old_len;
1482
1483	if (list_empty(&trans->r_itemq)) {
1484		/* finish copying rest of trans header */
1485		xlog_recover_add_item(&trans->r_itemq);
1486		ptr = (xfs_caddr_t) &trans->r_theader +
1487				sizeof(xfs_trans_header_t) - len;
1488		memcpy(ptr, dp, len); /* d, s, l */
1489		return 0;
1490	}
1491	/* take the tail entry */
1492	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1493
1494	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1495	old_len = item->ri_buf[item->ri_cnt-1].i_len;
1496
1497	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1498	memcpy(&ptr[old_len], dp, len); /* d, s, l */
1499	item->ri_buf[item->ri_cnt-1].i_len += len;
1500	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1501	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1502	return 0;
1503}
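/*
 * Editor's sketch (not part of the original source): the continuation case
 * above is a plain grow-and-append of the tail region's payload, which is
 * what the kmem_realloc() + memcpy() pair amounts to.  Userspace analogue:
 */
#include <stdlib.h>
#include <string.h>

static char *
example_append_region(char *old_ptr, int old_len, const char *dp, int len)
{
	char	*ptr = realloc(old_ptr, old_len + len);

	if (ptr)
		memcpy(ptr + old_len, dp, len);	/* append the new fragment */
	return ptr;	/* NULL on failure; caller still owns old_ptr */
}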
1504
1505/*
1506 * The next region to add is the start of a new region.  It could be
1507 * a whole region or it could be the first part of a new region.  Because
1508 * of this, the assumption here is that the type and size fields of all
1509 * format structures fit into the first 32 bits of the structure.
1510 *
1511 * This works because all regions must be 32 bit aligned.  Therefore, we
1512 * either have both fields or we have neither field.  In the case we have
1513 * neither field, the data part of the region is zero length.  We only have
1514 * a log_op_header and can throw away the header since a new one will appear
1515 * later.  If we have at least 4 bytes, then we can determine how many regions
1516 * will appear in the current log item.
1517 */
1518STATIC int
1519xlog_recover_add_to_trans(
1520	struct xlog		*log,
1521	struct xlog_recover	*trans,
1522	xfs_caddr_t		dp,
1523	int			len)
1524{
1525	xfs_inode_log_format_t	*in_f;			/* any will do */
1526	xlog_recover_item_t	*item;
1527	xfs_caddr_t		ptr;
1528
1529	if (!len)
1530		return 0;
1531	if (list_empty(&trans->r_itemq)) {
1532		/* we need to catch log corruptions here */
1533		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1534			xfs_warn(log->l_mp, "%s: bad header magic number",
1535				__func__);
1536			ASSERT(0);
1537			return XFS_ERROR(EIO);
1538		}
1539		if (len == sizeof(xfs_trans_header_t))
1540			xlog_recover_add_item(&trans->r_itemq);
1541		memcpy(&trans->r_theader, dp, len); /* d, s, l */
1542		return 0;
1543	}
1544
1545	ptr = kmem_alloc(len, KM_SLEEP);
1546	memcpy(ptr, dp, len);
1547	in_f = (xfs_inode_log_format_t *)ptr;
1548
1549	/* take the tail entry */
1550	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1551	if (item->ri_total != 0 &&
1552	     item->ri_total == item->ri_cnt) {
1553		/* tail item is in use, get a new one */
1554		xlog_recover_add_item(&trans->r_itemq);
1555		item = list_entry(trans->r_itemq.prev,
1556					xlog_recover_item_t, ri_list);
1557	}
1558
1559	if (item->ri_total == 0) {		/* first region to be added */
1560		if (in_f->ilf_size == 0 ||
1561		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1562			xfs_warn(log->l_mp,
1563		"bad number of regions (%d) in inode log format",
1564				  in_f->ilf_size);
1565			ASSERT(0);
1566			return XFS_ERROR(EIO);
1567		}
1568
1569		item->ri_total = in_f->ilf_size;
1570		item->ri_buf =
1571			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1572				    KM_SLEEP);
1573	}
1574	ASSERT(item->ri_total > item->ri_cnt);
1575	/* Description region is ri_buf[0] */
1576	item->ri_buf[item->ri_cnt].i_addr = ptr;
1577	item->ri_buf[item->ri_cnt].i_len  = len;
1578	item->ri_cnt++;
1579	trace_xfs_log_recover_item_add(log, trans, item, 0);
1580	return 0;
1581}
1582
1583/*
1584 * Sort the log items in the transaction. Buffers that have not been
1585 * cancelled are moved to the front of the queue so they are replayed
1586 * before any items that might modify them; cancelled buffers fall
1587 * through to the tail, as their modifications don't need to be replayed.
1588 */
1589STATIC int
1590xlog_recover_reorder_trans(
1591	struct xlog		*log,
1592	struct xlog_recover	*trans,
1593	int			pass)
1594{
1595	xlog_recover_item_t	*item, *n;
1596	LIST_HEAD(sort_list);
1597
1598	list_splice_init(&trans->r_itemq, &sort_list);
1599	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1600		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1601
1602		switch (ITEM_TYPE(item)) {
1603		case XFS_LI_BUF:
1604			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1605				trace_xfs_log_recover_item_reorder_head(log,
1606							trans, item, pass);
1607				list_move(&item->ri_list, &trans->r_itemq);
1608				break;
1609			}
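			/* fall through: cancelled buffers sort to the tail */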
1610		case XFS_LI_INODE:
1611		case XFS_LI_DQUOT:
1612		case XFS_LI_QUOTAOFF:
1613		case XFS_LI_EFD:
1614		case XFS_LI_EFI:
1615			trace_xfs_log_recover_item_reorder_tail(log,
1616							trans, item, pass);
1617			list_move_tail(&item->ri_list, &trans->r_itemq);
1618			break;
1619		default:
1620			xfs_warn(log->l_mp,
1621				"%s: unrecognized type of log operation",
1622				__func__);
1623			ASSERT(0);
1624			return XFS_ERROR(EIO);
1625		}
1626	}
1627	ASSERT(list_empty(&sort_list));
1628	return 0;
1629}
1630
1631/*
1632 * Build up the table of buf cancel records so that we don't replay
1633 * cancelled data in the second pass.  For buffer records that are
1634 * not cancel records, there is nothing to do here so we just return.
1635 *
1636 * If we get a cancel record which is already in the table, this indicates
1637 * that the buffer was cancelled multiple times.  In order to ensure
1638 * that during pass 2 we keep the record in the table until we reach its
1639 * last occurrence in the log, we keep a reference count in the cancel
1640 * record in the table to tell us how many times we expect to see this
1641 * record during the second pass.
1642 */
1643STATIC int
1644xlog_recover_buffer_pass1(
1645	struct xlog			*log,
1646	struct xlog_recover_item	*item)
1647{
1648	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1649	struct list_head	*bucket;
1650	struct xfs_buf_cancel	*bcp;
1651
1652	/*
1653	 * If this isn't a cancel buffer item, then just return.
1654	 */
1655	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1656		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1657		return 0;
1658	}
1659
1660	/*
1661	 * Insert an xfs_buf_cancel record into the hash table of them.
1662	 * If there is already an identical record, bump its reference count.
1663	 */
1664	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1665	list_for_each_entry(bcp, bucket, bc_list) {
1666		if (bcp->bc_blkno == buf_f->blf_blkno &&
1667		    bcp->bc_len == buf_f->blf_len) {
1668			bcp->bc_refcount++;
1669			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1670			return 0;
1671		}
1672	}
1673
1674	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1675	bcp->bc_blkno = buf_f->blf_blkno;
1676	bcp->bc_len = buf_f->blf_len;
1677	bcp->bc_refcount = 1;
1678	list_add_tail(&bcp->bc_list, bucket);
1679
1680	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1681	return 0;
1682}
1683
1684/*
1685 * Check to see whether the buffer being recovered has a corresponding
1686 * entry in the buffer cancel record table.  If it does then return 1
1687 * so that it will be cancelled, otherwise return 0.  If the buffer is
1688 * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1689 * the refcount on the entry in the table and remove it from the table
1690 * if this is the last reference.
1691 *
1692 * We remove the cancel record from the table when we encounter its
1693 * last occurrence in the log so that if the same buffer is re-used
1694 * again after its last cancellation we actually replay the changes
1695 * made at that point.
1696 */
1697STATIC int
1698xlog_check_buffer_cancelled(
1699	struct xlog		*log,
1700	xfs_daddr_t		blkno,
1701	uint			len,
1702	ushort			flags)
1703{
1704	struct list_head	*bucket;
1705	struct xfs_buf_cancel	*bcp;
1706
1707	if (log->l_buf_cancel_table == NULL) {
1708		/*
1709		 * There is nothing in the table built in pass one,
1710		 * so this buffer must not be cancelled.
1711		 */
1712		ASSERT(!(flags & XFS_BLF_CANCEL));
1713		return 0;
1714	}
1715
1716	/*
1717	 * Search for an entry in the cancel table that matches our buffer.
1718	 */
1719	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1720	list_for_each_entry(bcp, bucket, bc_list) {
1721		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1722			goto found;
1723	}
1724
1725	/*
1726	 * We didn't find a corresponding entry in the table, so return 0 so
1727	 * that the buffer is NOT cancelled.
1728	 */
1729	ASSERT(!(flags & XFS_BLF_CANCEL));
1730	return 0;
1731
1732found:
1733	/*
1734	 * We've got a match, so return 1 so that the recovery of this buffer
1735	 * is cancelled.  If this buffer is actually a buffer cancel log
1736	 * item, then decrement the refcount on the one in the table and
1737	 * remove it if this is the last reference.
1738	 */
1739	if (flags & XFS_BLF_CANCEL) {
1740		if (--bcp->bc_refcount == 0) {
1741			list_del(&bcp->bc_list);
1742			kmem_free(bcp);
1743		}
1744	}
1745	return 1;
1746}
1747
1748/*
1749 * Perform recovery for a buffer full of inodes.  In these buffers, the only
1750 * data which should be recovered is that which corresponds to the
1751 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1752 * data for the inodes is always logged through the inodes themselves rather
1753 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1754 *
1755 * The only time when buffers full of inodes are fully recovered is when the
1756 * buffer is full of newly allocated inodes.  In this case the buffer will
1757 * not be marked as an inode buffer and so will be sent to
1758 * xlog_recover_do_reg_buffer() below during recovery.
1759 */
1760STATIC int
1761xlog_recover_do_inode_buffer(
1762	struct xfs_mount	*mp,
1763	xlog_recover_item_t	*item,
1764	struct xfs_buf		*bp,
1765	xfs_buf_log_format_t	*buf_f)
1766{
1767	int			i;
1768	int			item_index = 0;
1769	int			bit = 0;
1770	int			nbits = 0;
1771	int			reg_buf_offset = 0;
1772	int			reg_buf_bytes = 0;
1773	int			next_unlinked_offset;
1774	int			inodes_per_buf;
1775	xfs_agino_t		*logged_nextp;
1776	xfs_agino_t		*buffer_nextp;
1777
1778	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1779
1780	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1781	for (i = 0; i < inodes_per_buf; i++) {
1782		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1783			offsetof(xfs_dinode_t, di_next_unlinked);
1784
1785		while (next_unlinked_offset >=
1786		       (reg_buf_offset + reg_buf_bytes)) {
1787			/*
1788			 * The next di_next_unlinked field is beyond
1789			 * the current logged region.  Find the next
1790			 * logged region that contains or is beyond
1791			 * the current di_next_unlinked field.
1792			 */
1793			bit += nbits;
1794			bit = xfs_next_bit(buf_f->blf_data_map,
1795					   buf_f->blf_map_size, bit);
1796
1797			/*
1798			 * If there are no more logged regions in the
1799			 * buffer, then we're done.
1800			 */
1801			if (bit == -1)
1802				return 0;
1803
1804			nbits = xfs_contig_bits(buf_f->blf_data_map,
1805						buf_f->blf_map_size, bit);
1806			ASSERT(nbits > 0);
1807			reg_buf_offset = bit << XFS_BLF_SHIFT;
1808			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1809			item_index++;
1810		}
1811
1812		/*
1813		 * If the current logged region starts after the current
1814		 * di_next_unlinked field, then move on to the next
1815		 * di_next_unlinked field.
1816		 */
1817		if (next_unlinked_offset < reg_buf_offset)
1818			continue;
1819
1820		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1821		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1822		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1823							BBTOB(bp->b_io_length));
1824
1825		/*
1826		 * The current logged region contains a copy of the
1827		 * current di_next_unlinked field.  Extract its value
1828		 * and copy it to the buffer copy.
1829		 */
1830		logged_nextp = item->ri_buf[item_index].i_addr +
1831				next_unlinked_offset - reg_buf_offset;
1832		if (unlikely(*logged_nextp == 0)) {
1833			xfs_alert(mp,
1834		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1835		"Trying to replay bad (0) inode di_next_unlinked field.",
1836				item, bp);
1837			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1838					 XFS_ERRLEVEL_LOW, mp);
1839			return XFS_ERROR(EFSCORRUPTED);
1840		}
1841
1842		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1843					      next_unlinked_offset);
1844		*buffer_nextp = *logged_nextp;
1845	}
1846
1847	return 0;
1848}
1849
1850/*
1851 * Perform a 'normal' buffer recovery.  Each logged region of the
1852 * buffer should be copied over the corresponding region in the
1853 * given buffer.  The bitmap in the buf log format structure indicates
1854 * where to place the logged data.
1855 */
1856STATIC void
1857xlog_recover_do_reg_buffer(
1858	struct xfs_mount	*mp,
1859	xlog_recover_item_t	*item,
1860	struct xfs_buf		*bp,
1861	xfs_buf_log_format_t	*buf_f)
1862{
1863	int			i;
1864	int			bit;
1865	int			nbits;
1866	int                     error;
1867
1868	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1869
1870	bit = 0;
1871	i = 1;  /* 0 is the buf format structure */
1872	while (1) {
1873		bit = xfs_next_bit(buf_f->blf_data_map,
1874				   buf_f->blf_map_size, bit);
1875		if (bit == -1)
1876			break;
1877		nbits = xfs_contig_bits(buf_f->blf_data_map,
1878					buf_f->blf_map_size, bit);
1879		ASSERT(nbits > 0);
1880		ASSERT(item->ri_buf[i].i_addr != NULL);
1881		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1882		ASSERT(BBTOB(bp->b_io_length) >=
1883		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
1884
1885		/*
1886		 * Do a sanity check if this is a dquot buffer. Just checking
1887		 * the first dquot in the buffer should do. XXX This is
1888		 * probably a good thing to do for other buf types also.
1889		 */
1890		error = 0;
1891		if (buf_f->blf_flags &
1892		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1893			if (item->ri_buf[i].i_addr == NULL) {
1894				xfs_alert(mp,
1895					"XFS: NULL dquot in %s.", __func__);
1896				goto next;
1897			}
1898			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1899				xfs_alert(mp,
1900					"XFS: dquot too small (%d) in %s.",
1901					item->ri_buf[i].i_len, __func__);
1902				goto next;
1903			}
1904			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1905					       -1, 0, XFS_QMOPT_DOWARN,
1906					       "dquot_buf_recover");
1907			if (error)
1908				goto next;
1909		}
1910
1911		memcpy(xfs_buf_offset(bp,
1912			(uint)bit << XFS_BLF_SHIFT),	/* dest */
1913			item->ri_buf[i].i_addr,		/* source */
1914			nbits<<XFS_BLF_SHIFT);		/* length */
1915 next:
1916		i++;
1917		bit += nbits;
1918	}
1919
1920	/* Shouldn't be any more regions */
1921	ASSERT(i == item->ri_total);
1922}
1923
1924/*
1925 * Do some primitive error checking on ondisk dquot data structures.
1926 */
1927int
1928xfs_qm_dqcheck(
1929	struct xfs_mount *mp,
1930	xfs_disk_dquot_t *ddq,
1931	xfs_dqid_t	 id,
1932	uint		 type,	  /* used only when IO_dorepair is true */
1933	uint		 flags,
1934	char		 *str)
1935{
1936	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
1937	int		errs = 0;
1938
1939	/*
1940	 * We can encounter an uninitialized dquot buffer for 2 reasons:
1941	 * 1. If we crash while deleting the quotainode(s), and those blks got
1942	 *    used for user data. This is because we take the path of regular
1943	 *    file deletion; however, the size field of quotainodes is never
1944	 *    updated, so all the tricks that we play in itruncate_finish
1945	 *    don't quite matter.
1946	 *
1947	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
1948	 *    But the allocation will be replayed so we'll end up with an
1949	 *    uninitialized quota block.
1950	 *
1951	 * This is all fine; things are still consistent, and we haven't lost
1952	 * any quota information. Just don't complain about bad dquot blks.
1953	 */
1954	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
1955		if (flags & XFS_QMOPT_DOWARN)
1956			xfs_alert(mp,
1957			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1958			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1959		errs++;
1960	}
1961	if (ddq->d_version != XFS_DQUOT_VERSION) {
1962		if (flags & XFS_QMOPT_DOWARN)
1963			xfs_alert(mp,
1964			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1965			str, id, ddq->d_version, XFS_DQUOT_VERSION);
1966		errs++;
1967	}
1968
1969	if (ddq->d_flags != XFS_DQ_USER &&
1970	    ddq->d_flags != XFS_DQ_PROJ &&
1971	    ddq->d_flags != XFS_DQ_GROUP) {
1972		if (flags & XFS_QMOPT_DOWARN)
1973			xfs_alert(mp,
1974			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1975			str, id, ddq->d_flags);
1976		errs++;
1977	}
1978
1979	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1980		if (flags & XFS_QMOPT_DOWARN)
1981			xfs_alert(mp,
1982			"%s : ondisk-dquot 0x%p, ID mismatch: "
1983			"0x%x expected, found id 0x%x",
1984			str, ddq, id, be32_to_cpu(ddq->d_id));
1985		errs++;
1986	}
1987
1988	if (!errs && ddq->d_id) {
1989		if (ddq->d_blk_softlimit &&
1990		    be64_to_cpu(ddq->d_bcount) >
1991				be64_to_cpu(ddq->d_blk_softlimit)) {
1992			if (!ddq->d_btimer) {
1993				if (flags & XFS_QMOPT_DOWARN)
1994					xfs_alert(mp,
1995			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
1996					str, (int)be32_to_cpu(ddq->d_id), ddq);
1997				errs++;
1998			}
1999		}
2000		if (ddq->d_ino_softlimit &&
2001		    be64_to_cpu(ddq->d_icount) >
2002				be64_to_cpu(ddq->d_ino_softlimit)) {
2003			if (!ddq->d_itimer) {
2004				if (flags & XFS_QMOPT_DOWARN)
2005					xfs_alert(mp,
2006			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2007					str, (int)be32_to_cpu(ddq->d_id), ddq);
2008				errs++;
2009			}
2010		}
2011		if (ddq->d_rtb_softlimit &&
2012		    be64_to_cpu(ddq->d_rtbcount) >
2013				be64_to_cpu(ddq->d_rtb_softlimit)) {
2014			if (!ddq->d_rtbtimer) {
2015				if (flags & XFS_QMOPT_DOWARN)
2016					xfs_alert(mp,
2017			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2018					str, (int)be32_to_cpu(ddq->d_id), ddq);
2019				errs++;
2020			}
2021		}
2022	}
2023
2024	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2025		return errs;
2026
2027	if (flags & XFS_QMOPT_DOWARN)
2028		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2029
2030	/*
2031	 * Typically, a repair is only requested by quotacheck.
2032	 */
2033	ASSERT(id != -1);
2034	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2035	memset(d, 0, sizeof(xfs_dqblk_t));
2036
2037	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2038	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2039	d->dd_diskdq.d_flags = type;
2040	d->dd_diskdq.d_id = cpu_to_be32(id);
2041
2042	return errs;
2043}
2044
2045/*
2046 * Perform a dquot buffer recovery.
2047 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2048 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2049 * Else, treat it as a regular buffer and do recovery.
2050 */
2051STATIC void
2052xlog_recover_do_dquot_buffer(
2053	xfs_mount_t		*mp,
2054	xlog_t			*log,
2055	xlog_recover_item_t	*item,
2056	xfs_buf_t		*bp,
2057	xfs_buf_log_format_t	*buf_f)
2058{
2059	uint			type;
2060
2061	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2062
2063	/*
2064	 * Filesystems are required to send in quota flags at mount time.
2065	 */
2066	if (mp->m_qflags == 0) {
2067		return;
2068	}
2069
2070	type = 0;
2071	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2072		type |= XFS_DQ_USER;
2073	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2074		type |= XFS_DQ_PROJ;
2075	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2076		type |= XFS_DQ_GROUP;
2077	/*
2078	 * This type of quota was turned off, so ignore this buffer
2079	 */
2080	if (log->l_quotaoffs_flag & type)
2081		return;
2082
2083	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2084}
2085
2086/*
2087 * This routine replays a modification made to a buffer at runtime.
2088 * There are actually two types of buffer, regular and inode, which
2089 * are handled differently.  Inode buffers are handled differently
2090 * in that we only recover a specific set of data from them, namely
2091 * the inode di_next_unlinked fields.  This is because all other inode
2092 * data is actually logged via inode records and any data we replay
2093 * here which overlaps that may be stale.
2094 *
2095 * When meta-data buffers are freed at run time we log a buffer item
2096 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2097 * of the buffer in the log should not be replayed at recovery time.
2098 * This is so that if the blocks covered by the buffer are reused for
2099 * file data before we crash we don't end up replaying old, freed
2100 * meta-data into a user's file.
2101 *
2102 * To handle the cancellation of buffer log items, we make two passes
2103 * over the log during recovery.  During the first we build a table of
2104 * those buffers which have been cancelled, and during the second we
2105 * only replay those buffers which do not have corresponding cancel
2106 * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2107 * for more details on the implementation of the table of cancel records.
2108 */
2109STATIC int
2110xlog_recover_buffer_pass2(
2111	xlog_t			*log,
2112	struct list_head	*buffer_list,
2113	xlog_recover_item_t	*item)
2114{
2115	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2116	xfs_mount_t		*mp = log->l_mp;
2117	xfs_buf_t		*bp;
2118	int			error;
2119	uint			buf_flags;
2120
2121	/*
2122	 * In this pass we only want to recover all the buffers which have
2123	 * not been cancelled and are not cancellation buffers themselves.
2124	 */
2125	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2126			buf_f->blf_len, buf_f->blf_flags)) {
2127		trace_xfs_log_recover_buf_cancel(log, buf_f);
2128		return 0;
2129	}
2130
2131	trace_xfs_log_recover_buf_recover(log, buf_f);
2132
2133	buf_flags = 0;
2134	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2135		buf_flags |= XBF_UNMAPPED;
2136
2137	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2138			  buf_flags);
2139	if (!bp)
2140		return XFS_ERROR(ENOMEM);
2141	error = bp->b_error;
2142	if (error) {
2143		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2144		xfs_buf_relse(bp);
2145		return error;
2146	}
2147
2148	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2149		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2150	} else if (buf_f->blf_flags &
2151		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2152		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2153	} else {
2154		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2155	}
2156	if (error)
2157		return XFS_ERROR(error);
2158
2159	/*
2160	 * Perform delayed write on the buffer.  Asynchronous writes will be
2161	 * slower when taking into account all the buffers to be flushed.
2162	 *
2163	 * Also make sure that only inode buffers with good sizes stay in
2164	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2165	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2166	 * buffers in the log can be a different size if the log was generated
2167	 * by an older kernel using unclustered inode buffers or a newer kernel
2168	 * running with a different inode cluster size.  Regardless, if
2169	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2170	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2171	 * the buffer out of the buffer cache so that the buffer won't
2172	 * overlap with future reads of those inodes.
2173	 */
2174	if (XFS_DINODE_MAGIC ==
2175	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2176	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2177			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2178		xfs_buf_stale(bp);
2179		error = xfs_bwrite(bp);
2180	} else {
2181		ASSERT(bp->b_target->bt_mount == mp);
2182		bp->b_iodone = xlog_recover_iodone;
2183		xfs_buf_delwri_queue(bp, buffer_list);
2184	}
2185
2186	xfs_buf_relse(bp);
2187	return error;
2188}
2189
2190STATIC int
2191xlog_recover_inode_pass2(
2192	xlog_t			*log,
2193	struct list_head	*buffer_list,
2194	xlog_recover_item_t	*item)
2195{
2196	xfs_inode_log_format_t	*in_f;
2197	xfs_mount_t		*mp = log->l_mp;
2198	xfs_buf_t		*bp;
2199	xfs_dinode_t		*dip;
2200	int			len;
2201	xfs_caddr_t		src;
2202	xfs_caddr_t		dest;
2203	int			error;
2204	int			attr_index;
2205	uint			fields;
2206	xfs_icdinode_t		*dicp;
2207	int			need_free = 0;
2208
2209	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2210		in_f = item->ri_buf[0].i_addr;
2211	} else {
2212		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2213		need_free = 1;
2214		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2215		if (error)
2216			goto error;
2217	}
2218
2219	/*
2220	 * Inode buffers can be freed, look out for it,
2221	 * and do not replay the inode.
2222	 */
2223	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2224					in_f->ilf_len, 0)) {
2225		error = 0;
2226		trace_xfs_log_recover_inode_cancel(log, in_f);
2227		goto error;
2228	}
2229	trace_xfs_log_recover_inode_recover(log, in_f);
2230
2231	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0);
2232	if (!bp) {
2233		error = ENOMEM;
2234		goto error;
2235	}
2236	error = bp->b_error;
2237	if (error) {
2238		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2239		xfs_buf_relse(bp);
2240		goto error;
2241	}
2242	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2243	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2244
2245	/*
2246	 * Make sure the place we're flushing out to really looks
2247	 * like an inode!
2248	 */
2249	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2250		xfs_buf_relse(bp);
2251		xfs_alert(mp,
2252	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2253			__func__, dip, bp, in_f->ilf_ino);
2254		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2255				 XFS_ERRLEVEL_LOW, mp);
2256		error = EFSCORRUPTED;
2257		goto error;
2258	}
2259	dicp = item->ri_buf[1].i_addr;
2260	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2261		xfs_buf_relse(bp);
2262		xfs_alert(mp,
2263			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2264			__func__, item, in_f->ilf_ino);
2265		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2266				 XFS_ERRLEVEL_LOW, mp);
2267		error = EFSCORRUPTED;
2268		goto error;
2269	}
2270
2271	/* Skip replay when the on disk inode is newer than the log one */
2272	if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2273		/*
2274		 * Deal with the wrap case, DI_MAX_FLUSH is less
2275		 * than smaller numbers
2276		 */
2277		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2278		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2279			/* do nothing */
2280		} else {
2281			xfs_buf_relse(bp);
2282			trace_xfs_log_recover_inode_skip(log, in_f);
2283			error = 0;
2284			goto error;
2285		}
2286	}
2287	/* Take the opportunity to reset the flush iteration count */
2288	dicp->di_flushiter = 0;
2289
2290	if (unlikely(S_ISREG(dicp->di_mode))) {
2291		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2292		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2293			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2294					 XFS_ERRLEVEL_LOW, mp, dicp);
2295			xfs_buf_relse(bp);
2296			xfs_alert(mp,
2297		"%s: Bad regular inode log record, rec ptr 0x%p, "
2298		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2299				__func__, item, dip, bp, in_f->ilf_ino);
2300			error = EFSCORRUPTED;
2301			goto error;
2302		}
2303	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2304		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2305		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2306		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2307			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2308					     XFS_ERRLEVEL_LOW, mp, dicp);
2309			xfs_buf_relse(bp);
2310			xfs_alert(mp,
2311		"%s: Bad dir inode log record, rec ptr 0x%p, "
2312		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2313				__func__, item, dip, bp, in_f->ilf_ino);
2314			error = EFSCORRUPTED;
2315			goto error;
2316		}
2317	}
2318	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2319		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2320				     XFS_ERRLEVEL_LOW, mp, dicp);
2321		xfs_buf_relse(bp);
2322		xfs_alert(mp,
2323	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2324	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2325			__func__, item, dip, bp, in_f->ilf_ino,
2326			dicp->di_nextents + dicp->di_anextents,
2327			dicp->di_nblocks);
2328		error = EFSCORRUPTED;
2329		goto error;
2330	}
2331	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2332		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2333				     XFS_ERRLEVEL_LOW, mp, dicp);
2334		xfs_buf_relse(bp);
2335		xfs_alert(mp,
2336	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2337	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2338			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2339		error = EFSCORRUPTED;
2340		goto error;
2341	}
2342	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2343		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2344				     XFS_ERRLEVEL_LOW, mp, dicp);
2345		xfs_buf_relse(bp);
2346		xfs_alert(mp,
2347			"%s: Bad inode log record length %d, rec ptr 0x%p",
2348			__func__, item->ri_buf[1].i_len, item);
2349		error = EFSCORRUPTED;
2350		goto error;
2351	}
2352
2353	/* The core is in in-core format */
2354	xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2355
2356	/* the rest is in on-disk format */
2357	if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2358		memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2359			item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2360			item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2361	}
2362
2363	fields = in_f->ilf_fields;
2364	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2365	case XFS_ILOG_DEV:
2366		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2367		break;
2368	case XFS_ILOG_UUID:
2369		memcpy(XFS_DFORK_DPTR(dip),
2370		       &in_f->ilf_u.ilfu_uuid,
2371		       sizeof(uuid_t));
2372		break;
2373	}
2374
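	/*
	 * Region 0 holds the inode log format structure and region 1 the
	 * inode core, so an ilf_size of 2 means no data or attr fork
	 * regions were logged and we can write the buffer out directly.
	 */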
2375	if (in_f->ilf_size == 2)
2376		goto write_inode_buffer;
2377	len = item->ri_buf[2].i_len;
2378	src = item->ri_buf[2].i_addr;
2379	ASSERT(in_f->ilf_size <= 4);
2380	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2381	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2382	       (len == in_f->ilf_dsize));
2383
2384	switch (fields & XFS_ILOG_DFORK) {
2385	case XFS_ILOG_DDATA:
2386	case XFS_ILOG_DEXT:
2387		memcpy(XFS_DFORK_DPTR(dip), src, len);
2388		break;
2389
2390	case XFS_ILOG_DBROOT:
2391		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2392				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2393				 XFS_DFORK_DSIZE(dip, mp));
2394		break;
2395
2396	default:
2397		/*
2398		 * There are no data fork flags set.
2399		 */
2400		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2401		break;
2402	}
2403
2404	/*
2405	 * If we logged any attribute data, recover it.  There may or
2406	 * may not have been any other non-core data logged in this
2407	 * transaction.
2408	 */
2409	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2410		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2411			attr_index = 3;
2412		} else {
2413			attr_index = 2;
2414		}
2415		len = item->ri_buf[attr_index].i_len;
2416		src = item->ri_buf[attr_index].i_addr;
2417		ASSERT(len == in_f->ilf_asize);
2418
2419		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2420		case XFS_ILOG_ADATA:
2421		case XFS_ILOG_AEXT:
2422			dest = XFS_DFORK_APTR(dip);
2423			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2424			memcpy(dest, src, len);
2425			break;
2426
2427		case XFS_ILOG_ABROOT:
2428			dest = XFS_DFORK_APTR(dip);
2429			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2430					 len, (xfs_bmdr_block_t*)dest,
2431					 XFS_DFORK_ASIZE(dip, mp));
2432			break;
2433
2434		default:
2435			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2436			ASSERT(0);
2437			xfs_buf_relse(bp);
2438			error = EIO;
2439			goto error;
2440		}
2441	}
2442
2443write_inode_buffer:
2444	ASSERT(bp->b_target->bt_mount == mp);
2445	bp->b_iodone = xlog_recover_iodone;
2446	xfs_buf_delwri_queue(bp, buffer_list);
2447	xfs_buf_relse(bp);
2448error:
2449	if (need_free)
2450		kmem_free(in_f);
2451	return XFS_ERROR(error);
2452}
2453
2454/*
2455 * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2456 * structure, so that we know not to do any dquot item or dquot buffer recovery
2457 * of that type.
2458 */
2459STATIC int
2460xlog_recover_quotaoff_pass1(
2461	xlog_t			*log,
2462	xlog_recover_item_t	*item)
2463{
2464	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2465	ASSERT(qoff_f);
2466
2467	/*
2468	 * The logitem format's flag tells us if this was user quotaoff,
2469	 * group/project quotaoff or both.
2470	 */
2471	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2472		log->l_quotaoffs_flag |= XFS_DQ_USER;
2473	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2474		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2475	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2476		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2477
2478	return 0;
2479}
2480
2481/*
2482 * Recover a dquot record
2483 */
2484STATIC int
2485xlog_recover_dquot_pass2(
2486	xlog_t			*log,
2487	struct list_head	*buffer_list,
2488	xlog_recover_item_t	*item)
2489{
2490	xfs_mount_t		*mp = log->l_mp;
2491	xfs_buf_t		*bp;
2492	struct xfs_disk_dquot	*ddq, *recddq;
2493	int			error;
2494	xfs_dq_logformat_t	*dq_f;
2495	uint			type;
2496
2497
2498	/*
2499	 * Filesystems are required to send in quota flags at mount time.
2500	 */
2501	if (mp->m_qflags == 0)
2502		return 0;
2503
2504	recddq = item->ri_buf[1].i_addr;
2505	if (recddq == NULL) {
2506		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2507		return XFS_ERROR(EIO);
2508	}
2509	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2510		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2511			item->ri_buf[1].i_len, __func__);
2512		return XFS_ERROR(EIO);
2513	}
2514
2515	/*
2516	 * This type of quota was turned off, so ignore this record.
2517	 */
2518	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2519	ASSERT(type);
2520	if (log->l_quotaoffs_flag & type)
2521		return 0;
2522
2523	/*
2524	 * At this point we know that quota was _not_ turned off.
2525	 * Since the mount flags are not indicating to us otherwise, this
2526	 * must mean that quota is on, and the dquot needs to be replayed.
2527	 * Remember that we may not have fully recovered the superblock yet,
2528	 * so we can't do the usual trick of looking at the SB quota bits.
2529	 *
2530	 * The other possibility, of course, is that the quota subsystem was
2531	 * removed since the last mount - ENOSYS.
2532	 */
2533	dq_f = item->ri_buf[0].i_addr;
2534	ASSERT(dq_f);
2535	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2536			   "xlog_recover_dquot_pass2 (log copy)");
2537	if (error)
2538		return XFS_ERROR(EIO);
2539	ASSERT(dq_f->qlf_len == 1);
2540
2541	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2542				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp);
2543	if (error)
2544		return error;
2545
2546	ASSERT(bp);
2547	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2548
2549	/*
2550	 * At least the magic num portion should be on disk because this
2551	 * was among a chunk of dquots created earlier, and we did some
2552	 * minimal initialization then.
2553	 */
2554	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2555			   "xlog_recover_dquot_pass2");
2556	if (error) {
2557		xfs_buf_relse(bp);
2558		return XFS_ERROR(EIO);
2559	}
2560
2561	memcpy(ddq, recddq, item->ri_buf[1].i_len);
2562
2563	ASSERT(dq_f->qlf_size == 2);
2564	ASSERT(bp->b_target->bt_mount == mp);
2565	bp->b_iodone = xlog_recover_iodone;
2566	xfs_buf_delwri_queue(bp, buffer_list);
2567	xfs_buf_relse(bp);
2568
2569	return 0;
2570}
2571
2572/*
2573 * This routine is called to create an in-core extent free intent
2574 * item from the efi format structure which was logged on disk.
2575 * It allocates an in-core efi, copies the extents from the format
2576 * structure into it, and adds the efi to the AIL with the given
2577 * LSN.
2578 */
2579STATIC int
2580xlog_recover_efi_pass2(
2581	xlog_t			*log,
2582	xlog_recover_item_t	*item,
2583	xfs_lsn_t		lsn)
2584{
2585	int			error;
2586	xfs_mount_t		*mp = log->l_mp;
2587	xfs_efi_log_item_t	*efip;
2588	xfs_efi_log_format_t	*efi_formatp;
2589
2590	efi_formatp = item->ri_buf[0].i_addr;
2591
2592	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2593	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2594					 &(efip->efi_format)))) {
2595		xfs_efi_item_free(efip);
2596		return error;
2597	}
2598	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2599
2600	spin_lock(&log->l_ailp->xa_lock);
2601	/*
2602	 * xfs_trans_ail_update() drops the AIL lock.
2603	 */
2604	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2605	return 0;
2606}
2607
2608
2609/*
2610 * This routine is called when an efd format structure is found in
2611 * a committed transaction in the log.  Its purpose is to cancel
2612 * the corresponding efi if it was still in the log.  To do this
2613 * it searches the AIL for the efi with an id equal to that in the
2614 * efd format structure.  If we find it, we remove the efi from the
2615 * AIL and free it.
2616 */
2617STATIC int
2618xlog_recover_efd_pass2(
2619	xlog_t			*log,
2620	xlog_recover_item_t	*item)
2621{
2622	xfs_efd_log_format_t	*efd_formatp;
2623	xfs_efi_log_item_t	*efip = NULL;
2624	xfs_log_item_t		*lip;
2625	__uint64_t		efi_id;
2626	struct xfs_ail_cursor	cur;
2627	struct xfs_ail		*ailp = log->l_ailp;
2628
2629	efd_formatp = item->ri_buf[0].i_addr;
2630	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2631		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2632	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2633		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2634	efi_id = efd_formatp->efd_efi_id;
2635
2636	/*
2637	 * Search for the efi with the id in the efd format structure
2638	 * in the AIL.
2639	 */
2640	spin_lock(&ailp->xa_lock);
2641	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2642	while (lip != NULL) {
2643		if (lip->li_type == XFS_LI_EFI) {
2644			efip = (xfs_efi_log_item_t *)lip;
2645			if (efip->efi_format.efi_id == efi_id) {
2646				/*
2647				 * xfs_trans_ail_delete() drops the
2648				 * AIL lock.
2649				 */
2650				xfs_trans_ail_delete(ailp, lip,
2651						     SHUTDOWN_CORRUPT_INCORE);
2652				xfs_efi_item_free(efip);
2653				spin_lock(&ailp->xa_lock);
2654				break;
2655			}
2656		}
2657		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2658	}
2659	xfs_trans_ail_cursor_done(ailp, &cur);
2660	spin_unlock(&ailp->xa_lock);
2661
2662	return 0;
2663}
2664
2665/*
2666 * Free up any resources allocated by the transaction
2667 *
2668 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2669 */
2670STATIC void
2671xlog_recover_free_trans(
2672	struct xlog_recover	*trans)
2673{
2674	xlog_recover_item_t	*item, *n;
2675	int			i;
2676
2677	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2678		/* Free the regions in the item. */
2679		list_del(&item->ri_list);
2680		for (i = 0; i < item->ri_cnt; i++)
2681			kmem_free(item->ri_buf[i].i_addr);
2682		/* Free the item itself */
2683		kmem_free(item->ri_buf);
2684		kmem_free(item);
2685	}
2686	/* Free the transaction recover structure */
2687	kmem_free(trans);
2688}
2689
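/*
 * Recovery pass 1 dispatch: only buffer cancel records and quotaoff
 * records need any work in the first pass; all other item types are
 * replayed in pass 2.
 */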
2690STATIC int
2691xlog_recover_commit_pass1(
2692	struct xlog			*log,
2693	struct xlog_recover		*trans,
2694	struct xlog_recover_item	*item)
2695{
2696	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2697
2698	switch (ITEM_TYPE(item)) {
2699	case XFS_LI_BUF:
2700		return xlog_recover_buffer_pass1(log, item);
2701	case XFS_LI_QUOTAOFF:
2702		return xlog_recover_quotaoff_pass1(log, item);
2703	case XFS_LI_INODE:
2704	case XFS_LI_EFI:
2705	case XFS_LI_EFD:
2706	case XFS_LI_DQUOT:
2707		/* nothing to do in pass 1 */
2708		return 0;
2709	default:
2710		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2711			__func__, ITEM_TYPE(item));
2712		ASSERT(0);
2713		return XFS_ERROR(EIO);
2714	}
2715}
2716
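/*
 * Recovery pass 2 dispatch: replay each item according to its type,
 * queueing buffer writes on buffer_list so the caller can submit them
 * in a single delwri batch.
 */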
2717STATIC int
2718xlog_recover_commit_pass2(
2719	struct xlog			*log,
2720	struct xlog_recover		*trans,
2721	struct list_head		*buffer_list,
2722	struct xlog_recover_item	*item)
2723{
2724	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2725
2726	switch (ITEM_TYPE(item)) {
2727	case XFS_LI_BUF:
2728		return xlog_recover_buffer_pass2(log, buffer_list, item);
2729	case XFS_LI_INODE:
2730		return xlog_recover_inode_pass2(log, buffer_list, item);
2731	case XFS_LI_EFI:
2732		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2733	case XFS_LI_EFD:
2734		return xlog_recover_efd_pass2(log, item);
2735	case XFS_LI_DQUOT:
2736		return xlog_recover_dquot_pass2(log, buffer_list, item);
2737	case XFS_LI_QUOTAOFF:
2738		/* nothing to do in pass2 */
2739		return 0;
2740	default:
2741		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2742			__func__, ITEM_TYPE(item));
2743		ASSERT(0);
2744		return XFS_ERROR(EIO);
2745	}
2746}
2747
2748/*
2749 * Replay the recovered transaction's items for the given recovery pass.
2750 *
2751 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2752 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2753 */
2754STATIC int
2755xlog_recover_commit_trans(
2756	struct xlog		*log,
2757	struct xlog_recover	*trans,
2758	int			pass)
2759{
2760	int			error = 0, error2;
2761	xlog_recover_item_t	*item;
2762	LIST_HEAD(buffer_list);
2763
2764	hlist_del(&trans->r_list);
2765
2766	error = xlog_recover_reorder_trans(log, trans, pass);
2767	if (error)
2768		return error;
2769
2770	list_for_each_entry(item, &trans->r_itemq, ri_list) {
2771		switch (pass) {
2772		case XLOG_RECOVER_PASS1:
2773			error = xlog_recover_commit_pass1(log, trans, item);
2774			break;
2775		case XLOG_RECOVER_PASS2:
2776			error = xlog_recover_commit_pass2(log, trans,
2777							  &buffer_list, item);
2778			break;
2779		default:
2780			ASSERT(0);
2781		}
2782
2783		if (error)
2784			goto out;
2785	}
2786
2787	xlog_recover_free_trans(trans);
2788
2789out:
2790	error2 = xfs_buf_delwri_submit(&buffer_list);
2791	return error ? error : error2;
2792}
2793
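/*
 * Unmount records carry no state that needs to be replayed, so this is
 * currently just a placeholder that notes we saw one.
 */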
2794STATIC int
2795xlog_recover_unmount_trans(
2796	struct xlog		*log,
2797	struct xlog_recover	*trans)
2798{
2799	/* Do nothing now */
2800	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2801	return 0;
2802}
2803
2804/*
2805 * There are two valid states of the r_state field.  0 indicates that the
2806 * transaction structure is in a normal state.  We have either seen the
2807 * start of the transaction or the last operation we added was not a partial
2808 * operation.  If the last operation we added to the transaction was a
2809 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2810 *
2811 * NOTE: skip LRs with 0 data length.
2812 */
2813STATIC int
2814xlog_recover_process_data(
2815	xlog_t			*log,
2816	struct hlist_head	rhash[],
2817	xlog_rec_header_t	*rhead,
2818	xfs_caddr_t		dp,
2819	int			pass)
2820{
2821	xfs_caddr_t		lp;
2822	int			num_logops;
2823	xlog_op_header_t	*ohead;
2824	xlog_recover_t		*trans;
2825	xlog_tid_t		tid;
2826	int			error;
2827	unsigned long		hash;
2828	uint			flags;
2829
2830	lp = dp + be32_to_cpu(rhead->h_len);
2831	num_logops = be32_to_cpu(rhead->h_num_logops);
2832
2833	/* check the log format matches our own - else we can't recover */
2834	if (xlog_header_check_recover(log->l_mp, rhead))
2835		return XFS_ERROR(EIO);
2836
2837	while ((dp < lp) && num_logops) {
2838		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2839		ohead = (xlog_op_header_t *)dp;
2840		dp += sizeof(xlog_op_header_t);
2841		if (ohead->oh_clientid != XFS_TRANSACTION &&
2842		    ohead->oh_clientid != XFS_LOG) {
2843			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2844					__func__, ohead->oh_clientid);
2845			ASSERT(0);
2846			return XFS_ERROR(EIO);
2847		}
2848		tid = be32_to_cpu(ohead->oh_tid);
2849		hash = XLOG_RHASH(tid);
2850		trans = xlog_recover_find_tid(&rhash[hash], tid);
2851		if (trans == NULL) {		   /* not found; add new tid */
2852			if (ohead->oh_flags & XLOG_START_TRANS)
2853				xlog_recover_new_tid(&rhash[hash], tid,
2854					be64_to_cpu(rhead->h_lsn));
2855		} else {
2856			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2857				xfs_warn(log->l_mp, "%s: bad length 0x%x",
2858					__func__, be32_to_cpu(ohead->oh_len));
2859				WARN_ON(1);
2860				return XFS_ERROR(EIO);
2861			}
2862			flags = ohead->oh_flags & ~XLOG_END_TRANS;
2863			if (flags & XLOG_WAS_CONT_TRANS)
2864				flags &= ~XLOG_CONTINUE_TRANS;
2865			switch (flags) {
2866			case XLOG_COMMIT_TRANS:
2867				error = xlog_recover_commit_trans(log,
2868								trans, pass);
2869				break;
2870			case XLOG_UNMOUNT_TRANS:
2871				error = xlog_recover_unmount_trans(log, trans);
2872				break;
2873			case XLOG_WAS_CONT_TRANS:
2874				error = xlog_recover_add_to_cont_trans(log,
2875						trans, dp,
2876						be32_to_cpu(ohead->oh_len));
2877				break;
2878			case XLOG_START_TRANS:
2879				xfs_warn(log->l_mp, "%s: bad transaction",
2880					__func__);
2881				ASSERT(0);
2882				error = XFS_ERROR(EIO);
2883				break;
2884			case 0:
2885			case XLOG_CONTINUE_TRANS:
2886				error = xlog_recover_add_to_trans(log, trans,
2887						dp, be32_to_cpu(ohead->oh_len));
2888				break;
2889			default:
2890				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2891					__func__, flags);
2892				ASSERT(0);
2893				error = XFS_ERROR(EIO);
2894				break;
2895			}
2896			if (error)
2897				return error;
2898		}
2899		dp += be32_to_cpu(ohead->oh_len);
2900		num_logops--;
2901	}
2902	return 0;
2903}
2904
2905/*
2906 * Process an extent free intent item that was recovered from
2907 * the log.  We need to free the extents that it describes.
2908 */
2909STATIC int
2910xlog_recover_process_efi(
2911	xfs_mount_t		*mp,
2912	xfs_efi_log_item_t	*efip)
2913{
2914	xfs_efd_log_item_t	*efdp;
2915	xfs_trans_t		*tp;
2916	int			i;
2917	int			error = 0;
2918	xfs_extent_t		*extp;
2919	xfs_fsblock_t		startblock_fsb;
2920
2921	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
2922
2923	/*
2924	 * First check the validity of the extents described by the
2925	 * EFI.  If any are bad, then assume that all are bad and
2926	 * just toss the EFI.
2927	 */
2928	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2929		extp = &(efip->efi_format.efi_extents[i]);
2930		startblock_fsb = XFS_BB_TO_FSB(mp,
2931				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
2932		if ((startblock_fsb == 0) ||
2933		    (extp->ext_len == 0) ||
2934		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2935		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2936			/*
2937			 * This will pull the EFI from the AIL and
2938			 * free the memory associated with it.
2939			 */
2940			xfs_efi_release(efip, efip->efi_format.efi_nextents);
2941			return XFS_ERROR(EIO);
2942		}
2943	}
2944
2945	tp = xfs_trans_alloc(mp, 0);
2946	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
2947	if (error)
2948		goto abort_error;
2949	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
2950
2951	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2952		extp = &(efip->efi_format.efi_extents[i]);
2953		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
2954		if (error)
2955			goto abort_error;
2956		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
2957					 extp->ext_len);
2958	}
2959
2960	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
2961	error = xfs_trans_commit(tp, 0);
2962	return error;
2963
2964abort_error:
2965	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
2966	return error;
2967}
2968
2969/*
2970 * When this is called, all of the EFIs which did not have
2971 * corresponding EFDs should be in the AIL.  What we do now
2972 * is free the extents associated with each one.
2973 *
2974 * Since we process the EFIs in normal transactions, they
2975 * will be removed at some point after the commit.  This prevents
2976 * us from just walking down the list processing each one.
2977 * We'll use a flag in the EFI to skip those that we've already
2978 * processed and use the AIL iteration mechanism's generation
2979 * count to try to speed this up at least a bit.
2980 *
2981 * When we start, we know that the EFIs are the only things in
2982 * the AIL.  As we process them, however, other items are added
2983 * to the AIL.  Since everything added to the AIL must come after
2984 * everything already in the AIL, we stop processing as soon as
2985 * we see something other than an EFI in the AIL.
2986 */
2987STATIC int
2988xlog_recover_process_efis(
2989	xlog_t			*log)
2990{
2991	xfs_log_item_t		*lip;
2992	xfs_efi_log_item_t	*efip;
2993	int			error = 0;
2994	struct xfs_ail_cursor	cur;
2995	struct xfs_ail		*ailp;
2996
2997	ailp = log->l_ailp;
2998	spin_lock(&ailp->xa_lock);
2999	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3000	while (lip != NULL) {
3001		/*
3002		 * We're done when we see something other than an EFI.
3003		 * There should be no EFIs left in the AIL now.
3004		 */
3005		if (lip->li_type != XFS_LI_EFI) {
3006#ifdef DEBUG
3007			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3008				ASSERT(lip->li_type != XFS_LI_EFI);
3009#endif
3010			break;
3011		}
3012
3013		/*
3014		 * Skip EFIs that we've already processed.
3015		 */
3016		efip = (xfs_efi_log_item_t *)lip;
3017		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3018			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3019			continue;
3020		}
3021
3022		spin_unlock(&ailp->xa_lock);
3023		error = xlog_recover_process_efi(log->l_mp, efip);
3024		spin_lock(&ailp->xa_lock);
3025		if (error)
3026			goto out;
3027		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3028	}
3029out:
3030	xfs_trans_ail_cursor_done(ailp, &cur);
3031	spin_unlock(&ailp->xa_lock);
3032	return error;
3033}
3034
3035/*
3036 * This routine performs a transaction to null out a bad inode pointer
3037 * in an agi unlinked inode hash bucket.
3038 */
3039STATIC void
3040xlog_recover_clear_agi_bucket(
3041	xfs_mount_t	*mp,
3042	xfs_agnumber_t	agno,
3043	int		bucket)
3044{
3045	xfs_trans_t	*tp;
3046	xfs_agi_t	*agi;
3047	xfs_buf_t	*agibp;
3048	int		offset;
3049	int		error;
3050
3051	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3052	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3053				  0, 0, 0);
3054	if (error)
3055		goto out_abort;
3056
3057	error = xfs_read_agi(mp, tp, agno, &agibp);
3058	if (error)
3059		goto out_abort;
3060
3061	agi = XFS_BUF_TO_AGI(agibp);
3062	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3063	offset = offsetof(xfs_agi_t, agi_unlinked) +
3064		 (sizeof(xfs_agino_t) * bucket);
3065	xfs_trans_log_buf(tp, agibp, offset,
3066			  (offset + sizeof(xfs_agino_t) - 1));
3067
3068	error = xfs_trans_commit(tp, 0);
3069	if (error)
3070		goto out_error;
3071	return;
3072
3073out_abort:
3074	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3075out_error:
3076	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3077	return;
3078}
3079
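/*
 * Recover one inode from an AGI unlinked list bucket: read the inode,
 * pick up the next agino from its di_next_unlinked pointer, then drop
 * our reference so the normal inactivation path frees it.  On any
 * failure we give up on the bucket and clear it.
 */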
3080STATIC xfs_agino_t
3081xlog_recover_process_one_iunlink(
3082	struct xfs_mount		*mp,
3083	xfs_agnumber_t			agno,
3084	xfs_agino_t			agino,
3085	int				bucket)
3086{
3087	struct xfs_buf			*ibp;
3088	struct xfs_dinode		*dip;
3089	struct xfs_inode		*ip;
3090	xfs_ino_t			ino;
3091	int				error;
3092
3093	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3094	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3095	if (error)
3096		goto fail;
3097
3098	/*
3099	 * Get the on disk inode to find the next inode in the bucket.
3100	 */
3101	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0);
3102	if (error)
3103		goto fail_iput;
3104
3105	ASSERT(ip->i_d.di_nlink == 0);
3106	ASSERT(ip->i_d.di_mode != 0);
3107
3108	/* setup for the next pass */
3109	agino = be32_to_cpu(dip->di_next_unlinked);
3110	xfs_buf_relse(ibp);
3111
3112	/*
3113	 * Prevent any DMAPI event from being sent when the reference on
3114	 * the inode is dropped.
3115	 */
3116	ip->i_d.di_dmevmask = 0;
3117
3118	IRELE(ip);
3119	return agino;
3120
3121 fail_iput:
3122	IRELE(ip);
3123 fail:
3124	/*
3125	 * We can't read in the inode this bucket points to, or this inode
3126	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3127	 * some inodes and space, but at least we won't hang.
3128	 *
3129	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3130	 * clear the inode pointer in the bucket.
3131	 */
3132	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3133	return NULLAGINO;
3134}
3135
3136/*
3137 * xlog_iunlink_recover
3138 *
3139 * This is called during recovery to process any inodes which
3140 * we unlinked but not freed when the system crashed.  These
3141 * inodes will be on the lists in the AGI blocks.  What we do
3142 * here is scan all the AGIs and fully truncate and free any
3143 * inodes found on the lists.  Each inode is removed from the
3144 * lists when it has been fully truncated and is freed.  The
3145 * freeing of the inode and its removal from the list must be
3146 * atomic.
3147 */
3148STATIC void
3149xlog_recover_process_iunlinks(
3150	xlog_t		*log)
3151{
3152	xfs_mount_t	*mp;
3153	xfs_agnumber_t	agno;
3154	xfs_agi_t	*agi;
3155	xfs_buf_t	*agibp;
3156	xfs_agino_t	agino;
3157	int		bucket;
3158	int		error;
3159	uint		mp_dmevmask;
3160
3161	mp = log->l_mp;
3162
3163	/*
3164	 * Prevent any DMAPI event from being sent while in this function.
3165	 */
3166	mp_dmevmask = mp->m_dmevmask;
3167	mp->m_dmevmask = 0;
3168
3169	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3170		/*
3171		 * Find the agi for this ag.
3172		 */
3173		error = xfs_read_agi(mp, NULL, agno, &agibp);
3174		if (error) {
3175			/*
3176			 * AGI is b0rked. Don't process it.
3177			 *
3178			 * We should probably mark the filesystem as corrupt
3179			 * after we've recovered all the ag's we can....
3180			 */
3181			continue;
3182		}
3183		/*
3184		 * Unlock the buffer so that it can be acquired in the normal
3185		 * course of the transaction to truncate and free each inode.
3186		 * Because we are not racing with anyone else here for the AGI
3187		 * buffer, we don't even need to hold it locked to read the
3188		 * initial unlinked bucket entries out of the buffer. We keep
3189		 * a buffer reference, though, so that it stays pinned in memory
3190		 * while we need the buffer.
3191		 */
3192		agi = XFS_BUF_TO_AGI(agibp);
3193		xfs_buf_unlock(agibp);
3194
3195		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3196			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3197			while (agino != NULLAGINO) {
3198				agino = xlog_recover_process_one_iunlink(mp,
3199							agno, agino, bucket);
3200			}
3201		}
3202		xfs_buf_rele(agibp);
3203	}
3204
3205	mp->m_dmevmask = mp_dmevmask;
3206}
3207
3208
3209#ifdef DEBUG
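/* Debug builds stamp a simple XOR checksum of the iclog data into h_chksum. */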
3210STATIC void
3211xlog_pack_data_checksum(
3212	xlog_t		*log,
3213	xlog_in_core_t	*iclog,
3214	int		size)
3215{
3216	int		i;
3217	__be32		*up;
3218	uint		chksum = 0;
3219
3220	up = (__be32 *)iclog->ic_datap;
3221	/* divide length by 4 to get # words */
3222	for (i = 0; i < (size >> 2); i++) {
3223		chksum ^= be32_to_cpu(*up);
3224		up++;
3225	}
3226	iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3227}
3228#else
3229#define xlog_pack_data_checksum(log, iclog, size)
3230#endif
3231
3232/*
3233 * Stamp cycle number in every block
3234 */
3235void
3236xlog_pack_data(
3237	xlog_t			*log,
3238	xlog_in_core_t		*iclog,
3239	int			roundoff)
3240{
3241	int			i, j, k;
3242	int			size = iclog->ic_offset + roundoff;
3243	__be32			cycle_lsn;
3244	xfs_caddr_t		dp;
3245
3246	xlog_pack_data_checksum(log, iclog, size);
3247
3248	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3249
3250	dp = iclog->ic_datap;
3251	for (i = 0; i < BTOBB(size) &&
3252		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3253		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3254		*(__be32 *)dp = cycle_lsn;
3255		dp += BBSIZE;
3256	}
3257
3258	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3259		xlog_in_core_2_t *xhdr = iclog->ic_data;
3260
3261		for ( ; i < BTOBB(size); i++) {
3262			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3263			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3264			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3265			*(__be32 *)dp = cycle_lsn;
3266			dp += BBSIZE;
3267		}
3268
3269		for (i = 1; i < log->l_iclog_heads; i++) {
3270			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3271		}
3272	}
3273}
3274
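/*
 * The inverse of xlog_pack_data(): restore the data words saved in the
 * record header (and the extended headers of v2 logs) back into the
 * first word of each basic block.
 */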
3275STATIC void
3276xlog_unpack_data(
3277	xlog_rec_header_t	*rhead,
3278	xfs_caddr_t		dp,
3279	xlog_t			*log)
3280{
3281	int			i, j, k;
3282
3283	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3284		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3285		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3286		dp += BBSIZE;
3287	}
3288
3289	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3290		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3291		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3292			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3293			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3294			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3295			dp += BBSIZE;
3296		}
3297	}
3298}
3299
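/*
 * Sanity check a log record header before trusting it: verify the magic
 * number, the version bits, the body length and the block number.
 */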
3300STATIC int
3301xlog_valid_rec_header(
3302	xlog_t			*log,
3303	xlog_rec_header_t	*rhead,
3304	xfs_daddr_t		blkno)
3305{
3306	int			hlen;
3307
3308	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3309		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3310				XFS_ERRLEVEL_LOW, log->l_mp);
3311		return XFS_ERROR(EFSCORRUPTED);
3312	}
3313	if (unlikely(
3314	    (!rhead->h_version ||
3315	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3316		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3317			__func__, be32_to_cpu(rhead->h_version));
3318		return XFS_ERROR(EIO);
3319	}
3320
3321	/* LR body must have data or it wouldn't have been written */
3322	hlen = be32_to_cpu(rhead->h_len);
3323	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3324		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3325				XFS_ERRLEVEL_LOW, log->l_mp);
3326		return XFS_ERROR(EFSCORRUPTED);
3327	}
3328	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3329		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3330				XFS_ERRLEVEL_LOW, log->l_mp);
3331		return XFS_ERROR(EFSCORRUPTED);
3332	}
3333	return 0;
3334}
3335
3336/*
3337 * Read the log from tail to head and process the log records found.
3338 * Handle the two cases where the tail and head are in the same cycle
3339 * and where the active portion of the log wraps around the end of
3340 * the physical log separately.  The pass parameter is passed through
3341 * to the routines called to process the data and is not looked at
3342 * here.
3343 */
3344STATIC int
3345xlog_do_recovery_pass(
3346	xlog_t			*log,
3347	xfs_daddr_t		head_blk,
3348	xfs_daddr_t		tail_blk,
3349	int			pass)
3350{
3351	xlog_rec_header_t	*rhead;
3352	xfs_daddr_t		blk_no;
3353	xfs_caddr_t		offset;
3354	xfs_buf_t		*hbp, *dbp;
3355	int			error = 0, h_size;
3356	int			bblks, split_bblks;
3357	int			hblks, split_hblks, wrapped_hblks;
3358	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3359
3360	ASSERT(head_blk != tail_blk);
3361
3362	/*
3363	 * Read the header of the tail block and get the iclog buffer size from
3364	 * h_size.  Use this to tell how many sectors make up the log header.
3365	 */
3366	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3367		/*
3368		 * When using variable length iclogs, read first sector of
3369		 * iclog header and extract the header size from it.  Get a
3370		 * new hbp that is the correct size.
3371		 */
3372		hbp = xlog_get_bp(log, 1);
3373		if (!hbp)
3374			return ENOMEM;
3375
3376		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3377		if (error)
3378			goto bread_err1;
3379
3380		rhead = (xlog_rec_header_t *)offset;
3381		error = xlog_valid_rec_header(log, rhead, tail_blk);
3382		if (error)
3383			goto bread_err1;
3384		h_size = be32_to_cpu(rhead->h_size);
3385		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3386		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3387			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3388			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3389				hblks++;
3390			xlog_put_bp(hbp);
3391			hbp = xlog_get_bp(log, hblks);
3392		} else {
3393			hblks = 1;
3394		}
3395	} else {
3396		ASSERT(log->l_sectBBsize == 1);
3397		hblks = 1;
3398		hbp = xlog_get_bp(log, 1);
3399		h_size = XLOG_BIG_RECORD_BSIZE;
3400	}
3401
3402	if (!hbp)
3403		return ENOMEM;
3404	dbp = xlog_get_bp(log, BTOBB(h_size));
3405	if (!dbp) {
3406		xlog_put_bp(hbp);
3407		return ENOMEM;
3408	}
3409
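	/*
	 * rhash collects in-progress transactions by transaction ID as
	 * their operations stream in, possibly spread across several log
	 * records; a transaction is only replayed (or cancelled) once its
	 * commit record is seen.
	 */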
3410	memset(rhash, 0, sizeof(rhash));
3411	if (tail_blk <= head_blk) {
3412		for (blk_no = tail_blk; blk_no < head_blk; ) {
3413			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3414			if (error)
3415				goto bread_err2;
3416
3417			rhead = (xlog_rec_header_t *)offset;
3418			error = xlog_valid_rec_header(log, rhead, blk_no);
3419			if (error)
3420				goto bread_err2;
3421
3422			/* blocks in data section */
3423			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3424			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3425					   &offset);
3426			if (error)
3427				goto bread_err2;
3428
3429			xlog_unpack_data(rhead, offset, log);
3430			if ((error = xlog_recover_process_data(log,
3431						rhash, rhead, offset, pass)))
3432				goto bread_err2;
3433			blk_no += bblks + hblks;
3434		}
3435	} else {
3436		/*
3437		 * Perform recovery around the end of the physical log.
3438		 * When the head is not on the same cycle number as the tail,
3439		 * we can't do a sequential recovery as above.
3440		 */
3441		blk_no = tail_blk;
3442		while (blk_no < log->l_logBBsize) {
3443			/*
3444			 * Check for header wrapping around physical end-of-log
3445			 */
3446			offset = hbp->b_addr;
3447			split_hblks = 0;
3448			wrapped_hblks = 0;
3449			if (blk_no + hblks <= log->l_logBBsize) {
3450				/* Read header in one read */
3451				error = xlog_bread(log, blk_no, hblks, hbp,
3452						   &offset);
3453				if (error)
3454					goto bread_err2;
3455			} else {
3456				/* This LR is split across physical log end */
3457				if (blk_no != log->l_logBBsize) {
3458					/* some data before physical log end */
3459					ASSERT(blk_no <= INT_MAX);
3460					split_hblks = log->l_logBBsize - (int)blk_no;
3461					ASSERT(split_hblks > 0);
3462					error = xlog_bread(log, blk_no,
3463							   split_hblks, hbp,
3464							   &offset);
3465					if (error)
3466						goto bread_err2;
3467				}
3468
3469				/*
3470				 * Note: this black magic still works with
3471				 * large sector sizes (non-512) only because:
3472				 * - we increased the buffer size originally
3473				 *   by 1 sector giving us enough extra space
3474				 *   for the second read;
3475				 * - the log start is guaranteed to be sector
3476				 *   aligned;
3477				 * - we read the log end (LR header start)
3478				 *   _first_, then the log start (LR header end)
3479				 *   - order is important.
3480				 */
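				/*
				 * For example, with l_logBBsize = 1000,
				 * blk_no = 998 and hblks = 4:
				 * split_hblks = 2 blocks are read from
				 * 998..999 into the start of the buffer,
				 * then wrapped_hblks = 2 blocks from
				 * 0..1 land at offset + BBTOB(2),
				 * re-assembling the header contiguously
				 * in memory.
				 */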
3481				wrapped_hblks = hblks - split_hblks;
3482				error = xlog_bread_offset(log, 0,
3483						wrapped_hblks, hbp,
3484						offset + BBTOB(split_hblks));
3485				if (error)
3486					goto bread_err2;
3487			}
3488			rhead = (xlog_rec_header_t *)offset;
3489			error = xlog_valid_rec_header(log, rhead,
3490						split_hblks ? blk_no : 0);
3491			if (error)
3492				goto bread_err2;
3493
3494			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3495			blk_no += hblks;
3496
3497			/* Read in data for log record */
3498			if (blk_no + bblks <= log->l_logBBsize) {
3499				error = xlog_bread(log, blk_no, bblks, dbp,
3500						   &offset);
3501				if (error)
3502					goto bread_err2;
3503			} else {
3504				/* This log record is split across the
3505				 * physical end of log */
3506				offset = dbp->b_addr;
3507				split_bblks = 0;
3508				if (blk_no != log->l_logBBsize) {
3509					/* some data is before the physical
3510					 * end of log */
3511					ASSERT(!wrapped_hblks);
3512					ASSERT(blk_no <= INT_MAX);
3513					split_bblks =
3514						log->l_logBBsize - (int)blk_no;
3515					ASSERT(split_bblks > 0);
3516					error = xlog_bread(log, blk_no,
3517							split_bblks, dbp,
3518							&offset);
3519					if (error)
3520						goto bread_err2;
3521				}
3522
3523				/*
3524				 * Note: this black magic still works with
3525				 * large sector sizes (non-512) only because:
3526				 * - we increased the buffer size originally
3527				 *   by 1 sector giving us enough extra space
3528				 *   for the second read;
3529				 * - the log start is guaranteed to be sector
3530				 *   aligned;
3531				 * - we read the log end (LR header start)
3532				 *   _first_, then the log start (LR header end)
3533				 *   - order is important.
3534				 */
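				/*
				 * The wrapped tail of the record body
				 * belongs in the data buffer, directly
				 * after the first fragment; going
				 * through the smaller header buffer
				 * here could set up an I/O larger than
				 * that buffer was sized for.
				 */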
3535				error = xlog_bread_offset(log, 0,
3536						bblks - split_bblks, dbp,
3537						offset + BBTOB(split_bblks));
3538				if (error)
3539					goto bread_err2;
3540			}
3541			xlog_unpack_data(rhead, offset, log);
3542			if ((error = xlog_recover_process_data(log, rhash,
3543							rhead, offset, pass)))
3544				goto bread_err2;
3545			blk_no += bblks;
3546		}
3547
3548		ASSERT(blk_no >= log->l_logBBsize);
3549		blk_no -= log->l_logBBsize;
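		/*
		 * blk_no steps past the physical end when the last record
		 * wraps, e.g. if its body ran two blocks past the end of
		 * a 1000-block log, blk_no is left at 1002, which maps
		 * back to block 2 for the sequential scan below.
		 */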
3550
3551		/* read first part of physical log */
3552		while (blk_no < head_blk) {
3553			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3554			if (error)
3555				goto bread_err2;
3556
3557			rhead = (xlog_rec_header_t *)offset;
3558			error = xlog_valid_rec_header(log, rhead, blk_no);
3559			if (error)
3560				goto bread_err2;
3561
3562			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3563			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3564					   &offset);
3565			if (error)
3566				goto bread_err2;
3567
3568			xlog_unpack_data(rhead, offset, log);
3569			if ((error = xlog_recover_process_data(log, rhash,
3570							rhead, offset, pass)))
3571				goto bread_err2;
3572			blk_no += bblks + hblks;
3573		}
3574	}
3575
3576 bread_err2:
3577	xlog_put_bp(dbp);
3578 bread_err1:
3579	xlog_put_bp(hbp);
3580	return error;
3581}
3582
3583/*
3584 * Do the recovery of the log.  We actually do this in two phases.
3585 * The two passes are necessary in order to implement the function
3586 * of cancelling a record written into the log.  The first pass
3587 * determines those things which have been cancelled, and the
3588 * second pass replays log items normally except for those which
3589 * have been cancelled.  The handling of the replay and cancellations
3590 * takes place in the log item type specific routines.
3591 *
3592 * The table of items which have cancel records in the log is allocated
3593 * and freed at this level, since only here do we know when all of
3594 * the log recovery has been completed.
3595 */
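/*
 * As a concrete example of cancellation: if a buffer was logged, then
 * freed, and its blocks reallocated for inodes before the crash, the log
 * holds both the stale buffer image and a cancel record for it.  Pass 1
 * notes the cancel record in l_buf_cancel_table, and pass 2 then skips
 * replaying the stale image over the newer contents while recovering
 * everything else normally.
 */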
3596STATIC int
3597xlog_do_log_recovery(
3598	xlog_t		*log,
3599	xfs_daddr_t	head_blk,
3600	xfs_daddr_t	tail_blk)
3601{
3602	int		error, i;
3603
3604	ASSERT(head_blk != tail_blk);
3605
3606	/*
3607	 * First do a pass to find all of the cancelled buf log items.
3608	 * Store them in the buf_cancel_table for use in the second pass.
3609	 */
3610	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3611						 sizeof(struct list_head),
3612						 KM_SLEEP);
3613	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3614		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3615
3616	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3617				      XLOG_RECOVER_PASS1);
3618	if (error != 0) {
3619		kmem_free(log->l_buf_cancel_table);
3620		log->l_buf_cancel_table = NULL;
3621		return error;
3622	}
3623	/*
3624	 * Then do a second pass to actually recover the items in the log.
3625	 * When it is complete free the table of buf cancel items.
3626	 */
3627	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3628				      XLOG_RECOVER_PASS2);
3629#ifdef DEBUG
3630	if (!error) {
3631		int	i;
3632
3633		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3634			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3635	}
3636#endif	/* DEBUG */
3637
3638	kmem_free(log->l_buf_cancel_table);
3639	log->l_buf_cancel_table = NULL;
3640
3641	return error;
3642}
3643
3644/*
3645 * Do the actual recovery
3646 */
3647STATIC int
3648xlog_do_recover(
3649	xlog_t		*log,
3650	xfs_daddr_t	head_blk,
3651	xfs_daddr_t	tail_blk)
3652{
3653	int		error;
3654	xfs_buf_t	*bp;
3655	xfs_sb_t	*sbp;
3656
3657	/*
3658	 * First replay the images in the log.
3659	 */
3660	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3661	if (error)
3662		return error;
3663
3664	/*
3665	 * If IO errors happened during recovery, bail out.
3666	 */
3667	if (XFS_FORCED_SHUTDOWN(log->l_mp))
3668		return EIO;
3670
3671	/*
3672	 * We now update the tail_lsn since much of the recovery has completed
3673	 * and there may be space available to use.  If there were no extent
3674	 * or iunlinks, we can free up the entire log and set the tail_lsn to
3675	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3676	 * lsn of the last known good LR on disk.  If there are extent frees
3677	 * or iunlinks they will have some entries in the AIL; so we look at
3678	 * the AIL to determine how to set the tail_lsn.
3679	 */
3680	xlog_assign_tail_lsn(log->l_mp);
3681
3682	/*
3683	 * Now that we've finished replaying all buffer and inode
3684	 * updates, re-read in the superblock.
3685	 */
3686	bp = xfs_getsb(log->l_mp, 0);
3687	XFS_BUF_UNDONE(bp);
3688	ASSERT(!(XFS_BUF_ISWRITE(bp)));
3689	XFS_BUF_READ(bp);
3690	XFS_BUF_UNASYNC(bp);
3691	xfsbdstrat(log->l_mp, bp);
3692	error = xfs_buf_iowait(bp);
3693	if (error) {
3694		xfs_buf_ioerror_alert(bp, __func__);
3695		ASSERT(0);
3696		xfs_buf_relse(bp);
3697		return error;
3698	}
3699
3700	/* Convert superblock from on-disk format */
3701	sbp = &log->l_mp->m_sb;
3702	xfs_sb_from_disk(log->l_mp, XFS_BUF_TO_SBP(bp));
3703	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3704	ASSERT(xfs_sb_good_version(sbp));
3705	xfs_buf_relse(bp);
3706
3707	/* We've re-read the superblock so re-initialize per-cpu counters */
3708	xfs_icsb_reinit_counters(log->l_mp);
3709
3710	xlog_recover_check_summary(log);
3711
3712	/* Normal transactions can now occur */
3713	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3714	return 0;
3715}
3716
3717/*
3718 * Perform recovery.  Note that xlog_find_tail, called below, also
3719 * re-initializes some log state as a side effect.
3720 * Return error or zero.
3721 */
3722int
3723xlog_recover(
3724	xlog_t		*log)
3725{
3726	xfs_daddr_t	head_blk, tail_blk;
3727	int		error;
3728
3729	/* find the tail of the log */
3730	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3731		return error;
3732
3733	if (tail_blk != head_blk) {
3734		/* There used to be a comment here:
3735		 *
3736		 * disallow recovery on read-only mounts.  note -- mount
3737		 * checks for ENOSPC and turns it into an intelligent
3738		 * error message.
3739		 * ...but this is no longer true.  Now, unless you specify
3740		 * NORECOVERY (in which case this function would never be
3741		 * called), we just go ahead and recover.  We do this all
3742		 * under the vfs layer, so we can get away with it unless
3743		 * the device itself is read-only, in which case we fail.
3744		 */
3745		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3746			return error;
3747		}
3748
3749		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3750				log->l_mp->m_logname ? log->l_mp->m_logname
3751						     : "internal");
3752
3753		error = xlog_do_recover(log, head_blk, tail_blk);
3754		log->l_flags |= XLOG_RECOVERY_NEEDED;
3755	}
3756	return error;
3757}
3758
3759/*
3760 * In the first part of recovery we replay inodes and buffers and build
3761 * up the list of extent free items which need to be processed.  Here
3762 * we process the extent free items and clean up the on disk unlinked
3763 * inode lists.  This is separated from the first part of recovery so
3764 * that the root and real-time bitmap inodes can be read in from disk in
3765 * between the two stages.  This is necessary so that we can free space
3766 * in the real-time portion of the file system.
3767 */
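/*
 * The mount path therefore runs xlog_recover(), then reads the root and
 * realtime bitmap inodes, and only then calls this function, so that the
 * extent free intents processed below can touch the realtime portion of
 * the file system.
 */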
3768int
3769xlog_recover_finish(
3770	xlog_t		*log)
3771{
3772	/*
3773	 * Now we're ready to do the transactions needed for the
3774	 * rest of recovery.  Start with completing all the extent
3775	 * free intent records and then process the unlinked inode
3776	 * lists.  At this point, we essentially run in normal mode
3777	 * except that we're still performing recovery actions
3778	 * rather than accepting new requests.
3779	 */
3780	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3781		int	error;
3782		error = xlog_recover_process_efis(log);
3783		if (error) {
3784			xfs_alert(log->l_mp, "Failed to recover EFIs");
3785			return error;
3786		}
3787		/*
3788		 * Sync the log to get all the EFIs out of the AIL.
3789		 * This isn't absolutely necessary, but it helps in
3790		 * case the unlink transactions would have problems
3791		 * pushing the EFIs out of the way.
3792		 */
3793		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3794
3795		xlog_recover_process_iunlinks(log);
3796
3797		xlog_recover_check_summary(log);
3798
3799		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3800				log->l_mp->m_logname ? log->l_mp->m_logname
3801						     : "internal");
3802		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3803	} else {
3804		xfs_info(log->l_mp, "Ending clean mount");
3805	}
3806	return 0;
3807}
3808
3809
3810#if defined(DEBUG)
3811/*
3812 * Read all of the agf and agi headers and tally the free block and
3813 * inode counts that should be consistent with the superblock counters.
3814 */
3815void
3816xlog_recover_check_summary(
3817	xlog_t		*log)
3818{
3819	xfs_mount_t	*mp;
3820	xfs_agf_t	*agfp;
3821	xfs_buf_t	*agfbp;
3822	xfs_buf_t	*agibp;
3823	xfs_agnumber_t	agno;
3824	__uint64_t	freeblks;
3825	__uint64_t	itotal;
3826	__uint64_t	ifree;
3827	int		error;
3828
3829	mp = log->l_mp;
3830
3831	freeblks = 0LL;
3832	itotal = 0LL;
3833	ifree = 0LL;
3834	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3835		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3836		if (error) {
3837			xfs_alert(mp, "%s agf read failed agno %d error %d",
3838						__func__, agno, error);
3839		} else {
3840			agfp = XFS_BUF_TO_AGF(agfbp);
3841			freeblks += be32_to_cpu(agfp->agf_freeblks) +
3842				    be32_to_cpu(agfp->agf_flcount);
3843			xfs_buf_relse(agfbp);
3844		}
3845
3846		error = xfs_read_agi(mp, NULL, agno, &agibp);
3847		if (error) {
3848			xfs_alert(mp, "%s agi read failed agno %d error %d",
3849						__func__, agno, error);
3850		} else {
3851			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
3852
3853			itotal += be32_to_cpu(agi->agi_count);
3854			ifree += be32_to_cpu(agi->agi_freecount);
3855			xfs_buf_relse(agibp);
3856		}
3857	}
3858}
3859#endif /* DEBUG */