v5.4
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Copyright (C) 2017 Oracle.  All Rights Reserved.
  4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_trans_resv.h"
 11#include "xfs_mount.h"
 12#include "xfs_btree.h"
 13#include "xfs_log_format.h"
 14#include "xfs_trans.h"
 15#include "xfs_sb.h"
 16#include "xfs_inode.h"
 17#include "xfs_icache.h"
 18#include "xfs_alloc.h"
 19#include "xfs_alloc_btree.h"
 20#include "xfs_ialloc.h"
 21#include "xfs_ialloc_btree.h"
 22#include "xfs_refcount_btree.h"
 23#include "xfs_rmap.h"
 24#include "xfs_rmap_btree.h"
 25#include "xfs_log.h"
 26#include "xfs_trans_priv.h"
 27#include "xfs_attr.h"
 28#include "xfs_reflink.h"
 29#include "scrub/scrub.h"
 30#include "scrub/common.h"
 31#include "scrub/trace.h"
 32#include "scrub/repair.h"
 33#include "scrub/health.h"
 34
 35/* Common code for the metadata scrubbers. */
 36
 37/*
 38 * Handling operational errors.
 39 *
 40 * The *_process_error() family of functions are used to process error return
 41 * codes from functions called as part of a scrub operation.
 42 *
 43 * If there's no error, we return true to tell the caller that it's ok
 44 * to move on to the next check in its list.
 45 *
 46 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 47 * caller that something bad happened, and we preserve *error so that
 48 * the caller can return the *error up the stack to userspace.
 49 *
 50 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 51 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 52 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 53 * not via return codes.  We return false to tell the caller that
 54 * something bad happened.  Since the error has been cleared, the caller
 55 * will (presumably) return that zero and scrubbing will move on to
 56 * whatever's next.
 57 *
 58 * ftrace can be used to record the precise metadata location and the
 59 * approximate code location of the failed operation.
 60 */
 61
 62/* Check for operational errors. */
 63static bool
 64__xchk_process_error(
 65	struct xfs_scrub	*sc,
 66	xfs_agnumber_t		agno,
 67	xfs_agblock_t		bno,
 68	int			*error,
 69	__u32			errflag,
 70	void			*ret_ip)
 71{
 72	switch (*error) {
 73	case 0:
 74		return true;
 75	case -EDEADLOCK:
 76		/* Used to restart an op with deadlock avoidance. */
 77		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
 78		break;
 79	case -EFSBADCRC:
 80	case -EFSCORRUPTED:
 81		/* Note the badness but don't abort. */
 82		sc->sm->sm_flags |= errflag;
 83		*error = 0;
 84		/* fall through */
 85	default:
 86		trace_xchk_op_error(sc, agno, bno, *error,
 87				ret_ip);
 88		break;
 89	}
 90	return false;
 91}
 92
 93bool
 94xchk_process_error(
 95	struct xfs_scrub	*sc,
 96	xfs_agnumber_t		agno,
 97	xfs_agblock_t		bno,
 98	int			*error)
 99{
100	return __xchk_process_error(sc, agno, bno, error,
101			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
102}
103
104bool
105xchk_xref_process_error(
106	struct xfs_scrub	*sc,
107	xfs_agnumber_t		agno,
108	xfs_agblock_t		bno,
109	int			*error)
110{
111	return __xchk_process_error(sc, agno, bno, error,
112			XFS_SCRUB_OFLAG_XFAIL, __return_address);
113}
114
115/* Check for operational errors for a file offset. */
116static bool
117__xchk_fblock_process_error(
118	struct xfs_scrub	*sc,
119	int			whichfork,
120	xfs_fileoff_t		offset,
121	int			*error,
122	__u32			errflag,
123	void			*ret_ip)
124{
125	switch (*error) {
126	case 0:
127		return true;
128	case -EDEADLOCK:
129		/* Used to restart an op with deadlock avoidance. */
130		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
131		break;
132	case -EFSBADCRC:
133	case -EFSCORRUPTED:
134		/* Note the badness but don't abort. */
135		sc->sm->sm_flags |= errflag;
136		*error = 0;
137		/* fall through */
138	default:
139		trace_xchk_file_op_error(sc, whichfork, offset, *error,
140				ret_ip);
141		break;
142	}
143	return false;
144}
145
146bool
147xchk_fblock_process_error(
148	struct xfs_scrub	*sc,
149	int			whichfork,
150	xfs_fileoff_t		offset,
151	int			*error)
152{
153	return __xchk_fblock_process_error(sc, whichfork, offset, error,
154			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
155}
156
157bool
158xchk_fblock_xref_process_error(
159	struct xfs_scrub	*sc,
160	int			whichfork,
161	xfs_fileoff_t		offset,
162	int			*error)
163{
164	return __xchk_fblock_process_error(sc, whichfork, offset, error,
165			XFS_SCRUB_OFLAG_XFAIL, __return_address);
166}
167
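/*
 * Illustrative sketch (not part of the upstream file): the calling
 * convention described above, as a hypothetical scrubber check.  Only
 * xchk_process_error() comes from this file; xchk_example_read_metadata()
 * is an assumed stand-in for any fallible operation.
 */
#if 0
STATIC int
xchk_example_check(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno)
{
	int			error;

	/* Any operational error funnels through xchk_process_error(). */
	error = xchk_example_read_metadata(sc, agno, bno);	/* hypothetical */
	if (!xchk_process_error(sc, agno, bno, &error))
		return error;	/* 0 for verifier errors, which set OFLAG_CORRUPT */

	/* ...the actual metadata checks would go here... */
	return 0;
}
#endif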
168/*
169 * Handling scrub corruption/optimization/warning checks.
170 *
171 * The *_set_{corrupt,preen,warning}() family of functions are used to
172 * record the presence of metadata that is incorrect (corrupt), could be
173 * optimized somehow (preen), or should be flagged for administrative
174 * review but is not incorrect (warn).
175 *
176 * ftrace can be used to record the precise metadata location and
177 * approximate code location of the failed check.
178 */
179
180/* Record a block which could be optimized. */
181void
182xchk_block_set_preen(
183	struct xfs_scrub	*sc,
184	struct xfs_buf		*bp)
185{
186	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
187	trace_xchk_block_preen(sc, bp->b_bn, __return_address);
188}
189
190/*
191 * Record an inode which could be optimized.  The trace data will
192 * use the block location of the inode record itself, since no
193 * buffer is passed in.
194 */
195void
196xchk_ino_set_preen(
197	struct xfs_scrub	*sc,
198	xfs_ino_t		ino)
199{
200	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
201	trace_xchk_ino_preen(sc, ino, __return_address);
202}
203
204/* Record something being wrong with the filesystem primary superblock. */
205void
206xchk_set_corrupt(
207	struct xfs_scrub	*sc)
208{
209	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
210	trace_xchk_fs_error(sc, 0, __return_address);
211}
212
213/* Record a corrupt block. */
214void
215xchk_block_set_corrupt(
216	struct xfs_scrub	*sc,
217	struct xfs_buf		*bp)
218{
219	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
220	trace_xchk_block_error(sc, bp->b_bn, __return_address);
221}
222
223/* Record a corruption while cross-referencing. */
224void
225xchk_block_xref_set_corrupt(
226	struct xfs_scrub	*sc,
227	struct xfs_buf		*bp)
228{
229	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
230	trace_xchk_block_error(sc, bp->b_bn, __return_address);
231}
232
233/*
234 * Record a corrupt inode.  The trace data will use the block
235 * location of the inode record itself, since no buffer is
236 * passed in.
237 */
238void
239xchk_ino_set_corrupt(
240	struct xfs_scrub	*sc,
241	xfs_ino_t		ino)
242{
243	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
244	trace_xchk_ino_error(sc, ino, __return_address);
245}
246
247/* Record a corruption while cross-referencing with an inode. */
248void
249xchk_ino_xref_set_corrupt(
250	struct xfs_scrub	*sc,
251	xfs_ino_t		ino)
252{
253	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
254	trace_xchk_ino_error(sc, ino, __return_address);
255}
256
257/* Record corruption in a block indexed by a file fork. */
258void
259xchk_fblock_set_corrupt(
260	struct xfs_scrub	*sc,
261	int			whichfork,
262	xfs_fileoff_t		offset)
263{
264	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
265	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
266}
267
268/* Record a corruption while cross-referencing a fork block. */
269void
270xchk_fblock_xref_set_corrupt(
271	struct xfs_scrub	*sc,
272	int			whichfork,
273	xfs_fileoff_t		offset)
274{
275	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
276	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
277}
278
279/*
280 * Warn about inodes that need administrative review but are not
281 * incorrect.
282 */
283void
284xchk_ino_set_warning(
285	struct xfs_scrub	*sc,
286	xfs_ino_t		ino)
287{
288	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
289	trace_xchk_ino_warning(sc, ino, __return_address);
290}
291
292/* Warn about a block indexed by a file fork that needs review. */
293void
294xchk_fblock_set_warning(
295	struct xfs_scrub	*sc,
296	int			whichfork,
297	xfs_fileoff_t		offset)
298{
299	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
300	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
301}
302
303/* Signal an incomplete scrub. */
304void
305xchk_set_incomplete(
306	struct xfs_scrub	*sc)
307{
308	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
309	trace_xchk_incomplete(sc, __return_address);
310}
311
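/*
 * Illustrative sketch (not part of the upstream file): how a check might
 * use the flagging helpers above.  The xchk_example_*() predicates are
 * assumed; the point is that corruption, preening opportunities and
 * warnings are reported through sm_flags, not through return codes.
 */
#if 0
STATIC void
xchk_example_check_inode_fields(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (xchk_example_field_is_garbage(ip))		/* broken metadata */
		xchk_ino_set_corrupt(sc, ip->i_ino);
	else if (xchk_example_field_is_stale(ip))	/* valid but suboptimal */
		xchk_ino_set_preen(sc, ip->i_ino);
	else if (xchk_example_field_is_odd(ip))		/* valid, needs review */
		xchk_ino_set_warning(sc, ip->i_ino);
}
#endif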
312/*
313 * rmap scrubbing -- compute the number of blocks with a given owner,
314 * at least according to the reverse mapping data.
315 */
316
317struct xchk_rmap_ownedby_info {
318	const struct xfs_owner_info	*oinfo;
319	xfs_filblks_t			*blocks;
320};
321
322STATIC int
323xchk_count_rmap_ownedby_irec(
324	struct xfs_btree_cur		*cur,
325	struct xfs_rmap_irec		*rec,
326	void				*priv)
327{
328	struct xchk_rmap_ownedby_info	*sroi = priv;
329	bool				irec_attr;
330	bool				oinfo_attr;
331
332	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
333	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
334
335	if (rec->rm_owner != sroi->oinfo->oi_owner)
336		return 0;
337
338	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
339		(*sroi->blocks) += rec->rm_blockcount;
340
341	return 0;
342}
343
344/*
345 * Calculate the number of blocks the rmap thinks are owned by something.
346 * The caller should pass us an rmapbt cursor.
347 */
348int
349xchk_count_rmap_ownedby_ag(
350	struct xfs_scrub		*sc,
351	struct xfs_btree_cur		*cur,
352	const struct xfs_owner_info	*oinfo,
353	xfs_filblks_t			*blocks)
354{
355	struct xchk_rmap_ownedby_info	sroi = {
356		.oinfo			= oinfo,
357		.blocks			= blocks,
358	};
359
360	*blocks = 0;
361	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
362			&sroi);
363}
364
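/*
 * Illustrative sketch (not part of the upstream file): counting the blocks
 * that the rmapbt attributes to one owner, using the cursors that
 * xchk_ag_init() (below) hangs off sc->sa.  XFS_RMAP_OINFO_AG is assumed
 * to be the AG-header owner constant of this era; the follow-up comparison
 * is left hypothetical.
 */
#if 0
STATIC void
xchk_example_xref_ag_blocks(
	struct xfs_scrub	*sc)
{
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_AG, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;

	/* ...compare blocks against the AG header counters here... */
}
#endif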
365/*
366 * AG scrubbing
367 *
368 * These helpers facilitate locking an allocation group's header
369 * buffers, setting up cursors for all btrees that are present, and
370 * cleaning everything up once we're through.
371 */
372
373/* Decide if we want to return an AG header read failure. */
374static inline bool
375want_ag_read_header_failure(
376	struct xfs_scrub	*sc,
377	unsigned int		type)
378{
379	/* Return all AG header read failures when scanning btrees. */
380	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
381	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
382	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
383		return true;
384	/*
385	 * If we're scanning a given type of AG header, we only want to
386	 * see read failures from that specific header.  We'd like the
387	 * other headers to cross-check them, but this isn't required.
388	 */
389	if (sc->sm->sm_type == type)
390		return true;
391	return false;
392}
393
394/*
395 * Grab all the headers for an AG.
396 *
397 * The headers should be released by xchk_ag_free, but as a fail
398 * safe we attach all the buffers we grab to the scrub transaction so
399 * they'll all be freed when we cancel it.
400 */
401int
402xchk_ag_read_headers(
403	struct xfs_scrub	*sc,
404	xfs_agnumber_t		agno,
405	struct xfs_buf		**agi,
406	struct xfs_buf		**agf,
407	struct xfs_buf		**agfl)
408{
409	struct xfs_mount	*mp = sc->mp;
410	int			error;
411
412	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
413	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
414		goto out;
415
416	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
417	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
418		goto out;
419
420	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
421	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
422		goto out;
423	error = 0;
424out:
425	return error;
426}
427
428/* Release all the AG btree cursors. */
429void
430xchk_ag_btcur_free(
431	struct xchk_ag		*sa)
432{
433	if (sa->refc_cur)
434		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
435	if (sa->rmap_cur)
436		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
437	if (sa->fino_cur)
438		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
439	if (sa->ino_cur)
440		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
441	if (sa->cnt_cur)
442		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
443	if (sa->bno_cur)
444		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);
445
446	sa->refc_cur = NULL;
447	sa->rmap_cur = NULL;
448	sa->fino_cur = NULL;
449	sa->ino_cur = NULL;
450	sa->bno_cur = NULL;
451	sa->cnt_cur = NULL;
452}
453
454/* Initialize all the btree cursors for an AG. */
455int
456xchk_ag_btcur_init(
457	struct xfs_scrub	*sc,
458	struct xchk_ag		*sa)
459{
460	struct xfs_mount	*mp = sc->mp;
461	xfs_agnumber_t		agno = sa->agno;
462
463	xchk_perag_get(sc->mp, sa);
464	if (sa->agf_bp &&
465	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
466		/* Set up a bnobt cursor for cross-referencing. */
467		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
468				agno, XFS_BTNUM_BNO);
469		if (!sa->bno_cur)
470			goto err;
471	}
472
473	if (sa->agf_bp &&
474	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
475		/* Set up a cntbt cursor for cross-referencing. */
476		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
477				agno, XFS_BTNUM_CNT);
478		if (!sa->cnt_cur)
479			goto err;
480	}
481
482	/* Set up an inobt cursor for cross-referencing. */
483	if (sa->agi_bp &&
484	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
485		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
486					agno, XFS_BTNUM_INO);
487		if (!sa->ino_cur)
488			goto err;
489	}
490
491	/* Set up a finobt cursor for cross-referencing. */
492	if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
493	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
494		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
495				agno, XFS_BTNUM_FINO);
496		if (!sa->fino_cur)
497			goto err;
498	}
499
500	/* Set up a rmapbt cursor for cross-referencing. */
501	if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
502	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
503		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
504				agno);
505		if (!sa->rmap_cur)
506			goto err;
507	}
508
509	/* Set up a refcountbt cursor for cross-referencing. */
510	if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
511	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
512		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
513				sa->agf_bp, agno);
514		if (!sa->refc_cur)
515			goto err;
516	}
517
518	return 0;
519err:
520	return -ENOMEM;
521}
522
523/* Release the AG header context and btree cursors. */
524void
525xchk_ag_free(
526	struct xfs_scrub	*sc,
527	struct xchk_ag		*sa)
528{
529	xchk_ag_btcur_free(sa);
530	if (sa->agfl_bp) {
531		xfs_trans_brelse(sc->tp, sa->agfl_bp);
532		sa->agfl_bp = NULL;
533	}
534	if (sa->agf_bp) {
535		xfs_trans_brelse(sc->tp, sa->agf_bp);
536		sa->agf_bp = NULL;
537	}
538	if (sa->agi_bp) {
539		xfs_trans_brelse(sc->tp, sa->agi_bp);
540		sa->agi_bp = NULL;
541	}
542	if (sa->pag) {
543		xfs_perag_put(sa->pag);
544		sa->pag = NULL;
545	}
546	sa->agno = NULLAGNUMBER;
547}
548
549/*
550 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
551 * order requires us to get the AGI before the AGF.  We use the
552 * transaction to avoid deadlocking on crosslinked metadata buffers;
553 * either the caller passes one in (bmap scrub) or we have to create a
554 * transaction ourselves.
555 */
556int
557xchk_ag_init(
558	struct xfs_scrub	*sc,
559	xfs_agnumber_t		agno,
560	struct xchk_ag		*sa)
561{
562	int			error;
563
564	sa->agno = agno;
565	error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
566			&sa->agf_bp, &sa->agfl_bp);
567	if (error)
568		return error;
569
570	return xchk_ag_btcur_init(sc, sa);
571}
572
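/*
 * Illustrative sketch (not part of the upstream file): the usual bracket
 * around per-AG checks.  The loop body is hypothetical; in the real
 * scrubbers each invocation handles the single AG named in sc->sm.
 */
#if 0
STATIC int
xchk_example_walk_all_ags(
	struct xfs_scrub	*sc)
{
	xfs_agnumber_t		agno;
	int			error = 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_ag_init(sc, agno, &sc->sa);
		if (!xchk_process_error(sc, agno, 0, &error))
			break;

		/* ...cross-reference using sc->sa.*_cur here... */

		xchk_ag_free(sc, &sc->sa);
	}
	return error;
}
#endif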
573/*
574 * Grab the per-ag structure if we haven't already gotten it.  Teardown of the
575 * xchk_ag will release it for us.
576 */
577void
578xchk_perag_get(
579	struct xfs_mount	*mp,
580	struct xchk_ag		*sa)
581{
582	if (!sa->pag)
583		sa->pag = xfs_perag_get(mp, sa->agno);
584}
585
586/* Per-scrubber setup functions */
587
588/*
589 * Grab an empty transaction so that we can re-grab locked buffers if
590 * one of our btrees turns out to be cyclic.
591 *
592 * If we're going to repair something, we need to ask for the largest possible
593 * log reservation so that we can handle the worst case scenario for metadata
594 * updates while rebuilding a metadata item.  We also need to reserve as many
595 * blocks in the head transaction as we think we're going to need to rebuild
596 * the metadata object.
597 */
598int
599xchk_trans_alloc(
600	struct xfs_scrub	*sc,
601	uint			resblks)
602{
603	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
604		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
605				resblks, 0, 0, &sc->tp);
606
607	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
608}
609
610/* Set us up with a transaction and an empty context. */
611int
612xchk_setup_fs(
613	struct xfs_scrub	*sc,
614	struct xfs_inode	*ip)
615{
616	uint			resblks;
617
618	resblks = xrep_calc_ag_resblks(sc);
619	return xchk_trans_alloc(sc, resblks);
620}
621
622/* Set us up with AG headers and btree cursors. */
623int
624xchk_setup_ag_btree(
625	struct xfs_scrub	*sc,
626	struct xfs_inode	*ip,
627	bool			force_log)
628{
629	struct xfs_mount	*mp = sc->mp;
630	int			error;
631
632	/*
633	 * If the caller asks us to checkpoint the log, do so.  This
634	 * expensive operation should be performed infrequently and only
635	 * as a last resort.  Any caller that sets force_log should
636	 * document why they need to do so.
637	 */
638	if (force_log) {
639		error = xchk_checkpoint_log(mp);
640		if (error)
641			return error;
642	}
643
644	error = xchk_setup_fs(sc, ip);
645	if (error)
646		return error;
647
648	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
649}
650
651/* Push everything out of the log onto disk. */
652int
653xchk_checkpoint_log(
654	struct xfs_mount	*mp)
655{
656	int			error;
657
658	error = xfs_log_force(mp, XFS_LOG_SYNC);
659	if (error)
660		return error;
661	xfs_ail_push_all_sync(mp->m_ail);
662	return 0;
663}
664
665/*
666 * Given an inode and the scrub control structure, grab either the
667 * inode referenced in the control structure or the inode passed in.
668 * The inode is not locked.
669 */
670int
671xchk_get_inode(
672	struct xfs_scrub	*sc,
673	struct xfs_inode	*ip_in)
674{
675	struct xfs_imap		imap;
676	struct xfs_mount	*mp = sc->mp;
677	struct xfs_inode	*ip = NULL;
678	int			error;
679
680	/* We want to scan the inode we already had opened. */
681	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
682		sc->ip = ip_in;
683		return 0;
684	}
685
686	/* Look up the inode, see if the generation number matches. */
687	if (xfs_internal_inum(mp, sc->sm->sm_ino))
688		return -ENOENT;
689	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
690			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
691	switch (error) {
692	case -ENOENT:
693		/* Inode doesn't exist, just bail out. */
694		return error;
695	case 0:
696		/* Got an inode, continue. */
697		break;
698	case -EINVAL:
699		/*
700		 * -EINVAL with IGET_UNTRUSTED could mean one of several
701		 * things: userspace gave us an inode number that doesn't
702		 * correspond to fs space, or doesn't have an inobt entry;
703		 * or it could simply mean that the inode buffer failed the
704		 * read verifiers.
705		 *
706		 * Try just the inode mapping lookup -- if it succeeds, then
707		 * the inode buffer verifier failed and something needs fixing.
708		 * Otherwise, we really couldn't find it so tell userspace
709		 * that it no longer exists.
710		 */
711		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
712				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
713		if (error)
714			return -ENOENT;
715		error = -EFSCORRUPTED;
716		/* fall through */
717	default:
718		trace_xchk_op_error(sc,
719				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
720				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
721				error, __return_address);
722		return error;
723	}
724	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
725		xfs_irele(ip);
726		return -ENOENT;
727	}
728
729	sc->ip = ip;
730	return 0;
731}
732
733/* Set us up to scrub a file's contents. */
734int
735xchk_setup_inode_contents(
736	struct xfs_scrub	*sc,
737	struct xfs_inode	*ip,
738	unsigned int		resblks)
739{
740	int			error;
741
742	error = xchk_get_inode(sc, ip);
743	if (error)
744		return error;
745
746	/* Got the inode, lock it and we're ready to go. */
747	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
748	xfs_ilock(sc->ip, sc->ilock_flags);
749	error = xchk_trans_alloc(sc, resblks);
750	if (error)
751		goto out;
752	sc->ilock_flags |= XFS_ILOCK_EXCL;
753	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
754
755out:
756	/* scrub teardown will unlock and release the inode for us */
757	return error;
758}
759
760/*
761 * Predicate that decides if we need to evaluate the cross-reference check.
762 * If there was an error accessing the cross-reference btree, just delete
763 * the cursor and skip the check.
764 */
765bool
766xchk_should_check_xref(
767	struct xfs_scrub	*sc,
768	int			*error,
769	struct xfs_btree_cur	**curpp)
770{
771	/* No point in xref if we already know we're corrupt. */
772	if (xchk_skip_xref(sc->sm))
773		return false;
774
775	if (*error == 0)
776		return true;
777
778	if (curpp) {
779		/* If we've already given up on xref, just bail out. */
780		if (!*curpp)
781			return false;
782
783		/* xref error, delete cursor and bail out. */
784		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
785		*curpp = NULL;
786	}
787
788	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
789	trace_xchk_xref_error(sc, *error, __return_address);
790
791	/*
792	 * Errors encountered during cross-referencing with another
793	 * data structure should not cause this scrubber to abort.
794	 */
795	*error = 0;
796	return false;
797}
798
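/*
 * Illustrative sketch (not part of the upstream file): the cross-reference
 * pattern built around xchk_should_check_xref(), modelled on the helpers in
 * the other scrub files.  xfs_alloc_has_record() is the bnobt range query
 * of this era; what to flag on a hit is up to the caller.
 */
#if 0
STATIC void
xchk_example_xref_is_used_space(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			is_freesp;
	int			error;

	if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
		return;

	/* If the bnobt says this extent is free, something is wrong. */
	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (is_freesp)
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
#endif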
799/* Run the structure verifiers on in-memory buffers to detect bad memory. */
800void
801xchk_buffer_recheck(
802	struct xfs_scrub	*sc,
803	struct xfs_buf		*bp)
804{
805	xfs_failaddr_t		fa;
806
807	if (bp->b_ops == NULL) {
808		xchk_block_set_corrupt(sc, bp);
809		return;
810	}
811	if (bp->b_ops->verify_struct == NULL) {
812		xchk_set_incomplete(sc);
813		return;
814	}
815	fa = bp->b_ops->verify_struct(bp);
816	if (!fa)
817		return;
818	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
819	trace_xchk_block_error(sc, bp->b_bn, fa);
820}
821
822/*
823 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
824 * pointed to by sc->ip and the ILOCK must be held.
825 */
826int
827xchk_metadata_inode_forks(
828	struct xfs_scrub	*sc)
829{
830	__u32			smtype;
831	bool			shared;
832	int			error;
833
834	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
835		return 0;
836
837	/* Metadata inodes don't live on the rt device. */
838	if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
839		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
840		return 0;
841	}
842
843	/* They should never participate in reflink. */
844	if (xfs_is_reflink_inode(sc->ip)) {
845		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
846		return 0;
847	}
848
849	/* They also should never have extended attributes. */
850	if (xfs_inode_hasattr(sc->ip)) {
851		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
852		return 0;
853	}
854
855	/* Invoke the data fork scrubber. */
856	smtype = sc->sm->sm_type;
857	sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
858	error = xchk_bmap_data(sc);
859	sc->sm->sm_type = smtype;
860	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
861		return error;
862
863	/* Look for incorrect shared blocks. */
864	if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
865		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
866				&shared);
867		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
868				&error))
869			return error;
870		if (shared)
871			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
872	}
873
874	return error;
875}
876
877/*
878 * Try to lock an inode in violation of the usual locking order rules.  For
879 * example, trying to get the IOLOCK while in transaction context, or just
880 * plain breaking AG-order or inode-order inode locking rules.  Either way,
881 * the only way to avoid an ABBA deadlock is to use trylock and back off if
882 * we can't.
883 */
884int
885xchk_ilock_inverted(
886	struct xfs_inode	*ip,
887	uint			lock_mode)
888{
889	int			i;
890
891	for (i = 0; i < 20; i++) {
892		if (xfs_ilock_nowait(ip, lock_mode))
893			return 0;
894		delay(1);
895	}
896	return -EDEADLOCK;
897}
898
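/*
 * Illustrative sketch (not part of the upstream file): taking a second,
 * hypothetical inode's ILOCK out of order with xchk_ilock_inverted().  On
 * -EDEADLOCK the caller unwinds and lets the scrub core retry the whole
 * operation, which is why __xchk_process_error() handles -EDEADLOCK
 * specially.
 */
#if 0
STATIC int
xchk_example_lock_two_inodes(
	struct xfs_scrub	*sc,
	struct xfs_inode	*other_ip)
{
	int			error;

	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	error = xchk_ilock_inverted(other_ip, XFS_ILOCK_EXCL);
	if (error) {
		xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
		return error;
	}
	return 0;
}
#endif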
899/* Pause background reaping of resources. */
900void
901xchk_stop_reaping(
902	struct xfs_scrub	*sc)
903{
904	sc->flags |= XCHK_REAPING_DISABLED;
905	xfs_stop_block_reaping(sc->mp);
906}
907
908/* Restart background reaping of resources. */
909void
910xchk_start_reaping(
911	struct xfs_scrub	*sc)
912{
913	xfs_start_block_reaping(sc->mp);
914	sc->flags &= ~XCHK_REAPING_DISABLED;
915}