// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__

struct xfs_scrub;

/*
 * Standard flags for allocating memory within scrub.  NOFS context is
 * configured by the process allocation scope.  Scrub and repair must be able
 * to back out gracefully if there isn't enough memory.  Force-cast to avoid
 * complaints from static checkers.
 */
#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
					 __GFP_RETRY_MAYFAIL))
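
/*
 * Illustrative sketch (not taken from this header): a scrubber that needs
 * scratch memory would typically allocate it with XCHK_GFP_FLAGS and back
 * out gracefully if the allocation fails, along the lines of:
 *
 *	sc->buf = kvzalloc(sizeof(struct xchk_foo), XCHK_GFP_FLAGS);
 *	if (!sc->buf)
 *		return -ENOMEM;
 *
 * "struct xchk_foo" is a hypothetical per-scrubber record type named only
 * for this example.
 */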

/* Type info and names for the scrub types. */
enum xchk_type {
	ST_NONE = 1,	/* disabled */
	ST_PERAG,	/* per-AG metadata */
	ST_FS,		/* per-FS metadata */
	ST_INODE,	/* per-inode metadata */
};

struct xchk_meta_ops {
	/* Acquire whatever resources are needed for the operation. */
	int		(*setup)(struct xfs_scrub *sc);

	/* Examine metadata for errors. */
	int		(*scrub)(struct xfs_scrub *);

	/* Repair or optimize the metadata. */
	int		(*repair)(struct xfs_scrub *);

	/*
	 * Re-scrub the metadata we repaired, in case there's extra work that
	 * we need to do to check our repair work.  If this is NULL, we'll use
	 * the ->scrub function pointer, assuming that the regular scrub is
	 * sufficient.
	 */
	int		(*repair_eval)(struct xfs_scrub *sc);

	/* Decide if we even have this piece of metadata. */
	bool		(*has)(struct xfs_mount *);

	/* type describing required/allowed inputs */
	enum xchk_type	type;
};
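
/*
 * Illustrative sketch (assumed names, not declarations from this header):
 * the scrub dispatcher pairs these operations with a scrub type in a table,
 * and an entry for the AGF scrubber might look roughly like:
 *
 *	[XFS_SCRUB_TYPE_AGF] = {
 *		.type	= ST_PERAG,
 *		.setup	= xchk_setup_fs,
 *		.scrub	= xchk_agf,
 *		.repair	= xrep_agf,
 *	},
 *
 * xchk_agf() is declared below; XFS_SCRUB_TYPE_AGF, xchk_setup_fs() and
 * xrep_agf() are assumptions made for this example.
 */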

/* Buffer pointers and btree cursors for an entire AG. */
struct xchk_ag {
	struct xfs_perag	*pag;

	/* AG btree roots */
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agi_bp;

	/* AG btrees */
	struct xfs_btree_cur	*bno_cur;
	struct xfs_btree_cur	*cnt_cur;
	struct xfs_btree_cur	*ino_cur;
	struct xfs_btree_cur	*fino_cur;
	struct xfs_btree_cur	*rmap_cur;
	struct xfs_btree_cur	*refc_cur;
};

struct xfs_scrub {
	/* General scrub state. */
	struct xfs_mount		*mp;
	struct xfs_scrub_metadata	*sm;
	const struct xchk_meta_ops	*ops;
	struct xfs_trans		*tp;

	/* File that scrub was called with. */
	struct file			*file;

	/*
	 * File that is undergoing the scrub operation.  This can differ from
	 * the file that scrub was called with if we're checking file-based fs
	 * metadata (e.g. rt bitmaps) or if we're doing a scrub-by-handle for
	 * something that can't be opened directly (e.g. symlinks).
	 */
	struct xfs_inode		*ip;

	/* Kernel memory buffer used by scrubbers; freed at teardown. */
	void				*buf;

	/*
	 * Clean up resources owned by whatever is in the buffer.  Cleanup can
	 * be deferred with this hook as a means for scrub functions to pass
	 * data to repair functions.  This function must not free the buffer
	 * itself.  (An illustrative sketch follows this structure.)
	 */
	void				(*buf_cleanup)(void *buf);

	/* xfile used by the scrubbers; freed at teardown. */
	struct xfile			*xfile;

	/* buffer target for in-memory btrees; also freed at teardown. */
	struct xfs_buftarg		*xmbtp;

	/* Lock flags for @ip. */
	uint				ilock_flags;

	/* See the XCHK/XREP state flags below. */
	unsigned int			flags;

	/*
	 * The XFS_SICK_* flags that correspond to the metadata being scrubbed
	 * or repaired.  We will use this mask to update the in-core fs health
	 * status with whatever we find.
	 */
	unsigned int			sick_mask;

	/* State tracking for single-AG operations. */
	struct xchk_ag			sa;
};
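
/*
 * Illustrative sketch of the buf/buf_cleanup contract described above: a
 * scrubber that stashes records for a later repair step might do something
 * along these lines, so that teardown releases whatever the buffer owns
 * without freeing the buffer itself:
 *
 *	sc->buf = kvzalloc(sizeof(struct xchk_foo), XCHK_GFP_FLAGS);
 *	if (!sc->buf)
 *		return -ENOMEM;
 *	sc->buf_cleanup = xchk_foo_buf_cleanup;
 *
 * "struct xchk_foo" and xchk_foo_buf_cleanup() are hypothetical names used
 * only for this example.
 */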

/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
#define XCHK_TRY_HARDER		(1U << 0)  /* can't get resources, try again */
#define XCHK_HAVE_FREEZE_PROT	(1U << 1)  /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN	(1U << 2)  /* defer ops draining enabled */
#define XCHK_NEED_DRAIN		(1U << 3)  /* scrub needs to drain defer ops */
#define XCHK_FSGATES_QUOTA	(1U << 4)  /* quota live update enabled */
#define XCHK_FSGATES_DIRENTS	(1U << 5)  /* directory live update enabled */
#define XCHK_FSGATES_RMAP	(1U << 6)  /* rmapbt live update enabled */
#define XREP_RESET_PERAG_RESV	(1U << 30) /* must reset AG space reservation */
#define XREP_ALREADY_FIXED	(1U << 31) /* checking our repair work */

/*
 * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
 * is enabled only for this particular online fsck.  When not in use, the
 * features are gated off via dynamic code patching, which is why the state
 * must be enabled during scrub setup and can only be torn down afterwards.
 */
#define XCHK_FSGATES_ALL	(XCHK_FSGATES_DRAIN | \
				 XCHK_FSGATES_QUOTA | \
				 XCHK_FSGATES_DIRENTS | \
				 XCHK_FSGATES_RMAP)
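
/*
 * Illustrative sketch of how the state flags above are typically consumed
 * (assumed usage, not defined in this header): a scrubber that cannot get a
 * resource without blocking bails out so that the dispatcher can retry the
 * whole operation with XCHK_TRY_HARDER set:
 *
 *	if (!(sc->flags & XCHK_TRY_HARDER))
 *		return -EDEADLOCK;
 *
 * On the second pass the scrubber is expected to wait for the resource
 * instead of giving up.
 */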

/* Metadata scrubbers */
int xchk_tester(struct xfs_scrub *sc);
int xchk_superblock(struct xfs_scrub *sc);
int xchk_agf(struct xfs_scrub *sc);
int xchk_agfl(struct xfs_scrub *sc);
int xchk_agi(struct xfs_scrub *sc);
int xchk_allocbt(struct xfs_scrub *sc);
int xchk_iallocbt(struct xfs_scrub *sc);
int xchk_rmapbt(struct xfs_scrub *sc);
int xchk_refcountbt(struct xfs_scrub *sc);
int xchk_inode(struct xfs_scrub *sc);
int xchk_bmap_data(struct xfs_scrub *sc);
int xchk_bmap_attr(struct xfs_scrub *sc);
int xchk_bmap_cow(struct xfs_scrub *sc);
int xchk_directory(struct xfs_scrub *sc);
int xchk_xattr(struct xfs_scrub *sc);
int xchk_symlink(struct xfs_scrub *sc);
int xchk_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_rtbitmap(struct xfs_scrub *sc);
int xchk_rtsummary(struct xfs_scrub *sc);
#else
static inline int
xchk_rtbitmap(struct xfs_scrub *sc)
{
	return -ENOENT;
}
static inline int
xchk_rtsummary(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_quota(struct xfs_scrub *sc);
int xchk_quotacheck(struct xfs_scrub *sc);
#else
static inline int
xchk_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
static inline int
xchk_quotacheck(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
int xchk_fscounters(struct xfs_scrub *sc);
int xchk_nlinks(struct xfs_scrub *sc);

/* cross-referencing helpers */
void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_only_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
		xfs_extlen_t len);
#else
# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
#endif
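
/*
 * Illustrative sketch (assumed usage): after a scrubber has checked a record
 * covering the AG extent [agbno, agbno + len), it can cross-reference that
 * extent against the other space metadata, e.g.:
 *
 *	xchk_xref_is_used_space(sc, agbno, len);
 *	xchk_xref_is_not_shared(sc, agbno, len);
 *
 * These helpers return void because cross-referencing problems are recorded
 * in the scrub request state rather than reported as errors.
 */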

#endif	/* __XFS_SCRUB_SCRUB_H__ */