// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_iwalk.h"
#include "xfs_ialloc.h"
#include "xfs_sb.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/nlinks.h"
#include "scrub/trace.h"

/*
 * Live Inode Link Count Repair
 * ============================
 *
 * Use the live inode link count information that we collected to replace the
 * nlink values of the incore inodes.  A scrub->repair cycle should have left
 * the live data and hooks active, so this is safe so long as we make sure the
 * inode is locked.
 */

/*
 * Correct the link count of the given inode.  Because we have to grab locks
 * and resources in a certain order, it's possible that this will be a no-op.
 */
STATIC int
xrep_nlinks_repair_inode(
	struct xchk_nlink_ctrs	*xnc)
{
	struct xchk_nlink	obs;
	struct xfs_scrub	*sc = xnc->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	uint64_t		total_links;
	uint64_t		actual_nlink;
	bool			dirty = false;
	int			error;

	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &sc->tp);
	if (error)
		return error;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(sc->tp, ip, 0);

	mutex_lock(&xnc->lock);

	if (xchk_iscan_aborted(&xnc->collect_iscan)) {
		error = -ECANCELED;
		goto out_scanlock;
	}

	error = xfarray_load_sparse(xnc->nlinks, ip->i_ino, &obs);
	if (error)
		goto out_scanlock;

	/*
	 * We're done accessing the shared scan data, so we can drop the lock.
	 * We still hold @ip's ILOCK, so its link count cannot change.
	 */
	mutex_unlock(&xnc->lock);

	total_links = xchk_nlink_total(ip, &obs);
	actual_nlink = VFS_I(ip)->i_nlink;

	/*
	 * Non-directories cannot have directories pointing up to them.
	 *
	 * We previously set error to zero, but set it again because one
	 * static checker author fears that programmers will fail to maintain
	 * this invariant and built their tool to flag this as a security
	 * risk.  A different tool author made their bot complain about the
	 * redundant store.  This is a never-ending and stupid battle; both
	 * tools missed *actual bugs* elsewhere; and I no longer care.
	 */
	if (!S_ISDIR(VFS_I(ip)->i_mode) && obs.children != 0) {
		trace_xrep_nlinks_unfixable_inode(mp, ip, &obs);
		error = 0;
		goto out_trans;
	}

	/*
	 * We did not find any links to this inode.  If the inode agrees, we
	 * have nothing further to do.  If not, the inode has a nonzero link
	 * count and we don't have anywhere to graft the child onto.  Dropping
	 * a live inode's link count to zero can cause unexpected shutdowns in
	 * inactivation, so leave it alone.
	 */
	if (total_links == 0) {
		if (actual_nlink != 0)
			trace_xrep_nlinks_unfixable_inode(mp, ip, &obs);
		goto out_trans;
	}

	/* Commit the new link count if it changed. */
	if (total_links != actual_nlink) {
		if (total_links > XFS_MAXLINK) {
			trace_xrep_nlinks_unfixable_inode(mp, ip, &obs);
			goto out_trans;
		}

		trace_xrep_nlinks_update_inode(mp, ip, &obs);

		set_nlink(VFS_I(ip), total_links);
		dirty = true;
	}

	if (!dirty) {
		error = 0;
		goto out_trans;
	}

	xfs_trans_log_inode(sc->tp, ip, XFS_ILOG_CORE);

	error = xrep_trans_commit(sc);
	xchk_iunlock(sc, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;

out_scanlock:
	mutex_unlock(&xnc->lock);
out_trans:
	xchk_trans_cancel(sc);
	xchk_iunlock(sc, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;
}

/*
 * Try to visit every inode in the filesystem for repairs.  Move on if we
 * can't grab an inode, since we're still making forward progress.
 */
static int
xrep_nlinks_iter(
	struct xchk_nlink_ctrs	*xnc,
	struct xfs_inode	**ipp)
{
	int			error;

	do {
		error = xchk_iscan_iter(&xnc->compare_iscan, ipp);
	} while (error == -EBUSY);

	return error;
}

/* Commit the new inode link counters. */
int
xrep_nlinks(
	struct xfs_scrub	*sc)
{
	struct xchk_nlink_ctrs	*xnc = sc->buf;
	int			error;

	/*
	 * We need ftype for an accurate count of the number of child
	 * subdirectory links.  Child subdirectories with a back link (dotdot
	 * entry) but no forward link are unfixable, so we cannot repair the
	 * link count of the parent directory based on the back link count
	 * alone.  Filesystems without ftype support are rare (old V4) so we
	 * just skip out here.
	 */
	if (!xfs_has_ftype(sc->mp))
		return -EOPNOTSUPP;

	/*
	 * Use the inobt to walk all allocated inodes to compare and fix the
	 * link counts.  Retry iget every tenth of a second for up to 30
	 * seconds -- even if repair misses a few inodes, we still try to fix
	 * as many of them as we can.
	 */
	xchk_iscan_start(sc, 30000, 100, &xnc->compare_iscan);
	ASSERT(sc->ip == NULL);

	while ((error = xrep_nlinks_iter(xnc, &sc->ip)) == 1) {
		/*
		 * Commit the scrub transaction so that we can create repair
		 * transactions with the correct reservations.
		 */
		xchk_trans_cancel(sc);
		error = xrep_nlinks_repair_inode(xnc);
		xchk_iscan_mark_visited(&xnc->compare_iscan, sc->ip);
		xchk_irele(sc, sc->ip);
		sc->ip = NULL;
		if (error)
			break;

		if (xchk_should_terminate(sc, &error))
			break;

		/*
		 * Create a new empty transaction so that we can advance the
		 * iscan cursor without deadlocking if the inobt has a cycle.
		 * We can only push the inactivation workqueues with an empty
		 * transaction.
		 */
		error = xchk_trans_alloc_empty(sc);
		if (error)
			break;
	}
	xchk_iscan_iter_finish(&xnc->compare_iscan);
	xchk_iscan_teardown(&xnc->compare_iscan);
	return error;
}