// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}
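
/*
 * Worked example: BBTOB() converts 512-byte basic blocks to bytes, so for a
 * filesystem with 4 KiB blocks (8 basic blocks per dquot chunk) the result is
 * BBTOB(8) / sizeof(xfs_dqblk_t) == 4096 / sizeof(xfs_dqblk_t) on-disk dquot
 * records per chunk.  The quota subsystem normally caches this value as
 * mp->m_quotainfo->qi_dqperchunk; the open-coded calculation here is needed
 * by the verifiers below when no quotainfo exists yet, e.g. during log
 * recovery.
 */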

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */
xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	__u8			ddq_type;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't replay the quota buffers when there's a quotaoff
	 *    logitem. But the allocation will be replayed so we'll end up
	 *    with an uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (ddq->d_type & ~XFS_DQTYPE_ANY)
		return __this_address;
	ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
	if (ddq_type != XFS_DQTYPE_USER &&
	    ddq_type != XFS_DQTYPE_PROJ &&
	    ddq_type != XFS_DQTYPE_GROUP)
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
	    !xfs_sb_version_hasbigtime(&mp->m_sb))
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}
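
/*
 * Hypothetical caller sketch (mirrors the real caller, xfs_dquot_buf_verify()
 * below): a NULL return from the verifiers above means the dquot passed every
 * check, while a non-NULL xfs_failaddr_t is the address of the failing check,
 * suitable for the buffer corruption reporting helpers, e.g.
 *
 *	fa = xfs_dqblk_verify(mp, dqb, id);
 *	if (fa)
 *		xfs_buf_verifier_error(bp, -EFSCORRUPTED, __func__,
 *				dqb, sizeof(struct xfs_dqblk), fa);
 *
 * where bp is assumed to be the buffer that dqb was read from.
 */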

/*
 * Reset the contents of an on-disk dquot block to a sane, freshly
 * initialized state for the given id and type.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(xfs_dqblk_t));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_type = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}

STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
	struct xfs_disk_dquot	*ddq,
	__be32			dtimer)
{
	uint32_t		t = be32_to_cpu(dtimer);

	if (t != 0 && (ddq->d_type & XFS_DQTYPE_BIGTIME))
		return xfs_dq_bigtime_to_unix(t);

	return t;
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
	struct xfs_dquot	*dqp,
	time64_t		timer)
{
	uint32_t		t = timer;

	if (timer != 0 && (dqp->q_type & XFS_DQTYPE_BIGTIME))
		t = xfs_dq_unix_to_bigtime(timer);

	return cpu_to_be32(t);
}
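
/*
 * Note on the two helpers above: when XFS_DQTYPE_BIGTIME is set, the 32-bit
 * on-disk timer is not a raw Unix timestamp; xfs_dq_bigtime_to_unix() and
 * xfs_dq_unix_to_bigtime() (shared dquot format helpers) scale the seconds
 * value so that grace period expirations well past the classic 32-bit time
 * range can be encoded at a coarser granularity.  A timer value of zero
 * always means "not started", which is why both conversions pass zero
 * through unchanged.
 */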