1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_mount.h"
25#include "xfs_defer.h"
26#include "xfs_inode.h"
27#include "xfs_errortag.h"
28#include "xfs_error.h"
29#include "xfs_cksum.h"
30#include "xfs_icache.h"
31#include "xfs_trans.h"
32#include "xfs_ialloc.h"
33#include "xfs_dir2.h"
34
35#include <linux/iversion.h>
36
37/*
38 * Check that none of the inodes in the buffer have a next
39 * unlinked field of 0.
40 */
41#if defined(DEBUG)
42void
43xfs_inobp_check(
44 xfs_mount_t *mp,
45 xfs_buf_t *bp)
46{
47 int i;
48 int j;
49 xfs_dinode_t *dip;
50
51 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
52
53 for (i = 0; i < j; i++) {
54 dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
55 if (!dip->di_next_unlinked) {
56 xfs_alert(mp,
57 "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
58 i, (long long)bp->b_bn);
59 }
60 }
61}
62#endif
63
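/*
 * Check that the on-disk inode version is valid for this filesystem: v5
 * (CRC-enabled) superblocks require v3 inodes, while older superblocks
 * accept v1 or v2 inodes.
 */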
64bool
65xfs_dinode_good_version(
66 struct xfs_mount *mp,
67 __u8 version)
68{
69 if (xfs_sb_version_hascrc(&mp->m_sb))
70 return version == 3;
71
72 return version == 1 || version == 2;
73}
74
75/*
76 * If we are doing readahead on an inode buffer, we might be in log recovery
77 * reading an inode allocation buffer that hasn't yet been replayed, and hence
78 * has not had the inode cores stamped into it. Hence for readahead, the buffer
79 * may be potentially invalid.
80 *
81 * If the readahead buffer is invalid, we need to mark it with an error and
82 * clear the DONE status of the buffer so that a followup read will re-read it
83 * from disk. We don't report the error otherwise to avoid warnings during log
84 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
85 * because all we want to do is say readahead failed; there is no-one to report
86 * the error to, so this will distinguish it from a non-ra verifier failure.
87 * Changes to this readahead error behaviour also need to be reflected in
88 * xfs_dquot_buf_readahead_verify().
89 */
90static void
91xfs_inode_buf_verify(
92 struct xfs_buf *bp,
93 bool readahead)
94{
95 struct xfs_mount *mp = bp->b_target->bt_mount;
96 xfs_agnumber_t agno;
97 int i;
98 int ni;
99
100 /*
101 * Validate the magic number and version of every inode in the buffer
102 */
103 agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
104 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
105 for (i = 0; i < ni; i++) {
106 int di_ok;
107 xfs_dinode_t *dip;
108 xfs_agino_t unlinked_ino;
109
110 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
111 unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
112 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
113 xfs_dinode_good_version(mp, dip->di_version) &&
114 (unlinked_ino == NULLAGINO ||
115 xfs_verify_agino(mp, agno, unlinked_ino));
116 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
117 XFS_ERRTAG_ITOBP_INOTOBP))) {
118 if (readahead) {
119 bp->b_flags &= ~XBF_DONE;
120 xfs_buf_ioerror(bp, -EIO);
121 return;
122 }
123
124#ifdef DEBUG
125 xfs_alert(mp,
126 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
127 (unsigned long long)bp->b_bn, i,
128 be16_to_cpu(dip->di_magic));
129#endif
130 xfs_buf_verifier_error(bp, -EFSCORRUPTED,
131 __func__, dip, sizeof(*dip),
132 NULL);
133 return;
134 }
135 }
136}
137
138
139static void
140xfs_inode_buf_read_verify(
141 struct xfs_buf *bp)
142{
143 xfs_inode_buf_verify(bp, false);
144}
145
146static void
147xfs_inode_buf_readahead_verify(
148 struct xfs_buf *bp)
149{
150 xfs_inode_buf_verify(bp, true);
151}
152
153static void
154xfs_inode_buf_write_verify(
155 struct xfs_buf *bp)
156{
157 xfs_inode_buf_verify(bp, false);
158}
159
160const struct xfs_buf_ops xfs_inode_buf_ops = {
161 .name = "xfs_inode",
162 .verify_read = xfs_inode_buf_read_verify,
163 .verify_write = xfs_inode_buf_write_verify,
164};
165
166const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
167 .name = "xfs_inode_ra",
168 .verify_read = xfs_inode_buf_readahead_verify,
169 .verify_write = xfs_inode_buf_write_verify,
170};
171
172
173/*
174 * This routine is called to map an inode to the buffer containing the on-disk
175 * version of the inode. It returns a pointer to the buffer containing the
176 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
177 * pointer to the on-disk inode within that buffer.
178 *
179 * If a non-zero error is returned, then the contents of bpp and dipp are
180 * undefined.
181 */
182int
183xfs_imap_to_bp(
184 struct xfs_mount *mp,
185 struct xfs_trans *tp,
186 struct xfs_imap *imap,
187 struct xfs_dinode **dipp,
188 struct xfs_buf **bpp,
189 uint buf_flags,
190 uint iget_flags)
191{
192 struct xfs_buf *bp;
193 int error;
194
195 buf_flags |= XBF_UNMAPPED;
196 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
197 (int)imap->im_len, buf_flags, &bp,
198 &xfs_inode_buf_ops);
199 if (error) {
200 if (error == -EAGAIN) {
201 ASSERT(buf_flags & XBF_TRYLOCK);
202 return error;
203 }
204
205 if (error == -EFSCORRUPTED &&
206 (iget_flags & XFS_IGET_UNTRUSTED))
207 return -EINVAL;
208
209 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
210 __func__, error);
211 return error;
212 }
213
214 *bpp = bp;
215 *dipp = xfs_buf_offset(bp, imap->im_boffset);
216 return 0;
217}
218
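/*
 * Copy the on-disk inode core into the in-core inode. Link count, timestamps,
 * generation and mode go into the VFS inode; the remaining fields go into the
 * xfs_icdinode. v1 inodes are upgraded to v2 format here.
 */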
219void
220xfs_inode_from_disk(
221 struct xfs_inode *ip,
222 struct xfs_dinode *from)
223{
224 struct xfs_icdinode *to = &ip->i_d;
225 struct inode *inode = VFS_I(ip);
226
227
228 /*
229 * Convert v1 inodes immediately to v2 inode format as this is the
230 * minimum inode version format we support in the rest of the code.
231 */
232 to->di_version = from->di_version;
233 if (to->di_version == 1) {
234 set_nlink(inode, be16_to_cpu(from->di_onlink));
235 to->di_projid_lo = 0;
236 to->di_projid_hi = 0;
237 to->di_version = 2;
238 } else {
239 set_nlink(inode, be32_to_cpu(from->di_nlink));
240 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
241 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
242 }
243
244 to->di_format = from->di_format;
245 to->di_uid = be32_to_cpu(from->di_uid);
246 to->di_gid = be32_to_cpu(from->di_gid);
247 to->di_flushiter = be16_to_cpu(from->di_flushiter);
248
249 /*
250 * Time is signed, so need to convert to signed 32 bit before
251 * storing in inode timestamp which may be 64 bit. Otherwise
252 * a time before epoch is converted to a time long after epoch
253 * on 64 bit systems.
254 */
255 inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
256 inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
257 inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
258 inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
259 inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
260 inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
261 inode->i_generation = be32_to_cpu(from->di_gen);
262 inode->i_mode = be16_to_cpu(from->di_mode);
263
264 to->di_size = be64_to_cpu(from->di_size);
265 to->di_nblocks = be64_to_cpu(from->di_nblocks);
266 to->di_extsize = be32_to_cpu(from->di_extsize);
267 to->di_nextents = be32_to_cpu(from->di_nextents);
268 to->di_anextents = be16_to_cpu(from->di_anextents);
269 to->di_forkoff = from->di_forkoff;
270 to->di_aformat = from->di_aformat;
271 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
272 to->di_dmstate = be16_to_cpu(from->di_dmstate);
273 to->di_flags = be16_to_cpu(from->di_flags);
274
275 if (to->di_version == 3) {
276 inode_set_iversion_queried(inode,
277 be64_to_cpu(from->di_changecount));
278 to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
279 to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
280 to->di_flags2 = be64_to_cpu(from->di_flags2);
281 to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
282 }
283}
284
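/*
 * Convert the in-core inode back into on-disk (big-endian) format. For v3
 * inodes the caller-supplied LSN is stamped into the inode core as well.
 */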
285void
286xfs_inode_to_disk(
287 struct xfs_inode *ip,
288 struct xfs_dinode *to,
289 xfs_lsn_t lsn)
290{
291 struct xfs_icdinode *from = &ip->i_d;
292 struct inode *inode = VFS_I(ip);
293
294 to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
295 to->di_onlink = 0;
296
297 to->di_version = from->di_version;
298 to->di_format = from->di_format;
299 to->di_uid = cpu_to_be32(from->di_uid);
300 to->di_gid = cpu_to_be32(from->di_gid);
301 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
302 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
303
304 memset(to->di_pad, 0, sizeof(to->di_pad));
305 to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
306 to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
307 to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
308 to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
309 to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
310 to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
311 to->di_nlink = cpu_to_be32(inode->i_nlink);
312 to->di_gen = cpu_to_be32(inode->i_generation);
313 to->di_mode = cpu_to_be16(inode->i_mode);
314
315 to->di_size = cpu_to_be64(from->di_size);
316 to->di_nblocks = cpu_to_be64(from->di_nblocks);
317 to->di_extsize = cpu_to_be32(from->di_extsize);
318 to->di_nextents = cpu_to_be32(from->di_nextents);
319 to->di_anextents = cpu_to_be16(from->di_anextents);
320 to->di_forkoff = from->di_forkoff;
321 to->di_aformat = from->di_aformat;
322 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
323 to->di_dmstate = cpu_to_be16(from->di_dmstate);
324 to->di_flags = cpu_to_be16(from->di_flags);
325
326 if (from->di_version == 3) {
327 to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
328 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
329 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
330 to->di_flags2 = cpu_to_be64(from->di_flags2);
331 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
332 to->di_ino = cpu_to_be64(ip->i_ino);
333 to->di_lsn = cpu_to_be64(lsn);
334 memset(to->di_pad2, 0, sizeof(to->di_pad2));
335 uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
336 to->di_flushiter = 0;
337 } else {
338 to->di_flushiter = cpu_to_be16(from->di_flushiter);
339 }
340}
341
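/*
 * Convert a log-format inode core (host-endian struct xfs_log_dinode) into
 * the on-disk dinode format.
 */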
342void
343xfs_log_dinode_to_disk(
344 struct xfs_log_dinode *from,
345 struct xfs_dinode *to)
346{
347 to->di_magic = cpu_to_be16(from->di_magic);
348 to->di_mode = cpu_to_be16(from->di_mode);
349 to->di_version = from->di_version;
350 to->di_format = from->di_format;
351 to->di_onlink = 0;
352 to->di_uid = cpu_to_be32(from->di_uid);
353 to->di_gid = cpu_to_be32(from->di_gid);
354 to->di_nlink = cpu_to_be32(from->di_nlink);
355 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
356 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
357 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
358
359 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
360 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
361 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
362 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
363 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
364 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
365
366 to->di_size = cpu_to_be64(from->di_size);
367 to->di_nblocks = cpu_to_be64(from->di_nblocks);
368 to->di_extsize = cpu_to_be32(from->di_extsize);
369 to->di_nextents = cpu_to_be32(from->di_nextents);
370 to->di_anextents = cpu_to_be16(from->di_anextents);
371 to->di_forkoff = from->di_forkoff;
372 to->di_aformat = from->di_aformat;
373 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
374 to->di_dmstate = cpu_to_be16(from->di_dmstate);
375 to->di_flags = cpu_to_be16(from->di_flags);
376 to->di_gen = cpu_to_be32(from->di_gen);
377
378 if (from->di_version == 3) {
379 to->di_changecount = cpu_to_be64(from->di_changecount);
380 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
381 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
382 to->di_flags2 = cpu_to_be64(from->di_flags2);
383 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
384 to->di_ino = cpu_to_be64(from->di_ino);
385 to->di_lsn = cpu_to_be64(from->di_lsn);
386 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
387 uuid_copy(&to->di_uuid, &from->di_uuid);
388 to->di_flushiter = 0;
389 } else {
390 to->di_flushiter = cpu_to_be16(from->di_flushiter);
391 }
392}
393
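/*
 * Verify an on-disk inode. Returns the address of the first failing check,
 * or NULL if the inode passes all checks.
 */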
394xfs_failaddr_t
395xfs_dinode_verify(
396 struct xfs_mount *mp,
397 xfs_ino_t ino,
398 struct xfs_dinode *dip)
399{
400 uint16_t mode;
401 uint16_t flags;
402 uint64_t flags2;
403 uint64_t di_size;
404
405 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
406 return __this_address;
407
408 /* Verify v3 integrity information first */
409 if (dip->di_version >= 3) {
410 if (!xfs_sb_version_hascrc(&mp->m_sb))
411 return __this_address;
412 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
413 XFS_DINODE_CRC_OFF))
414 return __this_address;
415 if (be64_to_cpu(dip->di_ino) != ino)
416 return __this_address;
417 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
418 return __this_address;
419 }
420
421 /* don't allow invalid i_size */
422 di_size = be64_to_cpu(dip->di_size);
423 if (di_size & (1ULL << 63))
424 return __this_address;
425
426 mode = be16_to_cpu(dip->di_mode);
427 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
428 return __this_address;
429
430 /* No zero-length symlinks/dirs. */
431 if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
432 return __this_address;
433
434 /* Fork checks carried over from xfs_iformat_fork */
435 if (mode &&
436 be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
437 be64_to_cpu(dip->di_nblocks))
438 return __this_address;
439
440 if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
441 return __this_address;
442
443 flags = be16_to_cpu(dip->di_flags);
444
445 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
446 return __this_address;
447
448 /* Do we have appropriate data fork formats for the mode? */
449 switch (mode & S_IFMT) {
450 case S_IFIFO:
451 case S_IFCHR:
452 case S_IFBLK:
453 case S_IFSOCK:
454 if (dip->di_format != XFS_DINODE_FMT_DEV)
455 return __this_address;
456 break;
457 case S_IFREG:
458 case S_IFLNK:
459 case S_IFDIR:
460 switch (dip->di_format) {
461 case XFS_DINODE_FMT_LOCAL:
462 /*
463 * no local regular files yet
464 */
465 if (S_ISREG(mode))
466 return __this_address;
467 if (di_size > XFS_DFORK_DSIZE(dip, mp))
468 return __this_address;
469 if (dip->di_nextents)
470 return __this_address;
471 /* fall through */
472 case XFS_DINODE_FMT_EXTENTS:
473 case XFS_DINODE_FMT_BTREE:
474 break;
475 default:
476 return __this_address;
477 }
478 break;
479 case 0:
480 /* Uninitialized inode ok. */
481 break;
482 default:
483 return __this_address;
484 }
485
486 if (XFS_DFORK_Q(dip)) {
487 switch (dip->di_aformat) {
488 case XFS_DINODE_FMT_LOCAL:
489 if (dip->di_anextents)
490 return __this_address;
491 /* fall through */
492 case XFS_DINODE_FMT_EXTENTS:
493 case XFS_DINODE_FMT_BTREE:
494 break;
495 default:
496 return __this_address;
497 }
498 } else {
499 /*
500 * If there is no fork offset, this may be a freshly-made inode
501 * in a new disk cluster, in which case di_aformat is zeroed.
502 * Otherwise, such an inode must be in EXTENTS format; this goes
503 * for freed inodes as well.
504 */
505 switch (dip->di_aformat) {
506 case 0:
507 case XFS_DINODE_FMT_EXTENTS:
508 break;
509 default:
510 return __this_address;
511 }
512 if (dip->di_anextents)
513 return __this_address;
514 }
515
516 /* only version 3 or greater inodes are extensively verified here */
517 if (dip->di_version < 3)
518 return NULL;
519
520 flags2 = be64_to_cpu(dip->di_flags2);
521
522 /* don't allow reflink/cowextsize if we don't have reflink */
523 if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
524 !xfs_sb_version_hasreflink(&mp->m_sb))
525 return __this_address;
526
527 /* only regular files get reflink */
528 if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
529 return __this_address;
530
531 /* don't let reflink and realtime mix */
532 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
533 return __this_address;
534
535 /* don't let reflink and dax mix */
536 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
537 return __this_address;
538
539 return NULL;
540}
541
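/*
 * Calculate and store the CRC of a v3 on-disk inode; v1/v2 inodes have no
 * CRC field and are left untouched.
 */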
542void
543xfs_dinode_calc_crc(
544 struct xfs_mount *mp,
545 struct xfs_dinode *dip)
546{
547 uint32_t crc;
548
549 if (dip->di_version < 3)
550 return;
551
552 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
553 crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
554 XFS_DINODE_CRC_OFF);
555 dip->di_crc = xfs_end_cksum(crc);
556}
557
558/*
559 * Read the disk inode attributes into the in-core inode structure.
560 *
561 * For version 5 superblocks, if we are initialising a new inode and we are not
562 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
563 * inode core with a random generation number. If we are keeping inodes around,
564 * we need to read the inode cluster to get the existing generation number off
565 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
566 * format) then log recovery is dependent on the di_flushiter field being
567 * initialised from the current on-disk value and hence we must also read the
568 * inode off disk.
569 */
570int
571xfs_iread(
572 xfs_mount_t *mp,
573 xfs_trans_t *tp,
574 xfs_inode_t *ip,
575 uint iget_flags)
576{
577 xfs_buf_t *bp;
578 xfs_dinode_t *dip;
579 xfs_failaddr_t fa;
580 int error;
581
582 /*
583 * Fill in the location information in the in-core inode.
584 */
585 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
586 if (error)
587 return error;
588
589 /* shortcut IO on inode allocation if possible */
590 if ((iget_flags & XFS_IGET_CREATE) &&
591 xfs_sb_version_hascrc(&mp->m_sb) &&
592 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
593 /* initialise the on-disk inode core */
594 memset(&ip->i_d, 0, sizeof(ip->i_d));
595 VFS_I(ip)->i_generation = prandom_u32();
596 ip->i_d.di_version = 3;
597 return 0;
598 }
599
600 /*
601 * Get pointers to the on-disk inode and the buffer containing it.
602 */
603 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
604 if (error)
605 return error;
606
607 /* even unallocated inodes are verified */
608 fa = xfs_dinode_verify(mp, ip->i_ino, dip);
609 if (fa) {
610 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
611 sizeof(*dip), fa);
612 error = -EFSCORRUPTED;
613 goto out_brelse;
614 }
615
616 /*
617 * If the on-disk inode is already linked to a directory
618 * entry, copy all of the inode into the in-core inode.
619 * xfs_iformat_fork() handles copying in the inode format
620 * specific information.
621 * Otherwise, just get the truly permanent information.
622 */
623 if (dip->di_mode) {
624 xfs_inode_from_disk(ip, dip);
625 error = xfs_iformat_fork(ip, dip);
626 if (error) {
627#ifdef DEBUG
628 xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
629 __func__, error);
630#endif /* DEBUG */
631 goto out_brelse;
632 }
633 } else {
634 /*
635 * Partial initialisation of the in-core inode. Just the bits
636 * that xfs_ialloc won't overwrite or relies on being correct.
637 */
638 ip->i_d.di_version = dip->di_version;
639 VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
640 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
641
642 /*
643 * Make sure to pull in the mode here as well in
644 * case the inode is released without being used.
645 * This ensures that xfs_inactive() will see that
646 * the inode is already free and not try to mess
647 * with the uninitialized part of it.
648 */
649 VFS_I(ip)->i_mode = 0;
650 }
651
652 ASSERT(ip->i_d.di_version >= 2);
653 ip->i_delayed_blks = 0;
654
655 /*
656 * Mark the buffer containing the inode as something to keep
657 * around for a while. This helps to keep recently accessed
658 * meta-data in-core longer.
659 */
660 xfs_buf_set_ref(bp, XFS_INO_REF);
661
662 /*
663 * Use xfs_trans_brelse() to release the buffer containing the on-disk
664 * inode, because it was acquired with xfs_trans_read_buf() in
665 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
666 * brelse(). If we're within a transaction, then xfs_trans_brelse()
667 * will only release the buffer if it is not dirty within the
668 * transaction. It will be OK to release the buffer in this case,
669 * because inodes on disk are never destroyed and we will be locking the
670 * new in-core inode before putting it in the cache where other
671 * processes can find it. Thus we don't have to worry about the inode
672 * being changed just because we released the buffer.
673 */
674 out_brelse:
675 xfs_trans_brelse(tp, bp);
676 return error;
677}
678
679/*
680 * Validate di_extsize hint.
681 *
682 * The rules are documented at xfs_ioctl_setattr_check_extsize().
683 * These functions must be kept in sync with each other.
684 */
685xfs_failaddr_t
686xfs_inode_validate_extsize(
687 struct xfs_mount *mp,
688 uint32_t extsize,
689 uint16_t mode,
690 uint16_t flags)
691{
692 bool rt_flag;
693 bool hint_flag;
694 bool inherit_flag;
695 uint32_t extsize_bytes;
696 uint32_t blocksize_bytes;
697
698 rt_flag = (flags & XFS_DIFLAG_REALTIME);
699 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
700 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
701 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
702
703 if (rt_flag)
704 blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
705 else
706 blocksize_bytes = mp->m_sb.sb_blocksize;
707
708 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
709 return __this_address;
710
711 if (hint_flag && !S_ISREG(mode))
712 return __this_address;
713
714 if (inherit_flag && !S_ISDIR(mode))
715 return __this_address;
716
717 if ((hint_flag || inherit_flag) && extsize == 0)
718 return __this_address;
719
720 if (!(hint_flag || inherit_flag) && extsize != 0)
721 return __this_address;
722
723 if (extsize_bytes % blocksize_bytes)
724 return __this_address;
725
726 if (extsize > MAXEXTLEN)
727 return __this_address;
728
729 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
730 return __this_address;
731
732 return NULL;
733}
734
735/*
736 * Validate di_cowextsize hint.
737 *
738 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
739 * These functions must be kept in sync with each other.
740 */
741xfs_failaddr_t
742xfs_inode_validate_cowextsize(
743 struct xfs_mount *mp,
744 uint32_t cowextsize,
745 uint16_t mode,
746 uint16_t flags,
747 uint64_t flags2)
748{
749 bool rt_flag;
750 bool hint_flag;
751 uint32_t cowextsize_bytes;
752
753 rt_flag = (flags & XFS_DIFLAG_REALTIME);
754 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
755 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
756
757 if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
758 return __this_address;
759
760 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
761 return __this_address;
762
763 if (hint_flag && cowextsize == 0)
764 return __this_address;
765
766 if (!hint_flag && cowextsize != 0)
767 return __this_address;
768
769 if (hint_flag && rt_flag)
770 return __this_address;
771
772 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
773 return __this_address;
774
775 if (cowextsize > MAXEXTLEN)
776 return __this_address;
777
778 if (cowextsize > mp->m_sb.sb_agblocks / 2)
779 return __this_address;
780
781 return NULL;
782}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_inode.h"
14#include "xfs_errortag.h"
15#include "xfs_error.h"
16#include "xfs_icache.h"
17#include "xfs_trans.h"
18#include "xfs_ialloc.h"
19#include "xfs_dir2.h"
20
21#include <linux/iversion.h>
22
23/*
24 * If we are doing readahead on an inode buffer, we might be in log recovery
25 * reading an inode allocation buffer that hasn't yet been replayed, and hence
26 * has not had the inode cores stamped into it. Hence for readahead, the buffer
27 * may be potentially invalid.
28 *
29 * If the readahead buffer is invalid, we need to mark it with an error and
30 * clear the DONE status of the buffer so that a followup read will re-read it
31 * from disk. We don't report the error otherwise to avoid warnings during log
32 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
33 * because all we want to do is say readahead failed; there is no-one to report
34 * the error to, so this will distinguish it from a non-ra verifier failure.
35 * Changes to this readahead error behaviour also need to be reflected in
36 * xfs_dquot_buf_readahead_verify().
37 */
38static void
39xfs_inode_buf_verify(
40 struct xfs_buf *bp,
41 bool readahead)
42{
43 struct xfs_mount *mp = bp->b_mount;
44 xfs_agnumber_t agno;
45 int i;
46 int ni;
47
48 /*
49 * Validate the magic number and version of every inode in the buffer
50 */
51 agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
52 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
53 for (i = 0; i < ni; i++) {
54 int di_ok;
55 xfs_dinode_t *dip;
56 xfs_agino_t unlinked_ino;
57
58 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
59 unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
60 di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
61 xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
62 xfs_verify_agino_or_null(mp, agno, unlinked_ino);
63 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
64 XFS_ERRTAG_ITOBP_INOTOBP))) {
65 if (readahead) {
66 bp->b_flags &= ~XBF_DONE;
67 xfs_buf_ioerror(bp, -EIO);
68 return;
69 }
70
71#ifdef DEBUG
72 xfs_alert(mp,
73 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
74 (unsigned long long)bp->b_bn, i,
75 be16_to_cpu(dip->di_magic));
76#endif
77 xfs_buf_verifier_error(bp, -EFSCORRUPTED,
78 __func__, dip, sizeof(*dip),
79 NULL);
80 return;
81 }
82 }
83}
84
85
86static void
87xfs_inode_buf_read_verify(
88 struct xfs_buf *bp)
89{
90 xfs_inode_buf_verify(bp, false);
91}
92
93static void
94xfs_inode_buf_readahead_verify(
95 struct xfs_buf *bp)
96{
97 xfs_inode_buf_verify(bp, true);
98}
99
100static void
101xfs_inode_buf_write_verify(
102 struct xfs_buf *bp)
103{
104 xfs_inode_buf_verify(bp, false);
105}
106
107const struct xfs_buf_ops xfs_inode_buf_ops = {
108 .name = "xfs_inode",
109 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
110 cpu_to_be16(XFS_DINODE_MAGIC) },
111 .verify_read = xfs_inode_buf_read_verify,
112 .verify_write = xfs_inode_buf_write_verify,
113};
114
115const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
116 .name = "xfs_inode_ra",
117 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
118 cpu_to_be16(XFS_DINODE_MAGIC) },
119 .verify_read = xfs_inode_buf_readahead_verify,
120 .verify_write = xfs_inode_buf_write_verify,
121};
122
123
124/*
125 * This routine is called to map an inode to the buffer containing the on-disk
126 * version of the inode. It returns a pointer to the buffer containing the
127 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
128 * pointer to the on-disk inode within that buffer.
129 *
130 * If a non-zero error is returned, then the contents of bpp and dipp are
131 * undefined.
132 */
133int
134xfs_imap_to_bp(
135 struct xfs_mount *mp,
136 struct xfs_trans *tp,
137 struct xfs_imap *imap,
138 struct xfs_dinode **dipp,
139 struct xfs_buf **bpp,
140 uint buf_flags)
141{
142 struct xfs_buf *bp;
143 int error;
144
145 buf_flags |= XBF_UNMAPPED;
146 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
147 (int)imap->im_len, buf_flags, &bp,
148 &xfs_inode_buf_ops);
149 if (error) {
150 ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
151 return error;
152 }
153
154 *bpp = bp;
155 if (dipp)
156 *dipp = xfs_buf_offset(bp, imap->im_boffset);
157 return 0;
158}
159
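/*
 * Read the on-disk inode core into the in-core inode. The inode is verified
 * first, then the data, attr and CoW forks are set up. Returns 0 on success
 * or a negative errno.
 */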
160int
161xfs_inode_from_disk(
162 struct xfs_inode *ip,
163 struct xfs_dinode *from)
164{
165 struct xfs_icdinode *to = &ip->i_d;
166 struct inode *inode = VFS_I(ip);
167 int error;
168 xfs_failaddr_t fa;
169
170 ASSERT(ip->i_cowfp == NULL);
171 ASSERT(ip->i_afp == NULL);
172
173 fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
174 if (fa) {
175 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
176 sizeof(*from), fa);
177 return -EFSCORRUPTED;
178 }
179
180 /*
181 * First get the permanent information that is needed to allocate an
182 * inode. If the inode is unused, mode is zero and we shouldn't mess
183 * with the uninitialized part of it.
184 */
185 to->di_flushiter = be16_to_cpu(from->di_flushiter);
186 inode->i_generation = be32_to_cpu(from->di_gen);
187 inode->i_mode = be16_to_cpu(from->di_mode);
188 if (!inode->i_mode)
189 return 0;
190
191 /*
192 * Convert v1 inodes immediately to v2 inode format as this is the
193 * minimum inode version format we support in the rest of the code.
194 * They will also be unconditionally written back to disk as v2 inodes.
195 */
196 if (unlikely(from->di_version == 1)) {
197 set_nlink(inode, be16_to_cpu(from->di_onlink));
198 to->di_projid = 0;
199 } else {
200 set_nlink(inode, be32_to_cpu(from->di_nlink));
201 to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
202 be16_to_cpu(from->di_projid_lo);
203 }
204
205 i_uid_write(inode, be32_to_cpu(from->di_uid));
206 i_gid_write(inode, be32_to_cpu(from->di_gid));
207
208 /*
209 * Time is signed, so need to convert to signed 32 bit before
210 * storing in inode timestamp which may be 64 bit. Otherwise
211 * a time before epoch is converted to a time long after epoch
212 * on 64 bit systems.
213 */
214 inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
215 inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
216 inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
217 inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
218 inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
219 inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
220
221 to->di_size = be64_to_cpu(from->di_size);
222 to->di_nblocks = be64_to_cpu(from->di_nblocks);
223 to->di_extsize = be32_to_cpu(from->di_extsize);
224 to->di_forkoff = from->di_forkoff;
225 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
226 to->di_dmstate = be16_to_cpu(from->di_dmstate);
227 to->di_flags = be16_to_cpu(from->di_flags);
228
229 if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
230 inode_set_iversion_queried(inode,
231 be64_to_cpu(from->di_changecount));
232 to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
233 to->di_crtime.tv_nsec = be32_to_cpu(from->di_crtime.t_nsec);
234 to->di_flags2 = be64_to_cpu(from->di_flags2);
235 to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
236 }
237
238 error = xfs_iformat_data_fork(ip, from);
239 if (error)
240 return error;
241 if (from->di_forkoff) {
242 error = xfs_iformat_attr_fork(ip, from);
243 if (error)
244 goto out_destroy_data_fork;
245 }
246 if (xfs_is_reflink_inode(ip))
247 xfs_ifork_init_cow(ip);
248 return 0;
249
250out_destroy_data_fork:
251 xfs_idestroy_fork(&ip->i_df);
252 return error;
253}
254
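/*
 * Convert the in-core inode into on-disk (big-endian) format for writeback.
 * The inode version is chosen from the superblock feature bits and, for v3
 * inodes, the caller-supplied LSN is stamped into the core.
 */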
255void
256xfs_inode_to_disk(
257 struct xfs_inode *ip,
258 struct xfs_dinode *to,
259 xfs_lsn_t lsn)
260{
261 struct xfs_icdinode *from = &ip->i_d;
262 struct inode *inode = VFS_I(ip);
263
264 to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
265 to->di_onlink = 0;
266
267 to->di_format = xfs_ifork_format(&ip->i_df);
268 to->di_uid = cpu_to_be32(i_uid_read(inode));
269 to->di_gid = cpu_to_be32(i_gid_read(inode));
270 to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
271 to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
272
273 memset(to->di_pad, 0, sizeof(to->di_pad));
274 to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
275 to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
276 to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
277 to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
278 to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
279 to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
280 to->di_nlink = cpu_to_be32(inode->i_nlink);
281 to->di_gen = cpu_to_be32(inode->i_generation);
282 to->di_mode = cpu_to_be16(inode->i_mode);
283
284 to->di_size = cpu_to_be64(from->di_size);
285 to->di_nblocks = cpu_to_be64(from->di_nblocks);
286 to->di_extsize = cpu_to_be32(from->di_extsize);
287 to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
288 to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
289 to->di_forkoff = from->di_forkoff;
290 to->di_aformat = xfs_ifork_format(ip->i_afp);
291 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
292 to->di_dmstate = cpu_to_be16(from->di_dmstate);
293 to->di_flags = cpu_to_be16(from->di_flags);
294
295 if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
296 to->di_version = 3;
297 to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
298 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
299 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
300 to->di_flags2 = cpu_to_be64(from->di_flags2);
301 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
302 to->di_ino = cpu_to_be64(ip->i_ino);
303 to->di_lsn = cpu_to_be64(lsn);
304 memset(to->di_pad2, 0, sizeof(to->di_pad2));
305 uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
306 to->di_flushiter = 0;
307 } else {
308 to->di_version = 2;
309 to->di_flushiter = cpu_to_be16(from->di_flushiter);
310 }
311}
312
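/*
 * Translate a host-endian log-format inode core (struct xfs_log_dinode) into
 * the on-disk dinode format.
 */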
313void
314xfs_log_dinode_to_disk(
315 struct xfs_log_dinode *from,
316 struct xfs_dinode *to)
317{
318 to->di_magic = cpu_to_be16(from->di_magic);
319 to->di_mode = cpu_to_be16(from->di_mode);
320 to->di_version = from->di_version;
321 to->di_format = from->di_format;
322 to->di_onlink = 0;
323 to->di_uid = cpu_to_be32(from->di_uid);
324 to->di_gid = cpu_to_be32(from->di_gid);
325 to->di_nlink = cpu_to_be32(from->di_nlink);
326 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
327 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
328 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
329
330 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
331 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
332 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
333 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
334 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
335 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
336
337 to->di_size = cpu_to_be64(from->di_size);
338 to->di_nblocks = cpu_to_be64(from->di_nblocks);
339 to->di_extsize = cpu_to_be32(from->di_extsize);
340 to->di_nextents = cpu_to_be32(from->di_nextents);
341 to->di_anextents = cpu_to_be16(from->di_anextents);
342 to->di_forkoff = from->di_forkoff;
343 to->di_aformat = from->di_aformat;
344 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
345 to->di_dmstate = cpu_to_be16(from->di_dmstate);
346 to->di_flags = cpu_to_be16(from->di_flags);
347 to->di_gen = cpu_to_be32(from->di_gen);
348
349 if (from->di_version == 3) {
350 to->di_changecount = cpu_to_be64(from->di_changecount);
351 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
352 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
353 to->di_flags2 = cpu_to_be64(from->di_flags2);
354 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
355 to->di_ino = cpu_to_be64(from->di_ino);
356 to->di_lsn = cpu_to_be64(from->di_lsn);
357 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
358 uuid_copy(&to->di_uuid, &from->di_uuid);
359 to->di_flushiter = 0;
360 } else {
361 to->di_flushiter = cpu_to_be16(from->di_flushiter);
362 }
363}
364
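/*
 * Check one fork of an on-disk inode: a local data fork may not hold regular
 * file data or exceed the inode fork size, local forks may not claim any
 * extents, and extent/btree forks must respect the per-fork extent limits.
 */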
365static xfs_failaddr_t
366xfs_dinode_verify_fork(
367 struct xfs_dinode *dip,
368 struct xfs_mount *mp,
369 int whichfork)
370{
371 uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
372
373 switch (XFS_DFORK_FORMAT(dip, whichfork)) {
374 case XFS_DINODE_FMT_LOCAL:
375 /*
376 * no local regular files yet
377 */
378 if (whichfork == XFS_DATA_FORK) {
379 if (S_ISREG(be16_to_cpu(dip->di_mode)))
380 return __this_address;
381 if (be64_to_cpu(dip->di_size) >
382 XFS_DFORK_SIZE(dip, mp, whichfork))
383 return __this_address;
384 }
385 if (di_nextents)
386 return __this_address;
387 break;
388 case XFS_DINODE_FMT_EXTENTS:
389 if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
390 return __this_address;
391 break;
392 case XFS_DINODE_FMT_BTREE:
393 if (whichfork == XFS_ATTR_FORK) {
394 if (di_nextents > MAXAEXTNUM)
395 return __this_address;
396 } else if (di_nextents > MAXEXTNUM) {
397 return __this_address;
398 }
399 break;
400 default:
401 return __this_address;
402 }
403 return NULL;
404}
405
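/*
 * Check that a non-zero di_forkoff is consistent with the data fork format
 * and lies within the inode literal area.
 */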
406static xfs_failaddr_t
407xfs_dinode_verify_forkoff(
408 struct xfs_dinode *dip,
409 struct xfs_mount *mp)
410{
411 if (!dip->di_forkoff)
412 return NULL;
413
414 switch (dip->di_format) {
415 case XFS_DINODE_FMT_DEV:
416 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
417 return __this_address;
418 break;
419 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
420 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
421 case XFS_DINODE_FMT_BTREE:
422 if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
423 return __this_address;
424 break;
425 default:
426 return __this_address;
427 }
428 return NULL;
429}
430
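/*
 * Verify an on-disk inode, including the fork layout and the extent size
 * hints. Returns the address of the first failing check, or NULL if the
 * inode passes.
 */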
431xfs_failaddr_t
432xfs_dinode_verify(
433 struct xfs_mount *mp,
434 xfs_ino_t ino,
435 struct xfs_dinode *dip)
436{
437 xfs_failaddr_t fa;
438 uint16_t mode;
439 uint16_t flags;
440 uint64_t flags2;
441 uint64_t di_size;
442
443 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
444 return __this_address;
445
446 /* Verify v3 integrity information first */
447 if (dip->di_version >= 3) {
448 if (!xfs_sb_version_has_v3inode(&mp->m_sb))
449 return __this_address;
450 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
451 XFS_DINODE_CRC_OFF))
452 return __this_address;
453 if (be64_to_cpu(dip->di_ino) != ino)
454 return __this_address;
455 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
456 return __this_address;
457 }
458
459 /* don't allow invalid i_size */
460 di_size = be64_to_cpu(dip->di_size);
461 if (di_size & (1ULL << 63))
462 return __this_address;
463
464 mode = be16_to_cpu(dip->di_mode);
465 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
466 return __this_address;
467
468 /* No zero-length symlinks/dirs. */
469 if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
470 return __this_address;
471
472 /* Fork checks carried over from xfs_iformat_fork */
473 if (mode &&
474 be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
475 be64_to_cpu(dip->di_nblocks))
476 return __this_address;
477
478 if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
479 return __this_address;
480
481 flags = be16_to_cpu(dip->di_flags);
482
483 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
484 return __this_address;
485
486 /* check for illegal values of forkoff */
487 fa = xfs_dinode_verify_forkoff(dip, mp);
488 if (fa)
489 return fa;
490
491 /* Do we have appropriate data fork formats for the mode? */
492 switch (mode & S_IFMT) {
493 case S_IFIFO:
494 case S_IFCHR:
495 case S_IFBLK:
496 case S_IFSOCK:
497 if (dip->di_format != XFS_DINODE_FMT_DEV)
498 return __this_address;
499 break;
500 case S_IFREG:
501 case S_IFLNK:
502 case S_IFDIR:
503 fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
504 if (fa)
505 return fa;
506 break;
507 case 0:
508 /* Uninitialized inode ok. */
509 break;
510 default:
511 return __this_address;
512 }
513
514 if (dip->di_forkoff) {
515 fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
516 if (fa)
517 return fa;
518 } else {
519 /*
520 * If there is no fork offset, this may be a freshly-made inode
521 * in a new disk cluster, in which case di_aformat is zeroed.
522 * Otherwise, such an inode must be in EXTENTS format; this goes
523 * for freed inodes as well.
524 */
525 switch (dip->di_aformat) {
526 case 0:
527 case XFS_DINODE_FMT_EXTENTS:
528 break;
529 default:
530 return __this_address;
531 }
532 if (dip->di_anextents)
533 return __this_address;
534 }
535
536 /* extent size hint validation */
537 fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
538 mode, flags);
539 if (fa)
540 return fa;
541
542 /* only version 3 or greater inodes are extensively verified here */
543 if (dip->di_version < 3)
544 return NULL;
545
546 flags2 = be64_to_cpu(dip->di_flags2);
547
548 /* don't allow reflink/cowextsize if we don't have reflink */
549 if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
550 !xfs_sb_version_hasreflink(&mp->m_sb))
551 return __this_address;
552
553 /* only regular files get reflink */
554 if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
555 return __this_address;
556
557 /* don't let reflink and realtime mix */
558 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
559 return __this_address;
560
561 /* don't let reflink and dax mix */
562 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
563 return __this_address;
564
565 /* COW extent size hint validation */
566 fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
567 mode, flags, flags2);
568 if (fa)
569 return fa;
570
571 return NULL;
572}
573
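/*
 * Compute and store the CRC of a v3 on-disk inode; pre-v3 inodes carry no
 * CRC and are left unchanged.
 */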
574void
575xfs_dinode_calc_crc(
576 struct xfs_mount *mp,
577 struct xfs_dinode *dip)
578{
579 uint32_t crc;
580
581 if (dip->di_version < 3)
582 return;
583
584 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
585 crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
586 XFS_DINODE_CRC_OFF);
587 dip->di_crc = xfs_end_cksum(crc);
588}
589
590/*
591 * Validate di_extsize hint.
592 *
593 * The rules are documented at xfs_ioctl_setattr_check_extsize().
594 * These functions must be kept in sync with each other.
595 */
596xfs_failaddr_t
597xfs_inode_validate_extsize(
598 struct xfs_mount *mp,
599 uint32_t extsize,
600 uint16_t mode,
601 uint16_t flags)
602{
603 bool rt_flag;
604 bool hint_flag;
605 bool inherit_flag;
606 uint32_t extsize_bytes;
607 uint32_t blocksize_bytes;
608
609 rt_flag = (flags & XFS_DIFLAG_REALTIME);
610 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
611 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
612 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
613
614 if (rt_flag)
615 blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
616 else
617 blocksize_bytes = mp->m_sb.sb_blocksize;
618
619 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
620 return __this_address;
621
622 if (hint_flag && !S_ISREG(mode))
623 return __this_address;
624
625 if (inherit_flag && !S_ISDIR(mode))
626 return __this_address;
627
628 if ((hint_flag || inherit_flag) && extsize == 0)
629 return __this_address;
630
631 /* free inodes get flags set to zero but extsize remains */
632 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
633 return __this_address;
634
635 if (extsize_bytes % blocksize_bytes)
636 return __this_address;
637
638 if (extsize > MAXEXTLEN)
639 return __this_address;
640
641 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
642 return __this_address;
643
644 return NULL;
645}
646
647/*
648 * Validate di_cowextsize hint.
649 *
650 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
651 * These functions must be kept in sync with each other.
652 */
653xfs_failaddr_t
654xfs_inode_validate_cowextsize(
655 struct xfs_mount *mp,
656 uint32_t cowextsize,
657 uint16_t mode,
658 uint16_t flags,
659 uint64_t flags2)
660{
661 bool rt_flag;
662 bool hint_flag;
663 uint32_t cowextsize_bytes;
664
665 rt_flag = (flags & XFS_DIFLAG_REALTIME);
666 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
667 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
668
669 if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
670 return __this_address;
671
672 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
673 return __this_address;
674
675 if (hint_flag && cowextsize == 0)
676 return __this_address;
677
678 /* free inodes get flags set to zero but cowextsize remains */
679 if (mode && !hint_flag && cowextsize != 0)
680 return __this_address;
681
682 if (hint_flag && rt_flag)
683 return __this_address;
684
685 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
686 return __this_address;
687
688 if (cowextsize > MAXEXTLEN)
689 return __this_address;
690
691 if (cowextsize > mp->m_sb.sb_agblocks / 2)
692 return __this_address;
693
694 return NULL;
695}