// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_ag.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"
#include "xfs_health.h"
#include "xfs_metafile.h"

#include <linux/iversion.h>

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		struct xfs_dinode	*dip;
		xfs_agino_t		unlinked_ino;
		int			di_ok;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			xfs_verify_agino_or_null(bp->b_pag, unlinked_ino);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)xfs_buf_daddr(bp), i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}

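/*
 * The read, readahead and write verifiers below are thin wrappers around
 * xfs_inode_buf_verify(); only the readahead variant asks for the relaxed
 * error reporting described above.
 */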
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_buf		**bpp)
{
	int			error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
			imap->im_len, XBF_UNMAPPED, bpp, &xfs_inode_buf_ops);
	if (xfs_metadata_is_sick(error))
		xfs_agno_mark_sick(mp, xfs_daddr_to_agno(mp, imap->im_blkno),
				XFS_SICK_AG_INODES);
	return error;
}

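/*
 * Decode an ondisk bigtime value: the 64-bit quantity counts nanoseconds from
 * the bigtime epoch, so split it into seconds and nanoseconds and rebase the
 * seconds onto the Unix epoch.
 */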
static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
{
	struct timespec64	tv;
	uint32_t		n;

	tv.tv_sec = xfs_bigtime_to_unix(div_u64_rem(ts, NSEC_PER_SEC, &n));
	tv.tv_nsec = n;

	return tv;
}

/* Convert an ondisk timestamp to an incore timestamp. */
struct timespec64
xfs_inode_from_disk_ts(
	struct xfs_dinode		*dip,
	const xfs_timestamp_t		ts)
{
	struct timespec64		tv;
	struct xfs_legacy_timestamp	*lts;

	if (xfs_dinode_has_bigtime(dip))
		return xfs_inode_decode_bigtime(be64_to_cpu(ts));

	lts = (struct xfs_legacy_timestamp *)&ts;
	tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
	tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);

	return tv;
}

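/*
 * Read the ondisk dinode @from into the incore inode @ip and the associated
 * VFS inode, converting v1 link counts and legacy timestamps along the way.
 * Returns 0 on success or a negative error code if the dinode fails
 * verification or a fork cannot be set up.
 */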
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	ASSERT(ip->i_cowfp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	if (!xfs_has_v3inodes(ip->i_mount))
		ip->i_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		/* di_metatype used to be di_onlink */
		set_nlink(inode, be16_to_cpu(from->di_metatype));
		ip->i_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
		if (xfs_dinode_is_metadir(from))
			ip->i_metatype = be16_to_cpu(from->di_metatype);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode_set_atime_to_ts(inode,
			xfs_inode_from_disk_ts(from, from->di_atime));
	inode_set_mtime_to_ts(inode,
			xfs_inode_from_disk_ts(from, from->di_mtime));
	inode_set_ctime_to_ts(inode,
			xfs_inode_from_disk_ts(from, from->di_ctime));

	ip->i_disk_size = be64_to_cpu(from->di_size);
	ip->i_nblocks = be64_to_cpu(from->di_nblocks);
	ip->i_extsize = be32_to_cpu(from->di_extsize);
	ip->i_forkoff = from->di_forkoff;
	ip->i_diflags = be16_to_cpu(from->di_flags);
	ip->i_next_unlinked = be32_to_cpu(from->di_next_unlinked);

	if (from->di_dmevmask || from->di_dmstate)
		xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);

	if (xfs_has_v3inodes(ip->i_mount)) {
		inode_set_iversion_queried(inode,
					be64_to_cpu(from->di_changecount));
		ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
		ip->i_diflags2 = be64_to_cpu(from->di_flags2);
		ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	xfs_idestroy_fork(&ip->i_df);
	return error;
}

/* Convert an incore timestamp to an ondisk timestamp. */
static inline xfs_timestamp_t
xfs_inode_to_disk_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_timestamp	*lts;
	xfs_timestamp_t			ts;

	if (xfs_inode_has_bigtime(ip))
		return cpu_to_be64(xfs_inode_encode_bigtime(tv));

	lts = (struct xfs_legacy_timestamp *)&ts;
	lts->t_sec = cpu_to_be32(tv.tv_sec);
	lts->t_nsec = cpu_to_be32(tv.tv_nsec);

	return ts;
}

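/*
 * Copy the incore per-fork extent counts into the ondisk inode, using either
 * the large (64/32 bit) or the traditional (32/16 bit) counter fields
 * depending on whether the inode uses large extent counters.
 */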
static inline void
xfs_inode_to_disk_iext_counters(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to)
{
	if (xfs_inode_has_large_extent_counts(ip)) {
		to->di_big_nextents = cpu_to_be64(xfs_ifork_nextents(&ip->i_df));
		to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_af));
		/*
		 * We might be upgrading the inode to use larger extent
		 * counters than it previously used. Hence zero the unused
		 * field.
		 */
		to->di_nrext64_pad = cpu_to_be16(0);
	} else {
		to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
		to->di_anextents = cpu_to_be16(xfs_ifork_nextents(&ip->i_af));
	}
}

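/*
 * Flush the incore inode state out to the ondisk inode @to. The @lsn is only
 * recorded for v3 inodes; v2 inodes carry the flush iteration counter
 * instead.
 */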
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	if (xfs_is_metadir_inode(ip))
		to->di_metatype = cpu_to_be16(ip->i_metatype);
	else
		to->di_metatype = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);

	to->di_atime = xfs_inode_to_disk_ts(ip, inode_get_atime(inode));
	to->di_mtime = xfs_inode_to_disk_ts(ip, inode_get_mtime(inode));
	to->di_ctime = xfs_inode_to_disk_ts(ip, inode_get_ctime(inode));
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(ip->i_disk_size);
	to->di_nblocks = cpu_to_be64(ip->i_nblocks);
	to->di_extsize = cpu_to_be32(ip->i_extsize);
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(&ip->i_af);
	to->di_flags = cpu_to_be16(ip->i_diflags);

	if (xfs_has_v3inodes(ip->i_mount)) {
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
		to->di_flags2 = cpu_to_be64(ip->i_diflags2);
		to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_v3_pad = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(ip->i_flushiter);
		memset(to->di_v2_pad, 0, sizeof(to->di_v2_pad));
	}

	xfs_inode_to_disk_iext_counters(ip, to);
}

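/*
 * Sanity check one fork of an ondisk inode: the fork format must be
 * consistent with the file type and size, and the extent count must not
 * exceed what the format (and extent counter width) can represent.
 */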
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	xfs_extnum_t		di_nextents;
	xfs_extnum_t		max_extents;
	mode_t			mode = be16_to_cpu(dip->di_mode);
	uint32_t		fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
	uint32_t		fork_format = XFS_DFORK_FORMAT(dip, whichfork);

	di_nextents = xfs_dfork_nextents(dip, whichfork);

	/*
	 * For fork types that can contain local data, check that the fork
	 * format matches the size of local data contained within the fork.
	 */
	if (whichfork == XFS_DATA_FORK) {
		/*
		 * A directory small enough to fit in the inode must be stored
		 * in local format. The directory sf <-> extents conversion
		 * code updates the directory size accordingly. Directories
		 * being truncated have zero size and are not subject to this
		 * check.
		 */
		if (S_ISDIR(mode)) {
			if (dip->di_size &&
			    be64_to_cpu(dip->di_size) <= fork_size &&
			    fork_format != XFS_DINODE_FMT_LOCAL)
				return __this_address;
		}

		/*
		 * A symlink with a target small enough to fit in the inode can
		 * be stored in extents format if xattrs were added (thus
		 * converting the data fork from shortform to remote format)
		 * and then removed.
		 */
		if (S_ISLNK(mode)) {
			if (be64_to_cpu(dip->di_size) <= fork_size &&
			    fork_format != XFS_DINODE_FMT_EXTENTS &&
			    fork_format != XFS_DINODE_FMT_LOCAL)
				return __this_address;
		}

		/*
		 * For all types, check that when the size says the fork should
		 * be in extent or btree format, the inode isn't claiming to be
		 * in local format.
		 */
		if (be64_to_cpu(dip->di_size) > fork_size &&
		    fork_format == XFS_DINODE_FMT_LOCAL)
			return __this_address;
	}

	switch (fork_format) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * No local regular files yet.
		 */
		if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
			return __this_address;
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		max_extents = xfs_iext_max_nextents(
					xfs_dinode_has_large_extent_counts(dip),
					whichfork);
		if (di_nextents > max_extents)
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

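/*
 * Check that a nonzero attr fork offset is consistent with the data fork
 * format and lies within the inode literal area.
 */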
static xfs_failaddr_t
xfs_dinode_verify_forkoff(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp)
{
	if (!dip->di_forkoff)
		return NULL;

	switch (dip->di_format) {
	case XFS_DINODE_FMT_DEV:
		if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
			return __this_address;
		break;
	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
	case XFS_DINODE_FMT_EXTENTS:	/* fall through ... */
	case XFS_DINODE_FMT_BTREE:
		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

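/*
 * Inodes with the NREXT64 flag may only exist on filesystems that have the
 * large extent count feature enabled, and must have di_nrext64_pad zeroed.
 * v3 inodes without the flag must have di_v3_pad zeroed instead.
 */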
static xfs_failaddr_t
xfs_dinode_verify_nrext64(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	if (xfs_dinode_has_large_extent_counts(dip)) {
		if (!xfs_has_large_extent_counts(mp))
			return __this_address;
		if (dip->di_nrext64_pad != 0)
			return __this_address;
	} else if (dip->di_version >= 3) {
		if (dip->di_v3_pad != 0)
			return __this_address;
	}

	return NULL;
}

/*
 * Validate all the picky requirements we have for a file that claims to be
 * filesystem metadata.
 */
xfs_failaddr_t
xfs_dinode_verify_metadir(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	uint16_t		mode,
	uint16_t		flags,
	uint64_t		flags2)
{
	if (!xfs_has_metadir(mp))
		return __this_address;

	/* V5 filesystem only */
	if (dip->di_version < 3)
		return __this_address;

	if (be16_to_cpu(dip->di_metatype) >= XFS_METAFILE_MAX)
		return __this_address;

	/* V3 inode fields that are always zero */
	if ((flags2 & XFS_DIFLAG2_NREXT64) && dip->di_nrext64_pad)
		return __this_address;
	if (!(flags2 & XFS_DIFLAG2_NREXT64) && dip->di_flushiter)
		return __this_address;

	/* Metadata files can only be directories or regular files */
	if (!S_ISDIR(mode) && !S_ISREG(mode))
		return __this_address;

	/* They must have zero access permissions */
	if (mode & 0777)
		return __this_address;

	/* DMAPI event and state masks are zero */
	if (dip->di_dmevmask || dip->di_dmstate)
		return __this_address;

	/*
	 * User and group IDs must be zero. The project ID is used for
	 * grouping inodes. Metadata inodes are never accounted to quotas.
	 */
	if (dip->di_uid || dip->di_gid)
		return __this_address;

	/* Mandatory inode flags must be set */
	if (S_ISDIR(mode)) {
		if ((flags & XFS_METADIR_DIFLAGS) != XFS_METADIR_DIFLAGS)
			return __this_address;
	} else {
		if ((flags & XFS_METAFILE_DIFLAGS) != XFS_METAFILE_DIFLAGS)
			return __this_address;
	}

	/* dax flags2 must not be set */
	if (flags2 & XFS_DIFLAG2_DAX)
		return __this_address;

	return NULL;
}

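/*
 * Check an ondisk inode for obvious signs of corruption before we trust its
 * contents. Returns NULL if the inode looks sane, or the address of the
 * failing check for reporting purposes.
 */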
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;
	xfs_extnum_t		nextents;
	xfs_extnum_t		naextents;
	xfs_filblks_t		nblocks;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_has_v3inodes(mp))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/*
	 * Historical note: xfsprogs in the 3.2 era set up its incore inodes to
	 * have di_nlink track the link count, even if the actual filesystem
	 * only supported V1 inodes (i.e. di_onlink). When writing out the
	 * ondisk inode, it would set both the ondisk di_nlink and di_onlink to
	 * the incore di_nlink value, which is why we cannot check for
	 * di_nlink==0 on a V1 inode. V2/3 inodes would get written out with
	 * di_onlink==0, so we can check that.
	 */
	if (dip->di_version == 2) {
		if (dip->di_metatype)
			return __this_address;
	} else if (dip->di_version >= 3) {
		if (!xfs_dinode_is_metadir(dip) && dip->di_metatype)
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/*
	 * No zero-length symlinks/dirs unless they're unlinked and hence being
	 * inactivated.
	 */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) {
		if (dip->di_version > 1) {
			if (dip->di_nlink)
				return __this_address;
		} else {
			/* di_metatype used to be di_onlink */
			if (dip->di_metatype)
				return __this_address;
		}
	}

	fa = xfs_dinode_verify_nrext64(mp, dip);
	if (fa)
		return fa;

	nextents = xfs_dfork_data_extents(dip);
	naextents = xfs_dfork_attr_extents(dip);
	nblocks = be64_to_cpu(dip->di_nblocks);

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode && nextents + naextents > nblocks)
		return __this_address;

	if (nextents + naextents == 0 && nblocks != 0)
		return __this_address;

	if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this
		 * goes for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (naextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_has_reflink(mp))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	/* bigtime iflag can only happen on bigtime filesystems */
	if (xfs_dinode_has_bigtime(dip) &&
	    !xfs_has_bigtime(mp))
		return __this_address;

	if (flags2 & XFS_DIFLAG2_METADATA) {
		fa = xfs_dinode_verify_metadir(mp, dip, mode, flags, flags2);
		if (fa)
			return fa;
	}

	return NULL;
}

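/*
 * Compute and store the CRC of a v3 ondisk inode. v2 and older inodes carry
 * no checksum, so they are left untouched.
 */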
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_has_crc(mp));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Validate di_extsize hint.
 *
 * 1. Extent size hint is only valid for directories and regular files.
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. Hint cannot be larger than XFS_MAX_BMBT_EXTLEN.
 * 5. Can be changed on directories at any time.
 * 6. Hint value of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 *    For realtime files, this is the rt extent size.
 * 8. For non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	/*
	 * This comment describes a historic gap in this verifier function.
	 *
	 * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
	 * function has never checked that the extent size hint is an integer
	 * multiple of the realtime extent size. Since we allow users to set
	 * this combination on non-rt filesystems /and/ to change the rt
	 * extent size when adding a rt device to a filesystem, the net effect
	 * is that users can configure a filesystem anticipating one rt
	 * geometry and change their minds later. Directories do not use the
	 * extent size hint, so this is harmless for them.
	 *
	 * If a directory with a misaligned extent size hint is allowed to
	 * propagate that hint into a new regular realtime file, the result
	 * is that the inode cluster buffer verifier will trigger a corruption
	 * shutdown the next time it is run, because the verifier has always
	 * enforced the alignment rule for regular files.
	 *
	 * Because we allow administrators to set a new rt extent size when
	 * adding a rt section, we cannot add a check to this verifier because
	 * that will result in a new source of directory corruption errors
	 * when reading an existing filesystem. Instead, we rely on callers to
	 * decide when alignment checks are appropriate, and fix things up as
	 * needed.
	 */

	if (rt_flag)
		blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but extsize remains */
	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > XFS_MAX_BMBT_EXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}

/*
 * Validate di_cowextsize hint.
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. Hint value of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_has_reflink(mp))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but cowextsize remains */
	if (mode && !hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > XFS_MAX_BMBT_EXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}