Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_ag.h"
14#include "xfs_inode.h"
15#include "xfs_errortag.h"
16#include "xfs_error.h"
17#include "xfs_icache.h"
18#include "xfs_trans.h"
19#include "xfs_ialloc.h"
20#include "xfs_dir2.h"
21
22#include <linux/iversion.h>
23
24/*
25 * If we are doing readahead on an inode buffer, we might be in log recovery
26 * reading an inode allocation buffer that hasn't yet been replayed, and hence
27 * has not had the inode cores stamped into it. Hence for readahead, the buffer
28 * may be potentially invalid.
29 *
30 * If the readahead buffer is invalid, we need to mark it with an error and
31 * clear the DONE status of the buffer so that a followup read will re-read it
32 * from disk. We don't report the error otherwise to avoid warnings during log
33 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
34 * because all we want to do is say readahead failed; there is no-one to report
35 * the error to, so this will distinguish it from a non-ra verifier failure.
36 * Changes to this readahead error behaviour also need to be reflected in
37 * xfs_dquot_buf_readahead_verify().
38 */
39static void
40xfs_inode_buf_verify(
41 struct xfs_buf *bp,
42 bool readahead)
43{
44 struct xfs_mount *mp = bp->b_mount;
45 int i;
46 int ni;
47
48 /*
49 * Validate the magic number and version of every inode in the buffer
50 */
51 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
52 for (i = 0; i < ni; i++) {
53 struct xfs_dinode *dip;
54 xfs_agino_t unlinked_ino;
55 int di_ok;
56
57 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
58 unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
59 di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
60 xfs_dinode_good_version(mp, dip->di_version) &&
61 xfs_verify_agino_or_null(bp->b_pag, unlinked_ino);
62 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
63 XFS_ERRTAG_ITOBP_INOTOBP))) {
64 if (readahead) {
65 bp->b_flags &= ~XBF_DONE;
66 xfs_buf_ioerror(bp, -EIO);
67 return;
68 }
69
70#ifdef DEBUG
71 xfs_alert(mp,
72 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
73 (unsigned long long)xfs_buf_daddr(bp), i,
74 be16_to_cpu(dip->di_magic));
75#endif
76 xfs_buf_verifier_error(bp, -EFSCORRUPTED,
77 __func__, dip, sizeof(*dip),
78 NULL);
79 return;
80 }
81 }
82}
83
84
85static void
86xfs_inode_buf_read_verify(
87 struct xfs_buf *bp)
88{
89 xfs_inode_buf_verify(bp, false);
90}
91
92static void
93xfs_inode_buf_readahead_verify(
94 struct xfs_buf *bp)
95{
96 xfs_inode_buf_verify(bp, true);
97}
98
99static void
100xfs_inode_buf_write_verify(
101 struct xfs_buf *bp)
102{
103 xfs_inode_buf_verify(bp, false);
104}
105
106const struct xfs_buf_ops xfs_inode_buf_ops = {
107 .name = "xfs_inode",
108 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
109 cpu_to_be16(XFS_DINODE_MAGIC) },
110 .verify_read = xfs_inode_buf_read_verify,
111 .verify_write = xfs_inode_buf_write_verify,
112};
113
114const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
115 .name = "xfs_inode_ra",
116 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
117 cpu_to_be16(XFS_DINODE_MAGIC) },
118 .verify_read = xfs_inode_buf_readahead_verify,
119 .verify_write = xfs_inode_buf_write_verify,
120};
121
122
123/*
124 * This routine is called to map an inode to the buffer containing the on-disk
125 * version of the inode. It returns a pointer to the buffer containing the
126 * on-disk inode in the bpp parameter.
127 */
128int
129xfs_imap_to_bp(
130 struct xfs_mount *mp,
131 struct xfs_trans *tp,
132 struct xfs_imap *imap,
133 struct xfs_buf **bpp)
134{
135 return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
136 imap->im_len, XBF_UNMAPPED, bpp,
137 &xfs_inode_buf_ops);
138}
139
140static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
141{
142 struct timespec64 tv;
143 uint32_t n;
144
145 tv.tv_sec = xfs_bigtime_to_unix(div_u64_rem(ts, NSEC_PER_SEC, &n));
146 tv.tv_nsec = n;
147
148 return tv;
149}
150
151/* Convert an ondisk timestamp to an incore timestamp. */
152struct timespec64
153xfs_inode_from_disk_ts(
154 struct xfs_dinode *dip,
155 const xfs_timestamp_t ts)
156{
157 struct timespec64 tv;
158 struct xfs_legacy_timestamp *lts;
159
160 if (xfs_dinode_has_bigtime(dip))
161 return xfs_inode_decode_bigtime(be64_to_cpu(ts));
162
163 lts = (struct xfs_legacy_timestamp *)&ts;
164 tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
165 tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);
166
167 return tv;
168}
169
170int
171xfs_inode_from_disk(
172 struct xfs_inode *ip,
173 struct xfs_dinode *from)
174{
175 struct inode *inode = VFS_I(ip);
176 int error;
177 xfs_failaddr_t fa;
178
179 ASSERT(ip->i_cowfp == NULL);
180
181 fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
182 if (fa) {
183 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
184 sizeof(*from), fa);
185 return -EFSCORRUPTED;
186 }
187
188 /*
189 * First get the permanent information that is needed to allocate an
190 * inode. If the inode is unused, mode is zero and we shouldn't mess
191 * with the uninitialized part of it.
192 */
193 if (!xfs_has_v3inodes(ip->i_mount))
194 ip->i_flushiter = be16_to_cpu(from->di_flushiter);
195 inode->i_generation = be32_to_cpu(from->di_gen);
196 inode->i_mode = be16_to_cpu(from->di_mode);
197 if (!inode->i_mode)
198 return 0;
199
200 /*
201 * Convert v1 inodes immediately to v2 inode format as this is the
202 * minimum inode version format we support in the rest of the code.
203 * They will also be unconditionally written back to disk as v2 inodes.
204 */
205 if (unlikely(from->di_version == 1)) {
206 set_nlink(inode, be16_to_cpu(from->di_onlink));
207 ip->i_projid = 0;
208 } else {
209 set_nlink(inode, be32_to_cpu(from->di_nlink));
210 ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
211 be16_to_cpu(from->di_projid_lo);
212 }
213
214 i_uid_write(inode, be32_to_cpu(from->di_uid));
215 i_gid_write(inode, be32_to_cpu(from->di_gid));
216
217 /*
218 * Time is signed, so need to convert to signed 32 bit before
219 * storing in inode timestamp which may be 64 bit. Otherwise
220 * a time before epoch is converted to a time long after epoch
221 * on 64 bit systems.
222 */
223 inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
224 inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
225 inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);
226
227 ip->i_disk_size = be64_to_cpu(from->di_size);
228 ip->i_nblocks = be64_to_cpu(from->di_nblocks);
229 ip->i_extsize = be32_to_cpu(from->di_extsize);
230 ip->i_forkoff = from->di_forkoff;
231 ip->i_diflags = be16_to_cpu(from->di_flags);
232 ip->i_next_unlinked = be32_to_cpu(from->di_next_unlinked);
233
234 if (from->di_dmevmask || from->di_dmstate)
235 xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);
236
237 if (xfs_has_v3inodes(ip->i_mount)) {
238 inode_set_iversion_queried(inode,
239 be64_to_cpu(from->di_changecount));
240 ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
241 ip->i_diflags2 = be64_to_cpu(from->di_flags2);
242 ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
243 }
244
245 error = xfs_iformat_data_fork(ip, from);
246 if (error)
247 return error;
248 if (from->di_forkoff) {
249 error = xfs_iformat_attr_fork(ip, from);
250 if (error)
251 goto out_destroy_data_fork;
252 }
253 if (xfs_is_reflink_inode(ip))
254 xfs_ifork_init_cow(ip);
255 return 0;
256
257out_destroy_data_fork:
258 xfs_idestroy_fork(&ip->i_df);
259 return error;
260}
261
262/* Convert an incore timestamp to an ondisk timestamp. */
263static inline xfs_timestamp_t
264xfs_inode_to_disk_ts(
265 struct xfs_inode *ip,
266 const struct timespec64 tv)
267{
268 struct xfs_legacy_timestamp *lts;
269 xfs_timestamp_t ts;
270
271 if (xfs_inode_has_bigtime(ip))
272 return cpu_to_be64(xfs_inode_encode_bigtime(tv));
273
274 lts = (struct xfs_legacy_timestamp *)&ts;
275 lts->t_sec = cpu_to_be32(tv.tv_sec);
276 lts->t_nsec = cpu_to_be32(tv.tv_nsec);
277
278 return ts;
279}
280
281static inline void
282xfs_inode_to_disk_iext_counters(
283 struct xfs_inode *ip,
284 struct xfs_dinode *to)
285{
286 if (xfs_inode_has_large_extent_counts(ip)) {
287 to->di_big_nextents = cpu_to_be64(xfs_ifork_nextents(&ip->i_df));
288 to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_af));
289 /*
290 * We might be upgrading the inode to use larger extent counters
291 * than was previously used. Hence zero the unused field.
292 */
293 to->di_nrext64_pad = cpu_to_be16(0);
294 } else {
295 to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
296 to->di_anextents = cpu_to_be16(xfs_ifork_nextents(&ip->i_af));
297 }
298}
299
300void
301xfs_inode_to_disk(
302 struct xfs_inode *ip,
303 struct xfs_dinode *to,
304 xfs_lsn_t lsn)
305{
306 struct inode *inode = VFS_I(ip);
307
308 to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
309 to->di_onlink = 0;
310
311 to->di_format = xfs_ifork_format(&ip->i_df);
312 to->di_uid = cpu_to_be32(i_uid_read(inode));
313 to->di_gid = cpu_to_be32(i_gid_read(inode));
314 to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
315 to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);
316
317 to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
318 to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
319 to->di_ctime = xfs_inode_to_disk_ts(ip, inode->i_ctime);
320 to->di_nlink = cpu_to_be32(inode->i_nlink);
321 to->di_gen = cpu_to_be32(inode->i_generation);
322 to->di_mode = cpu_to_be16(inode->i_mode);
323
324 to->di_size = cpu_to_be64(ip->i_disk_size);
325 to->di_nblocks = cpu_to_be64(ip->i_nblocks);
326 to->di_extsize = cpu_to_be32(ip->i_extsize);
327 to->di_forkoff = ip->i_forkoff;
328 to->di_aformat = xfs_ifork_format(&ip->i_af);
329 to->di_flags = cpu_to_be16(ip->i_diflags);
330
331 if (xfs_has_v3inodes(ip->i_mount)) {
332 to->di_version = 3;
333 to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
334 to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
335 to->di_flags2 = cpu_to_be64(ip->i_diflags2);
336 to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
337 to->di_ino = cpu_to_be64(ip->i_ino);
338 to->di_lsn = cpu_to_be64(lsn);
339 memset(to->di_pad2, 0, sizeof(to->di_pad2));
340 uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
341 to->di_v3_pad = 0;
342 } else {
343 to->di_version = 2;
344 to->di_flushiter = cpu_to_be16(ip->i_flushiter);
345 memset(to->di_v2_pad, 0, sizeof(to->di_v2_pad));
346 }
347
348 xfs_inode_to_disk_iext_counters(ip, to);
349}
350
351static xfs_failaddr_t
352xfs_dinode_verify_fork(
353 struct xfs_dinode *dip,
354 struct xfs_mount *mp,
355 int whichfork)
356{
357 xfs_extnum_t di_nextents;
358 xfs_extnum_t max_extents;
359 mode_t mode = be16_to_cpu(dip->di_mode);
360 uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
361 uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork);
362
363 di_nextents = xfs_dfork_nextents(dip, whichfork);
364
365 /*
366 * For fork types that can contain local data, check that the fork
367 * format matches the size of local data contained within the fork.
368 *
369 * For all types, check that when the size says the should be in extent
370 * or btree format, the inode isn't claiming it is in local format.
371 */
372 if (whichfork == XFS_DATA_FORK) {
373 if (S_ISDIR(mode) || S_ISLNK(mode)) {
374 if (be64_to_cpu(dip->di_size) <= fork_size &&
375 fork_format != XFS_DINODE_FMT_LOCAL)
376 return __this_address;
377 }
378
379 if (be64_to_cpu(dip->di_size) > fork_size &&
380 fork_format == XFS_DINODE_FMT_LOCAL)
381 return __this_address;
382 }
383
384 switch (fork_format) {
385 case XFS_DINODE_FMT_LOCAL:
386 /*
387 * No local regular files yet.
388 */
389 if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
390 return __this_address;
391 if (di_nextents)
392 return __this_address;
393 break;
394 case XFS_DINODE_FMT_EXTENTS:
395 if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
396 return __this_address;
397 break;
398 case XFS_DINODE_FMT_BTREE:
399 max_extents = xfs_iext_max_nextents(
400 xfs_dinode_has_large_extent_counts(dip),
401 whichfork);
402 if (di_nextents > max_extents)
403 return __this_address;
404 break;
405 default:
406 return __this_address;
407 }
408 return NULL;
409}
410
411static xfs_failaddr_t
412xfs_dinode_verify_forkoff(
413 struct xfs_dinode *dip,
414 struct xfs_mount *mp)
415{
416 if (!dip->di_forkoff)
417 return NULL;
418
419 switch (dip->di_format) {
420 case XFS_DINODE_FMT_DEV:
421 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
422 return __this_address;
423 break;
424 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
425 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
426 case XFS_DINODE_FMT_BTREE:
427 if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
428 return __this_address;
429 break;
430 default:
431 return __this_address;
432 }
433 return NULL;
434}
435
436static xfs_failaddr_t
437xfs_dinode_verify_nrext64(
438 struct xfs_mount *mp,
439 struct xfs_dinode *dip)
440{
441 if (xfs_dinode_has_large_extent_counts(dip)) {
442 if (!xfs_has_large_extent_counts(mp))
443 return __this_address;
444 if (dip->di_nrext64_pad != 0)
445 return __this_address;
446 } else if (dip->di_version >= 3) {
447 if (dip->di_v3_pad != 0)
448 return __this_address;
449 }
450
451 return NULL;
452}
453
454xfs_failaddr_t
455xfs_dinode_verify(
456 struct xfs_mount *mp,
457 xfs_ino_t ino,
458 struct xfs_dinode *dip)
459{
460 xfs_failaddr_t fa;
461 uint16_t mode;
462 uint16_t flags;
463 uint64_t flags2;
464 uint64_t di_size;
465 xfs_extnum_t nextents;
466 xfs_extnum_t naextents;
467 xfs_filblks_t nblocks;
468
469 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
470 return __this_address;
471
472 /* Verify v3 integrity information first */
473 if (dip->di_version >= 3) {
474 if (!xfs_has_v3inodes(mp))
475 return __this_address;
476 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
477 XFS_DINODE_CRC_OFF))
478 return __this_address;
479 if (be64_to_cpu(dip->di_ino) != ino)
480 return __this_address;
481 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
482 return __this_address;
483 }
484
485 /* don't allow invalid i_size */
486 di_size = be64_to_cpu(dip->di_size);
487 if (di_size & (1ULL << 63))
488 return __this_address;
489
490 mode = be16_to_cpu(dip->di_mode);
491 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
492 return __this_address;
493
494 /* No zero-length symlinks/dirs. */
495 if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
496 return __this_address;
497
498 fa = xfs_dinode_verify_nrext64(mp, dip);
499 if (fa)
500 return fa;
501
502 nextents = xfs_dfork_data_extents(dip);
503 naextents = xfs_dfork_attr_extents(dip);
504 nblocks = be64_to_cpu(dip->di_nblocks);
505
506 /* Fork checks carried over from xfs_iformat_fork */
507 if (mode && nextents + naextents > nblocks)
508 return __this_address;
509
510 if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
511 return __this_address;
512
513 if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
514 return __this_address;
515
516 flags = be16_to_cpu(dip->di_flags);
517
518 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
519 return __this_address;
520
521 /* check for illegal values of forkoff */
522 fa = xfs_dinode_verify_forkoff(dip, mp);
523 if (fa)
524 return fa;
525
526 /* Do we have appropriate data fork formats for the mode? */
527 switch (mode & S_IFMT) {
528 case S_IFIFO:
529 case S_IFCHR:
530 case S_IFBLK:
531 case S_IFSOCK:
532 if (dip->di_format != XFS_DINODE_FMT_DEV)
533 return __this_address;
534 break;
535 case S_IFREG:
536 case S_IFLNK:
537 case S_IFDIR:
538 fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
539 if (fa)
540 return fa;
541 break;
542 case 0:
543 /* Uninitialized inode ok. */
544 break;
545 default:
546 return __this_address;
547 }
548
549 if (dip->di_forkoff) {
550 fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
551 if (fa)
552 return fa;
553 } else {
554 /*
555 * If there is no fork offset, this may be a freshly-made inode
556 * in a new disk cluster, in which case di_aformat is zeroed.
557 * Otherwise, such an inode must be in EXTENTS format; this goes
558 * for freed inodes as well.
559 */
560 switch (dip->di_aformat) {
561 case 0:
562 case XFS_DINODE_FMT_EXTENTS:
563 break;
564 default:
565 return __this_address;
566 }
567 if (naextents)
568 return __this_address;
569 }
570
571 /* extent size hint validation */
572 fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
573 mode, flags);
574 if (fa)
575 return fa;
576
577 /* only version 3 or greater inodes are extensively verified here */
578 if (dip->di_version < 3)
579 return NULL;
580
581 flags2 = be64_to_cpu(dip->di_flags2);
582
583 /* don't allow reflink/cowextsize if we don't have reflink */
584 if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
585 !xfs_has_reflink(mp))
586 return __this_address;
587
588 /* only regular files get reflink */
589 if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
590 return __this_address;
591
592 /* don't let reflink and realtime mix */
593 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
594 return __this_address;
595
596 /* COW extent size hint validation */
597 fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
598 mode, flags, flags2);
599 if (fa)
600 return fa;
601
602 /* bigtime iflag can only happen on bigtime filesystems */
603 if (xfs_dinode_has_bigtime(dip) &&
604 !xfs_has_bigtime(mp))
605 return __this_address;
606
607 return NULL;
608}
609
610void
611xfs_dinode_calc_crc(
612 struct xfs_mount *mp,
613 struct xfs_dinode *dip)
614{
615 uint32_t crc;
616
617 if (dip->di_version < 3)
618 return;
619
620 ASSERT(xfs_has_crc(mp));
621 crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
622 XFS_DINODE_CRC_OFF);
623 dip->di_crc = xfs_end_cksum(crc);
624}
625
626/*
627 * Validate di_extsize hint.
628 *
629 * 1. Extent size hint is only valid for directories and regular files.
630 * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
631 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
632 * 4. Hint cannot be larger than MAXTEXTLEN.
633 * 5. Can be changed on directories at any time.
634 * 6. Hint value of 0 turns off hints, clears inode flags.
635 * 7. Extent size must be a multiple of the appropriate block size.
636 * For realtime files, this is the rt extent size.
637 * 8. For non-realtime files, the extent size hint must be limited
638 * to half the AG size to avoid alignment extending the extent beyond the
639 * limits of the AG.
640 */
641xfs_failaddr_t
642xfs_inode_validate_extsize(
643 struct xfs_mount *mp,
644 uint32_t extsize,
645 uint16_t mode,
646 uint16_t flags)
647{
648 bool rt_flag;
649 bool hint_flag;
650 bool inherit_flag;
651 uint32_t extsize_bytes;
652 uint32_t blocksize_bytes;
653
654 rt_flag = (flags & XFS_DIFLAG_REALTIME);
655 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
656 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
657 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
658
659 /*
660 * This comment describes a historic gap in this verifier function.
661 *
662 * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
663 * function has never checked that the extent size hint is an integer
664 * multiple of the realtime extent size. Since we allow users to set
665 * this combination on non-rt filesystems /and/ to change the rt
666 * extent size when adding a rt device to a filesystem, the net effect
667 * is that users can configure a filesystem anticipating one rt
668 * geometry and change their minds later. Directories do not use the
669 * extent size hint, so this is harmless for them.
670 *
671 * If a directory with a misaligned extent size hint is allowed to
672 * propagate that hint into a new regular realtime file, the result
673 * is that the inode cluster buffer verifier will trigger a corruption
674 * shutdown the next time it is run, because the verifier has always
675 * enforced the alignment rule for regular files.
676 *
677 * Because we allow administrators to set a new rt extent size when
678 * adding a rt section, we cannot add a check to this verifier because
679 * that will result a new source of directory corruption errors when
680 * reading an existing filesystem. Instead, we rely on callers to
681 * decide when alignment checks are appropriate, and fix things up as
682 * needed.
683 */
684
685 if (rt_flag)
686 blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
687 else
688 blocksize_bytes = mp->m_sb.sb_blocksize;
689
690 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
691 return __this_address;
692
693 if (hint_flag && !S_ISREG(mode))
694 return __this_address;
695
696 if (inherit_flag && !S_ISDIR(mode))
697 return __this_address;
698
699 if ((hint_flag || inherit_flag) && extsize == 0)
700 return __this_address;
701
702 /* free inodes get flags set to zero but extsize remains */
703 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
704 return __this_address;
705
706 if (extsize_bytes % blocksize_bytes)
707 return __this_address;
708
709 if (extsize > XFS_MAX_BMBT_EXTLEN)
710 return __this_address;
711
712 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
713 return __this_address;
714
715 return NULL;
716}
717
718/*
719 * Validate di_cowextsize hint.
720 *
721 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
722 * The inode does not have to have any shared blocks, but it must be a v3.
723 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
724 * for a directory, the hint is propagated to new files.
725 * 3. Can be changed on files & directories at any time.
726 * 4. Hint value of 0 turns off hints, clears inode flags.
727 * 5. Extent size must be a multiple of the appropriate block size.
728 * 6. The extent size hint must be limited to half the AG size to avoid
729 * alignment extending the extent beyond the limits of the AG.
730 */
731xfs_failaddr_t
732xfs_inode_validate_cowextsize(
733 struct xfs_mount *mp,
734 uint32_t cowextsize,
735 uint16_t mode,
736 uint16_t flags,
737 uint64_t flags2)
738{
739 bool rt_flag;
740 bool hint_flag;
741 uint32_t cowextsize_bytes;
742
743 rt_flag = (flags & XFS_DIFLAG_REALTIME);
744 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
745 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
746
747 if (hint_flag && !xfs_has_reflink(mp))
748 return __this_address;
749
750 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
751 return __this_address;
752
753 if (hint_flag && cowextsize == 0)
754 return __this_address;
755
756 /* free inodes get flags set to zero but cowextsize remains */
757 if (mode && !hint_flag && cowextsize != 0)
758 return __this_address;
759
760 if (hint_flag && rt_flag)
761 return __this_address;
762
763 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
764 return __this_address;
765
766 if (cowextsize > XFS_MAX_BMBT_EXTLEN)
767 return __this_address;
768
769 if (cowextsize > mp->m_sb.sb_agblocks / 2)
770 return __this_address;
771
772 return NULL;
773}
1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_mount.h"
25#include "xfs_inode.h"
26#include "xfs_error.h"
27#include "xfs_cksum.h"
28#include "xfs_icache.h"
29#include "xfs_trans.h"
30#include "xfs_ialloc.h"
31
32/*
33 * Check that none of the inode's in the buffer have a next
34 * unlinked field of 0.
35 */
36#if defined(DEBUG)
37void
38xfs_inobp_check(
39 xfs_mount_t *mp,
40 xfs_buf_t *bp)
41{
42 int i;
43 int j;
44 xfs_dinode_t *dip;
45
46 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
47
48 for (i = 0; i < j; i++) {
49 dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
50 if (!dip->di_next_unlinked) {
51 xfs_alert(mp,
52 "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
53 i, (long long)bp->b_bn);
54 }
55 }
56}
57#endif
58
59/*
60 * If we are doing readahead on an inode buffer, we might be in log recovery
61 * reading an inode allocation buffer that hasn't yet been replayed, and hence
62 * has not had the inode cores stamped into it. Hence for readahead, the buffer
63 * may be potentially invalid.
64 *
65 * If the readahead buffer is invalid, we need to mark it with an error and
66 * clear the DONE status of the buffer so that a followup read will re-read it
67 * from disk. We don't report the error otherwise to avoid warnings during log
68 * recovery and we don't get unnecssary panics on debug kernels. We use EIO here
69 * because all we want to do is say readahead failed; there is no-one to report
70 * the error to, so this will distinguish it from a non-ra verifier failure.
71 * Changes to this readahead error behavour also need to be reflected in
72 * xfs_dquot_buf_readahead_verify().
73 */
74static void
75xfs_inode_buf_verify(
76 struct xfs_buf *bp,
77 bool readahead)
78{
79 struct xfs_mount *mp = bp->b_target->bt_mount;
80 int i;
81 int ni;
82
83 /*
84 * Validate the magic number and version of every inode in the buffer
85 */
86 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
87 for (i = 0; i < ni; i++) {
88 int di_ok;
89 xfs_dinode_t *dip;
90
91 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
92 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
93 XFS_DINODE_GOOD_VERSION(dip->di_version);
94 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
95 XFS_ERRTAG_ITOBP_INOTOBP,
96 XFS_RANDOM_ITOBP_INOTOBP))) {
97 if (readahead) {
98 bp->b_flags &= ~XBF_DONE;
99 xfs_buf_ioerror(bp, -EIO);
100 return;
101 }
102
103 xfs_buf_ioerror(bp, -EFSCORRUPTED);
104 xfs_verifier_error(bp);
105#ifdef DEBUG
106 xfs_alert(mp,
107 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
108 (unsigned long long)bp->b_bn, i,
109 be16_to_cpu(dip->di_magic));
110#endif
111 }
112 }
113 xfs_inobp_check(mp, bp);
114}
115
116
117static void
118xfs_inode_buf_read_verify(
119 struct xfs_buf *bp)
120{
121 xfs_inode_buf_verify(bp, false);
122}
123
124static void
125xfs_inode_buf_readahead_verify(
126 struct xfs_buf *bp)
127{
128 xfs_inode_buf_verify(bp, true);
129}
130
131static void
132xfs_inode_buf_write_verify(
133 struct xfs_buf *bp)
134{
135 xfs_inode_buf_verify(bp, false);
136}
137
138const struct xfs_buf_ops xfs_inode_buf_ops = {
139 .name = "xfs_inode",
140 .verify_read = xfs_inode_buf_read_verify,
141 .verify_write = xfs_inode_buf_write_verify,
142};
143
144const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
145 .name = "xxfs_inode_ra",
146 .verify_read = xfs_inode_buf_readahead_verify,
147 .verify_write = xfs_inode_buf_write_verify,
148};
149
150
151/*
152 * This routine is called to map an inode to the buffer containing the on-disk
153 * version of the inode. It returns a pointer to the buffer containing the
154 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
155 * pointer to the on-disk inode within that buffer.
156 *
157 * If a non-zero error is returned, then the contents of bpp and dipp are
158 * undefined.
159 */
160int
161xfs_imap_to_bp(
162 struct xfs_mount *mp,
163 struct xfs_trans *tp,
164 struct xfs_imap *imap,
165 struct xfs_dinode **dipp,
166 struct xfs_buf **bpp,
167 uint buf_flags,
168 uint iget_flags)
169{
170 struct xfs_buf *bp;
171 int error;
172
173 buf_flags |= XBF_UNMAPPED;
174 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
175 (int)imap->im_len, buf_flags, &bp,
176 &xfs_inode_buf_ops);
177 if (error) {
178 if (error == -EAGAIN) {
179 ASSERT(buf_flags & XBF_TRYLOCK);
180 return error;
181 }
182
183 if (error == -EFSCORRUPTED &&
184 (iget_flags & XFS_IGET_UNTRUSTED))
185 return -EINVAL;
186
187 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
188 __func__, error);
189 return error;
190 }
191
192 *bpp = bp;
193 *dipp = xfs_buf_offset(bp, imap->im_boffset);
194 return 0;
195}
196
197void
198xfs_inode_from_disk(
199 struct xfs_inode *ip,
200 struct xfs_dinode *from)
201{
202 struct xfs_icdinode *to = &ip->i_d;
203 struct inode *inode = VFS_I(ip);
204
205
206 /*
207 * Convert v1 inodes immediately to v2 inode format as this is the
208 * minimum inode version format we support in the rest of the code.
209 */
210 to->di_version = from->di_version;
211 if (to->di_version == 1) {
212 set_nlink(inode, be16_to_cpu(from->di_onlink));
213 to->di_projid_lo = 0;
214 to->di_projid_hi = 0;
215 to->di_version = 2;
216 } else {
217 set_nlink(inode, be32_to_cpu(from->di_nlink));
218 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
219 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
220 }
221
222 to->di_format = from->di_format;
223 to->di_uid = be32_to_cpu(from->di_uid);
224 to->di_gid = be32_to_cpu(from->di_gid);
225 to->di_flushiter = be16_to_cpu(from->di_flushiter);
226
227 /*
228 * Time is signed, so need to convert to signed 32 bit before
229 * storing in inode timestamp which may be 64 bit. Otherwise
230 * a time before epoch is converted to a time long after epoch
231 * on 64 bit systems.
232 */
233 inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
234 inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
235 inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
236 inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
237 inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
238 inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
239 inode->i_generation = be32_to_cpu(from->di_gen);
240 inode->i_mode = be16_to_cpu(from->di_mode);
241
242 to->di_size = be64_to_cpu(from->di_size);
243 to->di_nblocks = be64_to_cpu(from->di_nblocks);
244 to->di_extsize = be32_to_cpu(from->di_extsize);
245 to->di_nextents = be32_to_cpu(from->di_nextents);
246 to->di_anextents = be16_to_cpu(from->di_anextents);
247 to->di_forkoff = from->di_forkoff;
248 to->di_aformat = from->di_aformat;
249 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
250 to->di_dmstate = be16_to_cpu(from->di_dmstate);
251 to->di_flags = be16_to_cpu(from->di_flags);
252
253 if (to->di_version == 3) {
254 inode->i_version = be64_to_cpu(from->di_changecount);
255 to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
256 to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
257 to->di_flags2 = be64_to_cpu(from->di_flags2);
258 }
259}
260
261void
262xfs_inode_to_disk(
263 struct xfs_inode *ip,
264 struct xfs_dinode *to,
265 xfs_lsn_t lsn)
266{
267 struct xfs_icdinode *from = &ip->i_d;
268 struct inode *inode = VFS_I(ip);
269
270 to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
271 to->di_onlink = 0;
272
273 to->di_version = from->di_version;
274 to->di_format = from->di_format;
275 to->di_uid = cpu_to_be32(from->di_uid);
276 to->di_gid = cpu_to_be32(from->di_gid);
277 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
278 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
279
280 memset(to->di_pad, 0, sizeof(to->di_pad));
281 to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
282 to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
283 to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
284 to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
285 to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
286 to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
287 to->di_nlink = cpu_to_be32(inode->i_nlink);
288 to->di_gen = cpu_to_be32(inode->i_generation);
289 to->di_mode = cpu_to_be16(inode->i_mode);
290
291 to->di_size = cpu_to_be64(from->di_size);
292 to->di_nblocks = cpu_to_be64(from->di_nblocks);
293 to->di_extsize = cpu_to_be32(from->di_extsize);
294 to->di_nextents = cpu_to_be32(from->di_nextents);
295 to->di_anextents = cpu_to_be16(from->di_anextents);
296 to->di_forkoff = from->di_forkoff;
297 to->di_aformat = from->di_aformat;
298 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
299 to->di_dmstate = cpu_to_be16(from->di_dmstate);
300 to->di_flags = cpu_to_be16(from->di_flags);
301
302 if (from->di_version == 3) {
303 to->di_changecount = cpu_to_be64(inode->i_version);
304 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
305 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
306 to->di_flags2 = cpu_to_be64(from->di_flags2);
307
308 to->di_ino = cpu_to_be64(ip->i_ino);
309 to->di_lsn = cpu_to_be64(lsn);
310 memset(to->di_pad2, 0, sizeof(to->di_pad2));
311 uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
312 to->di_flushiter = 0;
313 } else {
314 to->di_flushiter = cpu_to_be16(from->di_flushiter);
315 }
316}
317
318void
319xfs_log_dinode_to_disk(
320 struct xfs_log_dinode *from,
321 struct xfs_dinode *to)
322{
323 to->di_magic = cpu_to_be16(from->di_magic);
324 to->di_mode = cpu_to_be16(from->di_mode);
325 to->di_version = from->di_version;
326 to->di_format = from->di_format;
327 to->di_onlink = 0;
328 to->di_uid = cpu_to_be32(from->di_uid);
329 to->di_gid = cpu_to_be32(from->di_gid);
330 to->di_nlink = cpu_to_be32(from->di_nlink);
331 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
332 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
333 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
334
335 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
336 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
337 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
338 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
339 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
340 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
341
342 to->di_size = cpu_to_be64(from->di_size);
343 to->di_nblocks = cpu_to_be64(from->di_nblocks);
344 to->di_extsize = cpu_to_be32(from->di_extsize);
345 to->di_nextents = cpu_to_be32(from->di_nextents);
346 to->di_anextents = cpu_to_be16(from->di_anextents);
347 to->di_forkoff = from->di_forkoff;
348 to->di_aformat = from->di_aformat;
349 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
350 to->di_dmstate = cpu_to_be16(from->di_dmstate);
351 to->di_flags = cpu_to_be16(from->di_flags);
352 to->di_gen = cpu_to_be32(from->di_gen);
353
354 if (from->di_version == 3) {
355 to->di_changecount = cpu_to_be64(from->di_changecount);
356 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
357 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
358 to->di_flags2 = cpu_to_be64(from->di_flags2);
359 to->di_ino = cpu_to_be64(from->di_ino);
360 to->di_lsn = cpu_to_be64(from->di_lsn);
361 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
362 uuid_copy(&to->di_uuid, &from->di_uuid);
363 to->di_flushiter = 0;
364 } else {
365 to->di_flushiter = cpu_to_be16(from->di_flushiter);
366 }
367}
368
369static bool
370xfs_dinode_verify(
371 struct xfs_mount *mp,
372 struct xfs_inode *ip,
373 struct xfs_dinode *dip)
374{
375 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
376 return false;
377
378 /* only version 3 or greater inodes are extensively verified here */
379 if (dip->di_version < 3)
380 return true;
381
382 if (!xfs_sb_version_hascrc(&mp->m_sb))
383 return false;
384 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
385 XFS_DINODE_CRC_OFF))
386 return false;
387 if (be64_to_cpu(dip->di_ino) != ip->i_ino)
388 return false;
389 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
390 return false;
391 return true;
392}
393
394void
395xfs_dinode_calc_crc(
396 struct xfs_mount *mp,
397 struct xfs_dinode *dip)
398{
399 __uint32_t crc;
400
401 if (dip->di_version < 3)
402 return;
403
404 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
405 crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
406 XFS_DINODE_CRC_OFF);
407 dip->di_crc = xfs_end_cksum(crc);
408}
409
410/*
411 * Read the disk inode attributes into the in-core inode structure.
412 *
413 * For version 5 superblocks, if we are initialising a new inode and we are not
414 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
415 * inode core with a random generation number. If we are keeping inodes around,
416 * we need to read the inode cluster to get the existing generation number off
417 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
418 * format) then log recovery is dependent on the di_flushiter field being
419 * initialised from the current on-disk value and hence we must also read the
420 * inode off disk.
421 */
422int
423xfs_iread(
424 xfs_mount_t *mp,
425 xfs_trans_t *tp,
426 xfs_inode_t *ip,
427 uint iget_flags)
428{
429 xfs_buf_t *bp;
430 xfs_dinode_t *dip;
431 int error;
432
433 /*
434 * Fill in the location information in the in-core inode.
435 */
436 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
437 if (error)
438 return error;
439
440 /* shortcut IO on inode allocation if possible */
441 if ((iget_flags & XFS_IGET_CREATE) &&
442 xfs_sb_version_hascrc(&mp->m_sb) &&
443 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
444 /* initialise the on-disk inode core */
445 memset(&ip->i_d, 0, sizeof(ip->i_d));
446 VFS_I(ip)->i_generation = prandom_u32();
447 if (xfs_sb_version_hascrc(&mp->m_sb))
448 ip->i_d.di_version = 3;
449 else
450 ip->i_d.di_version = 2;
451 return 0;
452 }
453
454 /*
455 * Get pointers to the on-disk inode and the buffer containing it.
456 */
457 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
458 if (error)
459 return error;
460
461 /* even unallocated inodes are verified */
462 if (!xfs_dinode_verify(mp, ip, dip)) {
463 xfs_alert(mp, "%s: validation failed for inode %lld failed",
464 __func__, ip->i_ino);
465
466 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
467 error = -EFSCORRUPTED;
468 goto out_brelse;
469 }
470
471 /*
472 * If the on-disk inode is already linked to a directory
473 * entry, copy all of the inode into the in-core inode.
474 * xfs_iformat_fork() handles copying in the inode format
475 * specific information.
476 * Otherwise, just get the truly permanent information.
477 */
478 if (dip->di_mode) {
479 xfs_inode_from_disk(ip, dip);
480 error = xfs_iformat_fork(ip, dip);
481 if (error) {
482#ifdef DEBUG
483 xfs_alert(mp, "%s: xfs_iformat() returned error %d",
484 __func__, error);
485#endif /* DEBUG */
486 goto out_brelse;
487 }
488 } else {
489 /*
490 * Partial initialisation of the in-core inode. Just the bits
491 * that xfs_ialloc won't overwrite or relies on being correct.
492 */
493 ip->i_d.di_version = dip->di_version;
494 VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
495 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
496
497 /*
498 * Make sure to pull in the mode here as well in
499 * case the inode is released without being used.
500 * This ensures that xfs_inactive() will see that
501 * the inode is already free and not try to mess
502 * with the uninitialized part of it.
503 */
504 VFS_I(ip)->i_mode = 0;
505 }
506
507 ASSERT(ip->i_d.di_version >= 2);
508 ip->i_delayed_blks = 0;
509
510 /*
511 * Mark the buffer containing the inode as something to keep
512 * around for a while. This helps to keep recently accessed
513 * meta-data in-core longer.
514 */
515 xfs_buf_set_ref(bp, XFS_INO_REF);
516
517 /*
518 * Use xfs_trans_brelse() to release the buffer containing the on-disk
519 * inode, because it was acquired with xfs_trans_read_buf() in
520 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
521 * brelse(). If we're within a transaction, then xfs_trans_brelse()
522 * will only release the buffer if it is not dirty within the
523 * transaction. It will be OK to release the buffer in this case,
524 * because inodes on disk are never destroyed and we will be locking the
525 * new in-core inode before putting it in the cache where other
526 * processes can find it. Thus we don't have to worry about the inode
527 * being changed just because we released the buffer.
528 */
529 out_brelse:
530 xfs_trans_brelse(tp, bp);
531 return error;
532}