/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

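	/* number of inodes held in this cluster buffer */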
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * Find the buffer associated with the given inode map.  We do basic
 * validation checks on the buffer once it has been retrieved from
 * disk.
 */
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	struct xfs_imap	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		iget_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			xfs_warn(mp,
				"%s: xfs_trans_read_buf() returned error %d.",
				__func__, error);
		} else {
			ASSERT(buf_flags & XBF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (on DEBUG kernels), or just the first inode in the buffer otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_UNTRUSTED) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset,
	uint		imap_flags)
{
	struct xfs_imap	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}


/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * The inode is expected to have already been mapped to its buffer and
 * read in once, thus we can use the mapping information stored in the
 * inode rather than calling xfs_imap().  This allows us to avoid the
 * overhead of looking at the inode btree for small block file systems
 * (see xfs_imap()).
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	uint		buf_flags)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(ip->i_imap.im_blkno != 0);

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
	if (error)
		return error;

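	/*
	 * With XBF_TRYLOCK the read can come back with no error but also no
	 * buffer if the buffer lock could not be taken without blocking.
	 */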
	if (!bp) {
		ASSERT(buf_flags & XBF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
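
	/* how many extent records fit inline in the data fork */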
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
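		/*
		 * Copy each extent record in place, converting the two
		 * 64-bit words from big-endian disk order; the packed
		 * on-disk records may be unaligned.
		 */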
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

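/*
 * Copy the on-disk inode core into the in-core structure, converting
 * each multi-byte field from big-endian disk order to CPU order.
 */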
STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t	*to,
	xfs_dinode_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

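/*
 * Copy the in-core inode back into the on-disk format, converting each
 * multi-byte field from CPU order to big-endian disk order.
 */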
void
xfs_dinode_to_disk(
	xfs_dinode_t	*to,
	xfs_icdinode_t	*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

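/*
 * Convert on-disk inode flags (XFS_DIFLAG_*) to the extended flag set
 * (XFS_XFLAG_*) reported to userspace.
 */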
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
			       XBF_LOCK, iget_flags);
	if (error)
		return error;
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
#ifdef DEBUG
		xfs_alert(mp,
			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
		error = XFS_ERROR(EINVAL);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error)
		return error;
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

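	/* stamp the creation time into all three inode timestamps */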
	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
STATIC void
xfs_isize_check(
	struct xfs_inode	*ip,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		map_first;
	int			nimaps;
	xfs_bmbt_irec_t		imaps[2];

	if (!S_ISREG(ip->i_d.di_mode))
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#else	/* DEBUG */
#define xfs_isize_check(ip, isize)
#endif	/* DEBUG */

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(new_size <= ip->i_size);
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}

int
xfs_itruncate_data(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	int			error;

	trace_xfs_itruncate_data_start(ip, new_size);

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new_size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	if (ip->i_d.di_nextents > 0) {
		/*
		 * If we are not changing the file size then do not update
		 * the on-disk file size - we may be called from
		 * xfs_inactive_free_eofblocks().  If we update the on-disk
		 * file size and then the system crashes before the contents
		 * of the file are flushed to disk then the files may be
		 * full of holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
			xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
		}
	}

	error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
	if (error)
		return error;

	/*
	 * If we are not changing the file size then do not update the on-disk
	 * file size - we may be called from xfs_inactive_free_eofblocks().
	 * If we update the on-disk file size and then the system crashes
	 * before the contents of the file are flushed to disk then the files
	 * may be full of holes (ie NULL files bug).
	 */
	xfs_isize_check(ip, new_size);
	if (ip->i_size != new_size) {
		ip->i_d.di_size = new_size;
		ip->i_size = new_size;
	}

	ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
	ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_data_end(ip, new_size);
	return 0;
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's
		 * on-disk buffer to see if there is anyone after us
		 * on the list.  Only modify our next pointer if it
		 * is not already NULLAGINO.  This saves us the overhead
		 * of dealing with the buffer when there is no need to
		 * change it.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error) {
			xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			/*
			 * If the last inode wasn't the one pointing to
			 * us, then release its buffer since we're not
			 * going to do anything with it.
			 */
			if (last_ibp != NULL) {
				xfs_trans_brelse(tp, last_ibp);
			}
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
					    &last_ibp, &last_offset, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_inotobp() returned error %d.",
					__func__, error);
				return error;
			}
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}
		/*
		 * Now last_ibp points to the buffer previous to us on
		 * the unlinked list.  Pull us from the list.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error) {
			xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC void
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
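	/*
	 * Work out the geometry of the inode chunk being freed: how many
	 * filesystem blocks make up one inode cluster buffer, how many
	 * inodes each buffer holds, and how many buffers we have to walk.
	 */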
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_LOCK);

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale.  These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them.  By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup.  We need to check under the
			 * i_flags_lock for a valid inode here.  Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale.  If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				ip->i_update_core = 0;
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_format.ilf_fields;
			iip->ili_format.ilf_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
}
1803
1804/*
1805 * This is called to return an inode to the inode free list.
1806 * The inode should already be truncated to 0 length and have
1807 * no pages associated with it. This routine also assumes that
1808 * the inode is already a part of the transaction.
1809 *
1810 * The on-disk copy of the inode will have been added to the list
1811 * of unlinked inodes in the AGI. We need to remove the inode from
1812 * that list atomically with respect to freeing it here.
1813 */
1814int
1815xfs_ifree(
1816 xfs_trans_t *tp,
1817 xfs_inode_t *ip,
1818 xfs_bmap_free_t *flist)
1819{
1820 int error;
1821 int delete;
1822 xfs_ino_t first_ino;
1823 xfs_dinode_t *dip;
1824 xfs_buf_t *ibp;
1825
1826 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1827 ASSERT(ip->i_d.di_nlink == 0);
1828 ASSERT(ip->i_d.di_nextents == 0);
1829 ASSERT(ip->i_d.di_anextents == 0);
1830 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
1831 (!S_ISREG(ip->i_d.di_mode)));
1832 ASSERT(ip->i_d.di_nblocks == 0);
1833
1834 /*
1835 * Pull the on-disk inode from the AGI unlinked list.
1836 */
1837 error = xfs_iunlink_remove(tp, ip);
1838 if (error != 0) {
1839 return error;
1840 }
1841
1842 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
1843 if (error != 0) {
1844 return error;
1845 }
1846 ip->i_d.di_mode = 0; /* mark incore inode as free */
1847 ip->i_d.di_flags = 0;
1848 ip->i_d.di_dmevmask = 0;
1849 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
1850 ip->i_df.if_ext_max =
1851 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
1852 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1853 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1854 /*
1855 * Bump the generation count so no one will be confused
1856 * by reincarnations of this inode.
1857 */
1858 ip->i_d.di_gen++;
1859
1860 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1861
1862 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
1863 if (error)
1864 return error;
1865
1866 /*
1867 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
1868 * from picking up this inode when it is reclaimed (its incore state
1869	 * initialized but not flushed to disk yet). The in-core di_mode is
1870 * already cleared and a corresponding transaction logged.
1871 * The hack here just synchronizes the in-core to on-disk
1872 * di_mode value in advance before the actual inode sync to disk.
1873 * This is OK because the inode is already unlinked and would never
1874 * change its di_mode again for this inode generation.
1875 * This is a temporary hack that would require a proper fix
1876 * in the future.
1877 */
1878 dip->di_mode = 0;
1879
1880 if (delete) {
1881 xfs_ifree_cluster(ip, tp, first_ino);
1882 }
1883
1884 return 0;
1885}
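/*
 * Illustrative sketch (not part of the original file): roughly how a
 * caller such as the inactive path might drive xfs_ifree(). Transaction
 * allocation, reservation and inode locking are assumed to have been
 * done already; error handling is abbreviated.
 */
#if 0
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	int			error;

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);	/* unlink + free inode */
	if (!error)
		error = xfs_bmap_finish(&tp, &free_list, &committed);
#endif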
1886
1887/*
1888 * Reallocate the space for if_broot based on the number of records
1889 * being added or deleted as indicated in rec_diff. Move the records
1890 * and pointers in if_broot to fit the new size. When shrinking this
1891 * will eliminate holes between the records and pointers created by
1892 * the caller. When growing this will create holes to be filled in
1893 * by the caller.
1894 *
1895 * The caller must not request to add more records than would fit in
1896 * the on-disk inode root. If the if_broot is currently NULL, then
1897 * if we are adding records, one will be allocated. The caller must also
1898 * not request that the number of records go below zero, although
1899 * it can go to zero.
1900 *
1901 * ip -- the inode whose if_broot area is changing
1902 * ext_diff -- the change in the number of records, positive or negative,
1903 * requested for the if_broot array.
1904 */
1905void
1906xfs_iroot_realloc(
1907 xfs_inode_t *ip,
1908 int rec_diff,
1909 int whichfork)
1910{
1911 struct xfs_mount *mp = ip->i_mount;
1912 int cur_max;
1913 xfs_ifork_t *ifp;
1914 struct xfs_btree_block *new_broot;
1915 int new_max;
1916 size_t new_size;
1917 char *np;
1918 char *op;
1919
1920 /*
1921 * Handle the degenerate case quietly.
1922 */
1923 if (rec_diff == 0) {
1924 return;
1925 }
1926
1927 ifp = XFS_IFORK_PTR(ip, whichfork);
1928 if (rec_diff > 0) {
1929 /*
1930 * If there wasn't any memory allocated before, just
1931 * allocate it now and get out.
1932 */
1933 if (ifp->if_broot_bytes == 0) {
1934 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
1935 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1936 ifp->if_broot_bytes = (int)new_size;
1937 return;
1938 }
1939
1940 /*
1941 * If there is already an existing if_broot, then we need
1942 * to realloc() it and shift the pointers to their new
1943 * location. The records don't change location because
1944 * they are kept butted up against the btree block header.
1945 */
1946 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1947 new_max = cur_max + rec_diff;
1948 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1949 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
1950 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
1951 KM_SLEEP | KM_NOFS);
1952 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1953 ifp->if_broot_bytes);
1954 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1955 (int)new_size);
1956 ifp->if_broot_bytes = (int)new_size;
1957 ASSERT(ifp->if_broot_bytes <=
1958 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
1959 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
1960 return;
1961 }
1962
1963 /*
1964 * rec_diff is less than 0. In this case, we are shrinking the
1965 * if_broot buffer. It must already exist. If we go to zero
1966 * records, just get rid of the root and clear the status bit.
1967 */
1968 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
1969 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1970 new_max = cur_max + rec_diff;
1971 ASSERT(new_max >= 0);
1972 if (new_max > 0)
1973 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1974 else
1975 new_size = 0;
1976 if (new_size > 0) {
1977 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1978 /*
1979 * First copy over the btree block header.
1980 */
1981 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
1982 } else {
1983 new_broot = NULL;
1984 ifp->if_flags &= ~XFS_IFBROOT;
1985 }
1986
1987 /*
1988 * Only copy the records and pointers if there are any.
1989 */
1990 if (new_max > 0) {
1991 /*
1992 * First copy the records.
1993 */
1994 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
1995 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
1996 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
1997
1998 /*
1999 * Then copy the pointers.
2000 */
2001 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2002 ifp->if_broot_bytes);
2003 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
2004 (int)new_size);
2005 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2006 }
2007 kmem_free(ifp->if_broot);
2008 ifp->if_broot = new_broot;
2009 ifp->if_broot_bytes = (int)new_size;
2010 ASSERT(ifp->if_broot_bytes <=
2011 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2012 return;
2013}
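/*
 * Worked example (illustrative numbers only) of the growth case in
 * xfs_iroot_realloc() above, assuming the usual broot layout of block
 * header, then records, then pointers.
 */
#if 0
	cur_max  = 9;					/* records now */
	new_max  = cur_max + 1;				/* rec_diff == 1 */
	new_size = XFS_BMAP_BROOT_SPACE_CALC(new_max);	/* hdr + 10 of each */
	/*
	 * After kmem_realloc() the records still sit just after the
	 * header, but the pointer array starts later in the larger
	 * buffer, so it is memmove()d from its old offset (derived
	 * from the old if_broot_bytes) to its new one (from new_size).
	 */
#endif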
2014
2015
2016/*
2017 * This is called when the amount of space needed for if_data
2018 * is increased or decreased. The change in size is indicated by
2019 * the number of bytes that need to be added or deleted in the
2020 * byte_diff parameter.
2021 *
2022 * If the amount of space needed has decreased below the size of the
2023 * inline buffer, then switch to using the inline buffer. Otherwise,
2024 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2025 * to what is needed.
2026 *
2027 * ip -- the inode whose if_data area is changing
2028 * byte_diff -- the change in the number of bytes, positive or negative,
2029 * requested for the if_data array.
2030 */
2031void
2032xfs_idata_realloc(
2033 xfs_inode_t *ip,
2034 int byte_diff,
2035 int whichfork)
2036{
2037 xfs_ifork_t *ifp;
2038 int new_size;
2039 int real_size;
2040
2041 if (byte_diff == 0) {
2042 return;
2043 }
2044
2045 ifp = XFS_IFORK_PTR(ip, whichfork);
2046 new_size = (int)ifp->if_bytes + byte_diff;
2047 ASSERT(new_size >= 0);
2048
2049 if (new_size == 0) {
2050 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2051 kmem_free(ifp->if_u1.if_data);
2052 }
2053 ifp->if_u1.if_data = NULL;
2054 real_size = 0;
2055 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2056 /*
2057 * If the valid extents/data can fit in if_inline_ext/data,
2058 * copy them from the malloc'd vector and free it.
2059 */
2060 if (ifp->if_u1.if_data == NULL) {
2061 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2062 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2063 ASSERT(ifp->if_real_bytes != 0);
2064 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2065 new_size);
2066 kmem_free(ifp->if_u1.if_data);
2067 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2068 }
2069 real_size = 0;
2070 } else {
2071 /*
2072 * Stuck with malloc/realloc.
2073 * For inline data, the underlying buffer must be
2074 * a multiple of 4 bytes in size so that it can be
2075 * logged and stay on word boundaries. We enforce
2076 * that here.
2077 */
2078 real_size = roundup(new_size, 4);
2079 if (ifp->if_u1.if_data == NULL) {
2080 ASSERT(ifp->if_real_bytes == 0);
2081 ifp->if_u1.if_data = kmem_alloc(real_size,
2082 KM_SLEEP | KM_NOFS);
2083 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2084 /*
2085 * Only do the realloc if the underlying size
2086 * is really changing.
2087 */
2088 if (ifp->if_real_bytes != real_size) {
2089 ifp->if_u1.if_data =
2090 kmem_realloc(ifp->if_u1.if_data,
2091 real_size,
2092 ifp->if_real_bytes,
2093 KM_SLEEP | KM_NOFS);
2094 }
2095 } else {
2096 ASSERT(ifp->if_real_bytes == 0);
2097 ifp->if_u1.if_data = kmem_alloc(real_size,
2098 KM_SLEEP | KM_NOFS);
2099 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2100 ifp->if_bytes);
2101 }
2102 }
2103 ifp->if_real_bytes = real_size;
2104 ifp->if_bytes = new_size;
2105 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2106}
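/*
 * Worked example (illustrative values) of the 4-byte rounding in
 * xfs_idata_realloc() above: the allocation is padded so logging stays
 * word aligned, while if_bytes tracks the exact payload.
 */
#if 0
	new_size  = 13;
	real_size = roundup(new_size, 4);	/* 16 bytes allocated */
	/* ifp->if_bytes == 13, ifp->if_real_bytes == 16 */
#endif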
2107
2108void
2109xfs_idestroy_fork(
2110 xfs_inode_t *ip,
2111 int whichfork)
2112{
2113 xfs_ifork_t *ifp;
2114
2115 ifp = XFS_IFORK_PTR(ip, whichfork);
2116 if (ifp->if_broot != NULL) {
2117 kmem_free(ifp->if_broot);
2118 ifp->if_broot = NULL;
2119 }
2120
2121 /*
2122 * If the format is local, then we can't have an extents
2123 * array so just look for an inline data array. If we're
2124 * not local then we may or may not have an extents list,
2125 * so check and free it up if we do.
2126 */
2127 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2128 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2129 (ifp->if_u1.if_data != NULL)) {
2130 ASSERT(ifp->if_real_bytes != 0);
2131 kmem_free(ifp->if_u1.if_data);
2132 ifp->if_u1.if_data = NULL;
2133 ifp->if_real_bytes = 0;
2134 }
2135 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2136 ((ifp->if_flags & XFS_IFEXTIREC) ||
2137 ((ifp->if_u1.if_extents != NULL) &&
2138 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2139 ASSERT(ifp->if_real_bytes != 0);
2140 xfs_iext_destroy(ifp);
2141 }
2142 ASSERT(ifp->if_u1.if_extents == NULL ||
2143 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2144 ASSERT(ifp->if_real_bytes == 0);
2145 if (whichfork == XFS_ATTR_FORK) {
2146 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2147 ip->i_afp = NULL;
2148 }
2149}
2150
2151/*
2152 * This is called to unpin an inode. The caller must have the inode locked
2153 * in at least shared mode so that the inode cannot be subsequently pinned
2154 * once someone is waiting for it to be unpinned.
2155 */
2156static void
2157xfs_iunpin_nowait(
2158 struct xfs_inode *ip)
2159{
2160 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2161
2162 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2163
2164 /* Give the log a push to start the unpinning I/O */
2165 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2167}
2168
2169void
2170xfs_iunpin_wait(
2171 struct xfs_inode *ip)
2172{
2173 if (xfs_ipincount(ip)) {
2174 xfs_iunpin_nowait(ip);
2175 wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
2176 }
2177}
2178
2179/*
2180 * xfs_iextents_copy()
2181 *
2182 * This is called to copy the REAL extents (as opposed to the delayed
2183 * allocation extents) from the inode into the given buffer. It
2184 * returns the number of bytes copied into the buffer.
2185 *
2186 * The extents are examined one at a time, and any delayed
2187 * allocation extents are skipped, since those exist only in
2188 * memory and must never be written into the on-disk fork.
2189 */
2190int
2191xfs_iextents_copy(
2192 xfs_inode_t *ip,
2193 xfs_bmbt_rec_t *dp,
2194 int whichfork)
2195{
2196 int copied;
2197 int i;
2198 xfs_ifork_t *ifp;
2199 int nrecs;
2200 xfs_fsblock_t start_block;
2201
2202 ifp = XFS_IFORK_PTR(ip, whichfork);
2203 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2204 ASSERT(ifp->if_bytes > 0);
2205
2206 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2207 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2208 ASSERT(nrecs > 0);
2209
2210 /*
2211	 * Copy the extents one at a time, skipping any delayed
2212	 * allocation extents, since those exist only in memory.
2213	 * There must be at least one real, non-delayed extent
2214	 * in the fork.
2215 */
2216 copied = 0;
2217 for (i = 0; i < nrecs; i++) {
2218 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2219 start_block = xfs_bmbt_get_startblock(ep);
2220 if (isnullstartblock(start_block)) {
2221 /*
2222 * It's a delayed allocation extent, so skip it.
2223 */
2224 continue;
2225 }
2226
2227 /* Translate to on disk format */
2228 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2229 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2230 dp++;
2231 copied++;
2232 }
2233 ASSERT(copied != 0);
2234 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2235
2236 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2237}
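/*
 * Hypothetical helper (not in this file), showing the same delalloc
 * filter xfs_iextents_copy() applies: a delayed allocation extent has
 * no real start block yet, so it must never reach the on-disk fork.
 */
#if 0
STATIC int
xfs_iext_count_real(
	xfs_ifork_t	*ifp,
	xfs_extnum_t	nrecs)
{
	xfs_extnum_t	i;
	int		real = 0;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);

		if (!isnullstartblock(xfs_bmbt_get_startblock(ep)))
			real++;
	}
	return real;
}
#endif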
2238
2239/*
2240 * Each of the following cases stores data into the same region
2241 * of the on-disk inode, so only one of them can be valid at
2242 * any given time. While it is possible to have conflicting formats
2243 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2244 * in EXTENTS format, this can only happen when the fork has
2245 * changed formats after being modified but before being flushed.
2246 * In these cases, the format always takes precedence, because the
2247 * format indicates the current state of the fork.
2248 */
2249/*ARGSUSED*/
2250STATIC void
2251xfs_iflush_fork(
2252 xfs_inode_t *ip,
2253 xfs_dinode_t *dip,
2254 xfs_inode_log_item_t *iip,
2255 int whichfork,
2256 xfs_buf_t *bp)
2257{
2258 char *cp;
2259 xfs_ifork_t *ifp;
2260 xfs_mount_t *mp;
2261#ifdef XFS_TRANS_DEBUG
2262 int first;
2263#endif
2264 static const short brootflag[2] =
2265 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2266 static const short dataflag[2] =
2267 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2268 static const short extflag[2] =
2269 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2270
2271 if (!iip)
2272 return;
2273 ifp = XFS_IFORK_PTR(ip, whichfork);
2274 /*
2275 * This can happen if we gave up in iformat in an error path,
2276 * for the attribute fork.
2277 */
2278 if (!ifp) {
2279 ASSERT(whichfork == XFS_ATTR_FORK);
2280 return;
2281 }
2282 cp = XFS_DFORK_PTR(dip, whichfork);
2283 mp = ip->i_mount;
2284 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2285 case XFS_DINODE_FMT_LOCAL:
2286 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2287 (ifp->if_bytes > 0)) {
2288 ASSERT(ifp->if_u1.if_data != NULL);
2289 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2290 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2291 }
2292 break;
2293
2294 case XFS_DINODE_FMT_EXTENTS:
2295 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2296 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2297 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2298 (ifp->if_bytes > 0)) {
2299 ASSERT(xfs_iext_get_ext(ifp, 0));
2300 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2301 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2302 whichfork);
2303 }
2304 break;
2305
2306 case XFS_DINODE_FMT_BTREE:
2307 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2308 (ifp->if_broot_bytes > 0)) {
2309 ASSERT(ifp->if_broot != NULL);
2310 ASSERT(ifp->if_broot_bytes <=
2311 (XFS_IFORK_SIZE(ip, whichfork) +
2312 XFS_BROOT_SIZE_ADJ));
2313 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2314 (xfs_bmdr_block_t *)cp,
2315 XFS_DFORK_SIZE(dip, mp, whichfork));
2316 }
2317 break;
2318
2319 case XFS_DINODE_FMT_DEV:
2320 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2321 ASSERT(whichfork == XFS_DATA_FORK);
2322 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2323 }
2324 break;
2325
2326 case XFS_DINODE_FMT_UUID:
2327 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2328 ASSERT(whichfork == XFS_DATA_FORK);
2329 memcpy(XFS_DFORK_DPTR(dip),
2330 &ip->i_df.if_u2.if_uuid,
2331 sizeof(uuid_t));
2332 }
2333 break;
2334
2335 default:
2336 ASSERT(0);
2337 break;
2338 }
2339}
2340
2341STATIC int
2342xfs_iflush_cluster(
2343 xfs_inode_t *ip,
2344 xfs_buf_t *bp)
2345{
2346 xfs_mount_t *mp = ip->i_mount;
2347 struct xfs_perag *pag;
2348 unsigned long first_index, mask;
2349 unsigned long inodes_per_cluster;
2350 int ilist_size;
2351 xfs_inode_t **ilist;
2352 xfs_inode_t *iq;
2353 int nr_found;
2354 int clcount = 0;
2355 int bufwasdelwri;
2356 int i;
2357
2358 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2359
2360 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2361 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2362 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2363 if (!ilist)
2364 goto out_put;
2365
2366 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2367 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2368 rcu_read_lock();
2369 /* really need a gang lookup range call here */
2370 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2371 first_index, inodes_per_cluster);
2372 if (nr_found == 0)
2373 goto out_free;
2374
2375 for (i = 0; i < nr_found; i++) {
2376 iq = ilist[i];
2377 if (iq == ip)
2378 continue;
2379
2380 /*
2381 * because this is an RCU protected lookup, we could find a
2382 * recently freed or even reallocated inode during the lookup.
2383 * We need to check under the i_flags_lock for a valid inode
2384 * here. Skip it if it is not valid or the wrong inode.
2385 */
2386		spin_lock(&iq->i_flags_lock);
2387		if (!iq->i_ino ||
2388		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2389			spin_unlock(&iq->i_flags_lock);
2390			continue;
2391		}
2392		spin_unlock(&iq->i_flags_lock);
2393
2394 /*
2395 * Do an un-protected check to see if the inode is dirty and
2396 * is a candidate for flushing. These checks will be repeated
2397 * later after the appropriate locks are acquired.
2398 */
2399 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2400 continue;
2401
2402 /*
2403 * Try to get locks. If any are unavailable or it is pinned,
2404 * then this inode cannot be flushed and is skipped.
2405 */
2406
2407 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2408 continue;
2409 if (!xfs_iflock_nowait(iq)) {
2410 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2411 continue;
2412 }
2413 if (xfs_ipincount(iq)) {
2414 xfs_ifunlock(iq);
2415 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2416 continue;
2417 }
2418
2419 /*
2420 * arriving here means that this inode can be flushed. First
2421 * re-check that it's dirty before flushing.
2422 */
2423 if (!xfs_inode_clean(iq)) {
2424 int error;
2425 error = xfs_iflush_int(iq, bp);
2426 if (error) {
2427 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2428 goto cluster_corrupt_out;
2429 }
2430 clcount++;
2431 } else {
2432 xfs_ifunlock(iq);
2433 }
2434 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2435 }
2436
2437 if (clcount) {
2438 XFS_STATS_INC(xs_icluster_flushcnt);
2439 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2440 }
2441
2442out_free:
2443 rcu_read_unlock();
2444 kmem_free(ilist);
2445out_put:
2446 xfs_perag_put(pag);
2447 return 0;
2448
2450cluster_corrupt_out:
2451 /*
2452 * Corruption detected in the clustering loop. Invalidate the
2453 * inode buffer and shut down the filesystem.
2454 */
2455 rcu_read_unlock();
2456 /*
2457 * Clean up the buffer. If it was B_DELWRI, just release it --
2458 * brelse can handle it with no problems. If not, shut down the
2459 * filesystem before releasing the buffer.
2460 */
2461 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
2462 if (bufwasdelwri)
2463 xfs_buf_relse(bp);
2464
2465 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2466
2467 if (!bufwasdelwri) {
2468 /*
2469 * Just like incore_relse: if we have b_iodone functions,
2470 * mark the buffer as an error and call them. Otherwise
2471 * mark it as stale and brelse.
2472 */
2473 if (bp->b_iodone) {
2474 XFS_BUF_UNDONE(bp);
2475 XFS_BUF_STALE(bp);
2476 xfs_buf_ioerror(bp, EIO);
2477 xfs_buf_ioend(bp, 0);
2478 } else {
2479 XFS_BUF_STALE(bp);
2480 xfs_buf_relse(bp);
2481 }
2482 }
2483
2484 /*
2485 * Unlocks the flush lock
2486 */
2487 xfs_iflush_abort(iq);
2488 kmem_free(ilist);
2489 xfs_perag_put(pag);
2490 return XFS_ERROR(EFSCORRUPTED);
2491}
2492
2493/*
2494 * xfs_iflush() will write a modified inode's changes out to the
2495 * inode's on disk home. The caller must have the inode lock held
2496 * in at least shared mode and the inode flush completion must be
2497 * active as well. The inode lock will still be held upon return from
2498 * the call and the caller is free to unlock it.
2499 * The inode flush will be completed when the inode reaches the disk.
2500 * The flags indicate how the inode's buffer should be written out.
2501 */
2502int
2503xfs_iflush(
2504 xfs_inode_t *ip,
2505 uint flags)
2506{
2507 xfs_inode_log_item_t *iip;
2508 xfs_buf_t *bp;
2509 xfs_dinode_t *dip;
2510 xfs_mount_t *mp;
2511 int error;
2512
2513 XFS_STATS_INC(xs_iflush_count);
2514
2515 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2516 ASSERT(!completion_done(&ip->i_flush));
2517 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2518 ip->i_d.di_nextents > ip->i_df.if_ext_max);
2519
2520 iip = ip->i_itemp;
2521 mp = ip->i_mount;
2522
2523 /*
2524 * We can't flush the inode until it is unpinned, so wait for it if we
2525 * are allowed to block. We know no one new can pin it, because we are
2526 * holding the inode lock shared and you need to hold it exclusively to
2527 * pin the inode.
2528 *
2529 * If we are not allowed to block, force the log out asynchronously so
2530 * that when we come back the inode will be unpinned. If other inodes
2531 * in the same cluster are dirty, they will probably write the inode
2532 * out for us if they occur after the log force completes.
2533 */
2534 if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
2535 xfs_iunpin_nowait(ip);
2536 xfs_ifunlock(ip);
2537 return EAGAIN;
2538 }
2539 xfs_iunpin_wait(ip);
2540
2541 /*
2542 * For stale inodes we cannot rely on the backing buffer remaining
2543 * stale in cache for the remaining life of the stale inode and so
2544 * xfs_itobp() below may give us a buffer that no longer contains
2545 * inodes below. We have to check this after ensuring the inode is
2546 * unpinned so that it is safe to reclaim the stale inode after the
2547 * flush call.
2548 */
2549 if (xfs_iflags_test(ip, XFS_ISTALE)) {
2550 xfs_ifunlock(ip);
2551 return 0;
2552 }
2553
2554 /*
2555 * This may have been unpinned because the filesystem is shutting
2556 * down forcibly. If that's the case we must not write this inode
2557 * to disk, because the log record didn't make it to disk!
2558 */
2559 if (XFS_FORCED_SHUTDOWN(mp)) {
2560 ip->i_update_core = 0;
2561 if (iip)
2562 iip->ili_format.ilf_fields = 0;
2563 xfs_ifunlock(ip);
2564 return XFS_ERROR(EIO);
2565 }
2566
2567 /*
2568 * Get the buffer containing the on-disk inode.
2569 */
2570 error = xfs_itobp(mp, NULL, ip, &dip, &bp,
2571 (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
2572 if (error || !bp) {
2573 xfs_ifunlock(ip);
2574 return error;
2575 }
2576
2577 /*
2578 * First flush out the inode that xfs_iflush was called with.
2579 */
2580 error = xfs_iflush_int(ip, bp);
2581 if (error)
2582 goto corrupt_out;
2583
2584 /*
2585 * If the buffer is pinned then push on the log now so we won't
2586 * get stuck waiting in the write for too long.
2587 */
2588 if (xfs_buf_ispinned(bp))
2589 xfs_log_force(mp, 0);
2590
2591 /*
2592 * inode clustering:
2593 * see if other inodes can be gathered into this write
2594 */
2595 error = xfs_iflush_cluster(ip, bp);
2596 if (error)
2597 goto cluster_corrupt_out;
2598
2599 if (flags & SYNC_WAIT)
2600 error = xfs_bwrite(mp, bp);
2601 else
2602 xfs_bdwrite(mp, bp);
2603 return error;
2604
2605corrupt_out:
2606 xfs_buf_relse(bp);
2607 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2608cluster_corrupt_out:
2609 /*
2610 * Unlocks the flush lock
2611 */
2612 xfs_iflush_abort(ip);
2613 return XFS_ERROR(EFSCORRUPTED);
2614}
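/*
 * Sketch of the caller contract described above (hypothetical caller,
 * loosely modeled on the sync/reclaim paths). Note that xfs_iflush()
 * always consumes the flush lock: it either hands it to buffer I/O
 * completion or drops it itself, so the caller never unlocks it.
 */
#if 0
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_iflock_nowait(ip))
		error = xfs_iflush(ip, SYNC_TRYLOCK);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
#endif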
2615
2616
2617STATIC int
2618xfs_iflush_int(
2619 xfs_inode_t *ip,
2620 xfs_buf_t *bp)
2621{
2622 xfs_inode_log_item_t *iip;
2623 xfs_dinode_t *dip;
2624 xfs_mount_t *mp;
2625#ifdef XFS_TRANS_DEBUG
2626 int first;
2627#endif
2628
2629 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2630 ASSERT(!completion_done(&ip->i_flush));
2631 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2632 ip->i_d.di_nextents > ip->i_df.if_ext_max);
2633
2634 iip = ip->i_itemp;
2635 mp = ip->i_mount;
2636
2637 /* set *dip = inode's place in the buffer */
2638 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
2639
2640 /*
2641 * Clear i_update_core before copying out the data.
2642 * This is for coordination with our timestamp updates
2643 * that don't hold the inode lock. They will always
2644 * update the timestamps BEFORE setting i_update_core,
2645 * so if we clear i_update_core after they set it we
2646 * are guaranteed to see their updates to the timestamps.
2647 * I believe that this depends on strongly ordered memory
2648 * semantics, but we have that. We use the SYNCHRONIZE
2649 * macro to make sure that the compiler does not reorder
2650 * the i_update_core access below the data copy below.
2651 */
2652 ip->i_update_core = 0;
2653 SYNCHRONIZE();
2654
2655 /*
2656 * Make sure to get the latest timestamps from the Linux inode.
2657 */
2658 xfs_synchronize_times(ip);
2659
2660 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
2661 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
2662 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2663 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
2664 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2665 goto corrupt_out;
2666 }
2667 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
2668 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
2669 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2670 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
2671 __func__, ip->i_ino, ip, ip->i_d.di_magic);
2672 goto corrupt_out;
2673 }
2674 if (S_ISREG(ip->i_d.di_mode)) {
2675 if (XFS_TEST_ERROR(
2676 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2677 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
2678 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
2679 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2680 "%s: Bad regular inode %Lu, ptr 0x%p",
2681 __func__, ip->i_ino, ip);
2682 goto corrupt_out;
2683 }
2684 } else if (S_ISDIR(ip->i_d.di_mode)) {
2685 if (XFS_TEST_ERROR(
2686 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2687 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
2688 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
2689 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
2690 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2691 "%s: Bad directory inode %Lu, ptr 0x%p",
2692 __func__, ip->i_ino, ip);
2693 goto corrupt_out;
2694 }
2695 }
2696 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
2697 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
2698 XFS_RANDOM_IFLUSH_5)) {
2699 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2700 "%s: detected corrupt incore inode %Lu, "
2701 "total extents = %d, nblocks = %Ld, ptr 0x%p",
2702 __func__, ip->i_ino,
2703 ip->i_d.di_nextents + ip->i_d.di_anextents,
2704 ip->i_d.di_nblocks, ip);
2705 goto corrupt_out;
2706 }
2707 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
2708 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
2709 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2710 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
2711 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
2712 goto corrupt_out;
2713 }
2714 /*
2715 * bump the flush iteration count, used to detect flushes which
2716 * postdate a log record during recovery.
2717 */
2718
2719 ip->i_d.di_flushiter++;
2720
2721 /*
2722 * Copy the dirty parts of the inode into the on-disk
2723 * inode. We always copy out the core of the inode,
2724 * because if the inode is dirty at all the core must
2725 * be.
2726 */
2727 xfs_dinode_to_disk(dip, &ip->i_d);
2728
2729 /* Wrap, we never let the log put out DI_MAX_FLUSH */
2730 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
2731 ip->i_d.di_flushiter = 0;
2732
2733 /*
2734 * If this is really an old format inode and the superblock version
2735 * has not been updated to support only new format inodes, then
2736 * convert back to the old inode format. If the superblock version
2737 * has been updated, then make the conversion permanent.
2738 */
2739 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
2740 if (ip->i_d.di_version == 1) {
2741 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
2742 /*
2743 * Convert it back.
2744 */
2745 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
2746 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
2747 } else {
2748 /*
2749 * The superblock version has already been bumped,
2750 * so just make the conversion to the new inode
2751 * format permanent.
2752 */
2753 ip->i_d.di_version = 2;
2754 dip->di_version = 2;
2755 ip->i_d.di_onlink = 0;
2756 dip->di_onlink = 0;
2757 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
2758 memset(&(dip->di_pad[0]), 0,
2759 sizeof(dip->di_pad));
2760 ASSERT(xfs_get_projid(ip) == 0);
2761 }
2762 }
2763
2764 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
2765 if (XFS_IFORK_Q(ip))
2766 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
2767 xfs_inobp_check(mp, bp);
2768
2769 /*
2770 * We've recorded everything logged in the inode, so we'd
2771 * like to clear the ilf_fields bits so we don't log and
2772 * flush things unnecessarily. However, we can't stop
2773 * logging all this information until the data we've copied
2774 * into the disk buffer is written to disk. If we did we might
2775 * overwrite the copy of the inode in the log with all the
2776 * data after re-logging only part of it, and in the face of
2777 * a crash we wouldn't have all the data we need to recover.
2778 *
2779 * What we do is move the bits to the ili_last_fields field.
2780 * When logging the inode, these bits are moved back to the
2781 * ilf_fields field. In the xfs_iflush_done() routine we
2782 * clear ili_last_fields, since we know that the information
2783 * those bits represent is permanently on disk. As long as
2784 * the flush completes before the inode is logged again, then
2785 * both ilf_fields and ili_last_fields will be cleared.
2786 *
2787 * We can play with the ilf_fields bits here, because the inode
2788 * lock must be held exclusively in order to set bits there
2789 * and the flush lock protects the ili_last_fields bits.
2790 * Set ili_logged so the flush done
2791 * routine can tell whether or not to look in the AIL.
2792 * Also, store the current LSN of the inode so that we can tell
2793 * whether the item has moved in the AIL from xfs_iflush_done().
2794 * In order to read the lsn we need the AIL lock, because
2795 * it is a 64 bit value that cannot be read atomically.
2796 */
2797 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
2798 iip->ili_last_fields = iip->ili_format.ilf_fields;
2799 iip->ili_format.ilf_fields = 0;
2800 iip->ili_logged = 1;
2801
2802 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2803 &iip->ili_item.li_lsn);
2804
2805 /*
2806 * Attach the function xfs_iflush_done to the inode's
2807 * buffer. This will remove the inode from the AIL
2808 * and unlock the inode's flush lock when the inode is
2809 * completely written to disk.
2810 */
2811 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
2812
2813 ASSERT(bp->b_fspriv != NULL);
2814 ASSERT(bp->b_iodone != NULL);
2815 } else {
2816 /*
2817 * We're flushing an inode which is not in the AIL and has
2818 * not been logged but has i_update_core set. For this
2819 * case we can use a B_DELWRI flush and immediately drop
2820 * the inode flush lock because we can avoid the whole
2821 * AIL state thing. It's OK to drop the flush lock now,
2822 * because we've already locked the buffer and to do anything
2823 * you really need both.
2824 */
2825 if (iip != NULL) {
2826 ASSERT(iip->ili_logged == 0);
2827 ASSERT(iip->ili_last_fields == 0);
2828 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
2829 }
2830 xfs_ifunlock(ip);
2831 }
2832
2833 return 0;
2834
2835corrupt_out:
2836 return XFS_ERROR(EFSCORRUPTED);
2837}
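/*
 * Illustrative timeline (not code from this file) of the logged-field
 * hand-off that xfs_iflush_int() performs above:
 */
#if 0
	/* 1: a transaction logs the inode, under ILOCK_EXCL */
	iip->ili_format.ilf_fields |= XFS_ILOG_CORE;

	/* 2: xfs_iflush_int(), under the flush lock */
	iip->ili_last_fields = iip->ili_format.ilf_fields;
	iip->ili_format.ilf_fields = 0;

	/* 3: buffer I/O completes, xfs_iflush_done() */
	iip->ili_last_fields = 0;

	/*
	 * If the inode is re-logged between 2 and 3, ilf_fields becomes
	 * non-zero again, so nothing is lost across a crash.
	 */
#endif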
2838
2839/*
2840 * Return a pointer to the extent record at file index idx.
2841 */
2842xfs_bmbt_rec_host_t *
2843xfs_iext_get_ext(
2844 xfs_ifork_t *ifp, /* inode fork pointer */
2845 xfs_extnum_t idx) /* index of target extent */
2846{
2847 ASSERT(idx >= 0);
2848 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
2849
2850 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
2851 return ifp->if_u1.if_ext_irec->er_extbuf;
2852 } else if (ifp->if_flags & XFS_IFEXTIREC) {
2853 xfs_ext_irec_t *erp; /* irec pointer */
2854 int erp_idx = 0; /* irec index */
2855 xfs_extnum_t page_idx = idx; /* ext index in target list */
2856
2857 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
2858 return &erp->er_extbuf[page_idx];
2859 } else if (ifp->if_bytes) {
2860 return &ifp->if_u1.if_extents[idx];
2861 } else {
2862 return NULL;
2863 }
2864}
2865
2866/*
2867 * Insert new item(s) into the extent records for incore inode
2868 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
2869 */
2870void
2871xfs_iext_insert(
2872 xfs_inode_t *ip, /* incore inode pointer */
2873 xfs_extnum_t idx, /* starting index of new items */
2874 xfs_extnum_t count, /* number of inserted items */
2875 xfs_bmbt_irec_t *new, /* items to insert */
2876 int state) /* type of extent conversion */
2877{
2878 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
2879 xfs_extnum_t i; /* extent record index */
2880
2881 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
2882
2883 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2884 xfs_iext_add(ifp, idx, count);
2885 for (i = idx; i < idx + count; i++, new++)
2886 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
2887}
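/*
 * Hypothetical usage sketch for xfs_iext_insert(): insert one new
 * data fork extent record at index idx. The field names are the real
 * xfs_bmbt_irec_t members; the values are illustrative.
 */
#if 0
	xfs_bmbt_irec_t	new;

	new.br_startoff = 0;			/* file offset, in blocks */
	new.br_startblock = 100;		/* filesystem block */
	new.br_blockcount = 8;
	new.br_state = XFS_EXT_NORM;
	xfs_iext_insert(ip, idx, 1, &new, 0);	/* 0 == data fork */
#endif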
2888
2889/*
2890 * This is called when the amount of space required for incore file
2891 * extents needs to be increased. The ext_diff parameter stores the
2892 * number of new extents being added and the idx parameter contains
2893 * the extent index where the new extents will be added. If the new
2894 * extents are being appended, then we just need to (re)allocate and
2895 * initialize the space. Otherwise, if the new extents are being
2896 * inserted into the middle of the existing entries, a bit more work
2897 * is required to make room for the new extents to be inserted. The
2898 * caller is responsible for filling in the new extent entries upon
2899 * return.
2900 */
2901void
2902xfs_iext_add(
2903 xfs_ifork_t *ifp, /* inode fork pointer */
2904 xfs_extnum_t idx, /* index to begin adding exts */
2905 int ext_diff) /* number of extents to add */
2906{
2907 int byte_diff; /* new bytes being added */
2908 int new_size; /* size of extents after adding */
2909 xfs_extnum_t nextents; /* number of extents in file */
2910
2911 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2912 ASSERT((idx >= 0) && (idx <= nextents));
2913 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
2914 new_size = ifp->if_bytes + byte_diff;
2915 /*
2916 * If the new number of extents (nextents + ext_diff)
2917 * fits inside the inode, then continue to use the inline
2918 * extent buffer.
2919 */
2920 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
2921 if (idx < nextents) {
2922 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
2923 &ifp->if_u2.if_inline_ext[idx],
2924 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
2925 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
2926 }
2927 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
2928 ifp->if_real_bytes = 0;
2929 }
2930 /*
2931 * Otherwise use a linear (direct) extent list.
2932 * If the extents are currently inside the inode,
2933 * xfs_iext_realloc_direct will switch us from
2934 * inline to direct extent allocation mode.
2935 */
2936 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
2937 xfs_iext_realloc_direct(ifp, new_size);
2938 if (idx < nextents) {
2939 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
2940 &ifp->if_u1.if_extents[idx],
2941 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
2942 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
2943 }
2944 }
2945 /* Indirection array */
2946 else {
2947 xfs_ext_irec_t *erp;
2948 int erp_idx = 0;
2949 int page_idx = idx;
2950
2951 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
2952 if (ifp->if_flags & XFS_IFEXTIREC) {
2953 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
2954 } else {
2955 xfs_iext_irec_init(ifp);
2956 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
2957 erp = ifp->if_u1.if_ext_irec;
2958 }
2959 /* Extents fit in target extent page */
2960 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
2961 if (page_idx < erp->er_extcount) {
2962 memmove(&erp->er_extbuf[page_idx + ext_diff],
2963 &erp->er_extbuf[page_idx],
2964 (erp->er_extcount - page_idx) *
2965 sizeof(xfs_bmbt_rec_t));
2966 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
2967 }
2968 erp->er_extcount += ext_diff;
2969 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
2970 }
2971 /* Insert a new extent page */
2972 else if (erp) {
2973 xfs_iext_add_indirect_multi(ifp,
2974 erp_idx, page_idx, ext_diff);
2975 }
2976 /*
2977 * If extent(s) are being appended to the last page in
2978 * the indirection array and the new extent(s) don't fit
2979 * in the page, then erp is NULL and erp_idx is set to
2980 * the next index needed in the indirection array.
2981 */
2982 else {
2983 int count = ext_diff;
2984
2985 while (count) {
2986 erp = xfs_iext_irec_new(ifp, erp_idx);
2987 erp->er_extcount = count;
2988 count -= MIN(count, (int)XFS_LINEAR_EXTS);
2989 if (count) {
2990 erp_idx++;
2991 }
2992 }
2993 }
2994 }
2995 ifp->if_bytes = new_size;
2996}
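/*
 * Summary sketch (not code from this file) of the three storage
 * regimes xfs_iext_add() chooses between above:
 */
#if 0
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		/* small: records live in if_u2.if_inline_ext in the fork */
	} else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		/* medium: one allocated array, if_u1.if_extents */
	} else {
		/* large: indirection array of XFS_IEXT_BUFSZ buffers,
		 * if_u1.if_ext_irec */
	}
#endif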
2997
2998/*
2999 * This is called when incore extents are being added to the indirection
3000 * array and the new extents do not fit in the target extent list. The
3001 * erp_idx parameter contains the irec index for the target extent list
3002 * in the indirection array, and the idx parameter contains the extent
3003 * index within the list. The number of extents being added is stored
3004 * in the count parameter.
3005 *
3006 * |-------| |-------|
3007 * | | | | idx - number of extents before idx
3008 * | idx | | count |
3009 * | | | | count - number of extents being inserted at idx
3010 * |-------| |-------|
3011 * | count | | nex2 | nex2 - number of extents after idx + count
3012 * |-------| |-------|
3013 */
3014void
3015xfs_iext_add_indirect_multi(
3016 xfs_ifork_t *ifp, /* inode fork pointer */
3017 int erp_idx, /* target extent irec index */
3018 xfs_extnum_t idx, /* index within target list */
3019 int count) /* new extents being added */
3020{
3021 int byte_diff; /* new bytes being added */
3022 xfs_ext_irec_t *erp; /* pointer to irec entry */
3023 xfs_extnum_t ext_diff; /* number of extents to add */
3024 xfs_extnum_t ext_cnt; /* new extents still needed */
3025 xfs_extnum_t nex2; /* extents after idx + count */
3026 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3027 int nlists; /* number of irec's (lists) */
3028
3029 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3030 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3031 nex2 = erp->er_extcount - idx;
3032 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3033
3034 /*
3035	 * Save the second part of the target extent list
3036	 * (all extents at index idx and beyond). */
3037 if (nex2) {
3038 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3039 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3040 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3041 erp->er_extcount -= nex2;
3042 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3043 memset(&erp->er_extbuf[idx], 0, byte_diff);
3044 }
3045
3046 /*
3047 * Add the new extents to the end of the target
3048 * list, then allocate new irec record(s) and
3049 * extent buffer(s) as needed to store the rest
3050 * of the new extents.
3051 */
3052 ext_cnt = count;
3053 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3054 if (ext_diff) {
3055 erp->er_extcount += ext_diff;
3056 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3057 ext_cnt -= ext_diff;
3058 }
3059 while (ext_cnt) {
3060 erp_idx++;
3061 erp = xfs_iext_irec_new(ifp, erp_idx);
3062 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3063 erp->er_extcount = ext_diff;
3064 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3065 ext_cnt -= ext_diff;
3066 }
3067
3068 /* Add nex2 extents back to indirection array */
3069 if (nex2) {
3070 xfs_extnum_t ext_avail;
3071 int i;
3072
3073 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3074 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3075 i = 0;
3076 /*
3077 * If nex2 extents fit in the current page, append
3078 * nex2_ep after the new extents.
3079 */
3080 if (nex2 <= ext_avail) {
3081 i = erp->er_extcount;
3082 }
3083 /*
3084 * Otherwise, check if space is available in the
3085 * next page.
3086 */
3087 else if ((erp_idx < nlists - 1) &&
3088 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3089 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3090 erp_idx++;
3091 erp++;
3092 /* Create a hole for nex2 extents */
3093 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3094 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3095 }
3096 /*
3097 * Final choice, create a new extent page for
3098 * nex2 extents.
3099 */
3100 else {
3101 erp_idx++;
3102 erp = xfs_iext_irec_new(ifp, erp_idx);
3103 }
3104 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3105 kmem_free(nex2_ep);
3106 erp->er_extcount += nex2;
3107 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3108 }
3109}
3110
3111/*
3112 * This is called when the amount of space required for incore file
3113 * extents needs to be decreased. The ext_diff parameter stores the
3114 * number of extents to be removed and the idx parameter contains
3115 * the extent index where the extents will be removed from.
3116 *
3117 * If the amount of space needed has decreased below the linear
3118 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3119 * extent array. Otherwise, use kmem_realloc() to adjust the
3120 * size to what is needed.
3121 */
3122void
3123xfs_iext_remove(
3124 xfs_inode_t *ip, /* incore inode pointer */
3125 xfs_extnum_t idx, /* index to begin removing exts */
3126 int ext_diff, /* number of extents to remove */
3127 int state) /* type of extent conversion */
3128{
3129 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3130 xfs_extnum_t nextents; /* number of extents in file */
3131 int new_size; /* size of extents after removal */
3132
3133 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3134
3135 ASSERT(ext_diff > 0);
3136 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3137 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3138
3139 if (new_size == 0) {
3140 xfs_iext_destroy(ifp);
3141 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3142 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3143 } else if (ifp->if_real_bytes) {
3144 xfs_iext_remove_direct(ifp, idx, ext_diff);
3145 } else {
3146 xfs_iext_remove_inline(ifp, idx, ext_diff);
3147 }
3148 ifp->if_bytes = new_size;
3149}
3150
3151/*
3152 * This removes ext_diff extents from the inline buffer, beginning
3153 * at extent index idx.
3154 */
3155void
3156xfs_iext_remove_inline(
3157 xfs_ifork_t *ifp, /* inode fork pointer */
3158 xfs_extnum_t idx, /* index to begin removing exts */
3159 int ext_diff) /* number of extents to remove */
3160{
3161 int nextents; /* number of extents in file */
3162
3163 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3164 ASSERT(idx < XFS_INLINE_EXTS);
3165 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3166 ASSERT(((nextents - ext_diff) > 0) &&
3167 (nextents - ext_diff) < XFS_INLINE_EXTS);
3168
3169 if (idx + ext_diff < nextents) {
3170 memmove(&ifp->if_u2.if_inline_ext[idx],
3171 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3172 (nextents - (idx + ext_diff)) *
3173 sizeof(xfs_bmbt_rec_t));
3174 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3175 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3176 } else {
3177 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3178 ext_diff * sizeof(xfs_bmbt_rec_t));
3179 }
3180}
3181
3182/*
3183 * This removes ext_diff extents from a linear (direct) extent list,
3184 * beginning at extent index idx. If the extents are being removed
3185 * from the end of the list (i.e. truncate), then we just need to re-
3186 * allocate the list to remove the extra space. Otherwise, if the
3187 * extents are being removed from the middle of the existing extent
3188 * entries, then we first need to move the extent records beginning
3189 * at idx + ext_diff up in the list to overwrite the records being
3190 * removed, then remove the extra space via kmem_realloc.
3191 */
3192void
3193xfs_iext_remove_direct(
3194 xfs_ifork_t *ifp, /* inode fork pointer */
3195 xfs_extnum_t idx, /* index to begin removing exts */
3196 int ext_diff) /* number of extents to remove */
3197{
3198 xfs_extnum_t nextents; /* number of extents in file */
3199 int new_size; /* size of extents after removal */
3200
3201 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3202 new_size = ifp->if_bytes -
3203 (ext_diff * sizeof(xfs_bmbt_rec_t));
3204 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3205
3206 if (new_size == 0) {
3207 xfs_iext_destroy(ifp);
3208 return;
3209 }
3210 /* Move extents up in the list (if needed) */
3211 if (idx + ext_diff < nextents) {
3212 memmove(&ifp->if_u1.if_extents[idx],
3213 &ifp->if_u1.if_extents[idx + ext_diff],
3214 (nextents - (idx + ext_diff)) *
3215 sizeof(xfs_bmbt_rec_t));
3216 }
3217 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3218 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3219 /*
3220 * Reallocate the direct extent list. If the extents
3221 * will fit inside the inode then xfs_iext_realloc_direct
3222 * will switch from direct to inline extent allocation
3223 * mode for us.
3224 */
3225 xfs_iext_realloc_direct(ifp, new_size);
3226 ifp->if_bytes = new_size;
3227}
3228
3229/*
3230 * This is called when incore extents are being removed from the
3231 * indirection array and the extents being removed span multiple extent
3232 * buffers. The idx parameter contains the file extent index where we
3233 * want to begin removing extents, and the count parameter contains
3234 * how many extents need to be removed.
3235 *
3236 * |-------| |-------|
3237 * | nex1 | | | nex1 - number of extents before idx
3238 * |-------| | count |
3239 * | | | | count - number of extents being removed at idx
3240 * | count | |-------|
3241 * | | | nex2 | nex2 - number of extents after idx + count
3242 * |-------| |-------|
3243 */
3244void
3245xfs_iext_remove_indirect(
3246 xfs_ifork_t *ifp, /* inode fork pointer */
3247 xfs_extnum_t idx, /* index to begin removing extents */
3248 int count) /* number of extents to remove */
3249{
3250 xfs_ext_irec_t *erp; /* indirection array pointer */
3251 int erp_idx = 0; /* indirection array index */
3252 xfs_extnum_t ext_cnt; /* extents left to remove */
3253 xfs_extnum_t ext_diff; /* extents to remove in current list */
3254 xfs_extnum_t nex1; /* number of extents before idx */
3255 xfs_extnum_t nex2; /* extents after idx + count */
3256 int page_idx = idx; /* index in target extent list */
3257
3258 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3259 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3260 ASSERT(erp != NULL);
3261 nex1 = page_idx;
3262 ext_cnt = count;
3263 while (ext_cnt) {
3264 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3265 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3266 /*
3267 * Check for deletion of entire list;
3268 * xfs_iext_irec_remove() updates extent offsets.
3269 */
3270 if (ext_diff == erp->er_extcount) {
3271 xfs_iext_irec_remove(ifp, erp_idx);
3272 ext_cnt -= ext_diff;
3273 nex1 = 0;
3274 if (ext_cnt) {
3275 ASSERT(erp_idx < ifp->if_real_bytes /
3276 XFS_IEXT_BUFSZ);
3277 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3278 nex1 = 0;
3279 continue;
3280 } else {
3281 break;
3282 }
3283 }
3284 /* Move extents up (if needed) */
3285 if (nex2) {
3286 memmove(&erp->er_extbuf[nex1],
3287 &erp->er_extbuf[nex1 + ext_diff],
3288 nex2 * sizeof(xfs_bmbt_rec_t));
3289 }
3290 /* Zero out rest of page */
3291 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3292 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3293 /* Update remaining counters */
3294 erp->er_extcount -= ext_diff;
3295 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3296 ext_cnt -= ext_diff;
3297 nex1 = 0;
3298 erp_idx++;
3299 erp++;
3300 }
3301 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3302 xfs_iext_irec_compact(ifp);
3303}
3304
3305/*
3306 * Create, destroy, or resize a linear (direct) block of extents.
3307 */
3308void
3309xfs_iext_realloc_direct(
3310 xfs_ifork_t *ifp, /* inode fork pointer */
3311 int new_size) /* new size of extents */
3312{
3313 int rnew_size; /* real new size of extents */
3314
3315 rnew_size = new_size;
3316
3317 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3318 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3319 (new_size != ifp->if_real_bytes)));
3320
3321 /* Free extent records */
3322 if (new_size == 0) {
3323 xfs_iext_destroy(ifp);
3324 }
3325 /* Resize direct extent list and zero any new bytes */
3326 else if (ifp->if_real_bytes) {
3327 /* Check if extents will fit inside the inode */
3328 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3329 xfs_iext_direct_to_inline(ifp, new_size /
3330 (uint)sizeof(xfs_bmbt_rec_t));
3331 ifp->if_bytes = new_size;
3332 return;
3333 }
3334		if (!is_power_of_2(new_size)) {
3335 rnew_size = roundup_pow_of_two(new_size);
3336 }
3337 if (rnew_size != ifp->if_real_bytes) {
3338 ifp->if_u1.if_extents =
3339 kmem_realloc(ifp->if_u1.if_extents,
3340 rnew_size,
3341 ifp->if_real_bytes, KM_NOFS);
3342 }
3343 if (rnew_size > ifp->if_real_bytes) {
3344 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3345 (uint)sizeof(xfs_bmbt_rec_t)], 0,
3346 rnew_size - ifp->if_real_bytes);
3347 }
3348 }
3349	/*
3350	 * Switch from the inline extent buffer to a direct
3351	 * extent list. The new_size passed in already includes
3352	 * the bytes currently held in the inline buffer, so it
3353	 * can be used as is.
3354	 */
3355	else {
3356		if (!is_power_of_2(new_size)) {
3357			rnew_size = roundup_pow_of_two(new_size);
3358		}
3359		xfs_iext_inline_to_direct(ifp, rnew_size);
3360	}
3361 ifp->if_real_bytes = rnew_size;
3362 ifp->if_bytes = new_size;
3363}
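/*
 * Worked example (illustrative numbers) of the power-of-two sizing in
 * xfs_iext_realloc_direct() above; keeping direct buffers at power-of-
 * two sizes bounds realloc churn as extents are added one at a time.
 */
#if 0
	new_size  = 5 * sizeof(xfs_bmbt_rec_t);		/* 80 bytes needed */
	rnew_size = roundup_pow_of_two(new_size);	/* 128 bytes held */
	/* ifp->if_bytes == 80, ifp->if_real_bytes == 128 */
#endif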
3364
3365/*
3366 * Switch from linear (direct) extent records to inline buffer.
3367 */
3368void
3369xfs_iext_direct_to_inline(
3370 xfs_ifork_t *ifp, /* inode fork pointer */
3371 xfs_extnum_t nextents) /* number of extents in file */
3372{
3373 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3374 ASSERT(nextents <= XFS_INLINE_EXTS);
3375 /*
3376 * The inline buffer was zeroed when we switched
3377 * from inline to direct extent allocation mode,
3378 * so we don't need to clear it here.
3379 */
3380 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3381 nextents * sizeof(xfs_bmbt_rec_t));
3382 kmem_free(ifp->if_u1.if_extents);
3383 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3384 ifp->if_real_bytes = 0;
3385}
3386
3387/*
3388 * Switch from inline buffer to linear (direct) extent records.
3389 * new_size should already be rounded up to the next power of 2
3390 * by the caller (when appropriate), so use new_size as it is.
3391 * However, since new_size may be rounded up, we can't update
3392 * if_bytes here. It is the caller's responsibility to update
3393 * if_bytes upon return.
3394 */
3395void
3396xfs_iext_inline_to_direct(
3397 xfs_ifork_t *ifp, /* inode fork pointer */
3398 int new_size) /* number of extents in file */
3399{
3400 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3401 memset(ifp->if_u1.if_extents, 0, new_size);
3402 if (ifp->if_bytes) {
3403 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
3404 ifp->if_bytes);
3405 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3406 sizeof(xfs_bmbt_rec_t));
3407 }
3408 ifp->if_real_bytes = new_size;
3409}
3410
3411/*
3412 * Resize an extent indirection array to new_size bytes.
3413 */
3414STATIC void
3415xfs_iext_realloc_indirect(
3416 xfs_ifork_t *ifp, /* inode fork pointer */
3417 int new_size) /* new indirection array size */
3418{
3419 int nlists; /* number of irec's (ex lists) */
3420 int size; /* current indirection array size */
3421
3422 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3423 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3424 size = nlists * sizeof(xfs_ext_irec_t);
3425 ASSERT(ifp->if_real_bytes);
3426 ASSERT((new_size >= 0) && (new_size != size));
3427 if (new_size == 0) {
3428 xfs_iext_destroy(ifp);
3429 } else {
3430 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3431 kmem_realloc(ifp->if_u1.if_ext_irec,
3432 new_size, size, KM_NOFS);
3433 }
3434}
3435
3436/*
3437 * Switch from indirection array to linear (direct) extent allocations.
3438 */
3439STATIC void
3440xfs_iext_indirect_to_direct(
3441 xfs_ifork_t *ifp) /* inode fork pointer */
3442{
3443 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3444 xfs_extnum_t nextents; /* number of extents in file */
3445 int size; /* size of file extents */
3446
3447 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3448 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3449 ASSERT(nextents <= XFS_LINEAR_EXTS);
3450 size = nextents * sizeof(xfs_bmbt_rec_t);
3451
3452 xfs_iext_irec_compact_pages(ifp);
3453 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
3454
3455 ep = ifp->if_u1.if_ext_irec->er_extbuf;
3456 kmem_free(ifp->if_u1.if_ext_irec);
3457 ifp->if_flags &= ~XFS_IFEXTIREC;
3458 ifp->if_u1.if_extents = ep;
3459 ifp->if_bytes = size;
3460 if (nextents < XFS_LINEAR_EXTS) {
3461 xfs_iext_realloc_direct(ifp, size);
3462 }
3463}
3464
3465/*
3466 * Free incore file extents.
3467 */
3468void
3469xfs_iext_destroy(
3470 xfs_ifork_t *ifp) /* inode fork pointer */
3471{
3472 if (ifp->if_flags & XFS_IFEXTIREC) {
3473 int erp_idx;
3474 int nlists;
3475
3476 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3477 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
3478 xfs_iext_irec_remove(ifp, erp_idx);
3479 }
3480 ifp->if_flags &= ~XFS_IFEXTIREC;
3481 } else if (ifp->if_real_bytes) {
3482 kmem_free(ifp->if_u1.if_extents);
3483 } else if (ifp->if_bytes) {
3484 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3485 sizeof(xfs_bmbt_rec_t));
3486 }
3487 ifp->if_u1.if_extents = NULL;
3488 ifp->if_real_bytes = 0;
3489 ifp->if_bytes = 0;
3490}
3491
3492/*
3493 * Return a pointer to the extent record for file system block bno.
3494 */
3495xfs_bmbt_rec_host_t * /* pointer to found extent record */
3496xfs_iext_bno_to_ext(
3497 xfs_ifork_t *ifp, /* inode fork pointer */
3498 xfs_fileoff_t bno, /* block number to search for */
3499 xfs_extnum_t *idxp) /* index of target extent */
3500{
3501 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
3502 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
3503 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
3504 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3505 int high; /* upper boundary in search */
3506 xfs_extnum_t idx = 0; /* index of target extent */
3507 int low; /* lower boundary in search */
3508 xfs_extnum_t nextents; /* number of file extents */
3509 xfs_fileoff_t startoff = 0; /* start offset of extent */
3510
3511 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3512 if (nextents == 0) {
3513 *idxp = 0;
3514 return NULL;
3515 }
3516 low = 0;
3517 if (ifp->if_flags & XFS_IFEXTIREC) {
3518 /* Find target extent list */
3519 int erp_idx = 0;
3520 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
3521 base = erp->er_extbuf;
3522 high = erp->er_extcount - 1;
3523 } else {
3524 base = ifp->if_u1.if_extents;
3525 high = nextents - 1;
3526 }
3527 /* Binary search extent records */
3528 while (low <= high) {
3529 idx = (low + high) >> 1;
3530 ep = base + idx;
3531 startoff = xfs_bmbt_get_startoff(ep);
3532 blockcount = xfs_bmbt_get_blockcount(ep);
3533 if (bno < startoff) {
3534 high = idx - 1;
3535 } else if (bno >= startoff + blockcount) {
3536 low = idx + 1;
3537 } else {
3538 /* Convert back to file-based extent index */
3539 if (ifp->if_flags & XFS_IFEXTIREC) {
3540 idx += erp->er_extoff;
3541 }
3542 *idxp = idx;
3543 return ep;
3544 }
3545 }
3546 /* Convert back to file-based extent index */
3547 if (ifp->if_flags & XFS_IFEXTIREC) {
3548 idx += erp->er_extoff;
3549 }
3550 if (bno >= startoff + blockcount) {
3551 if (++idx == nextents) {
3552 ep = NULL;
3553 } else {
3554 ep = xfs_iext_get_ext(ifp, idx);
3555 }
3556 }
3557 *idxp = idx;
3558 return ep;
3559}
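/*
 * Hypothetical usage sketch for xfs_iext_bno_to_ext(): look up the
 * extent covering file block bno; ep is NULL past the last extent.
 */
#if 0
	xfs_extnum_t		idx;
	xfs_bmbt_rec_host_t	*ep;

	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
	if (ep && xfs_bmbt_get_startoff(ep) <= bno) {
		/* bno falls inside the extent at index idx */
	} else {
		/* bno sits in a hole (or beyond EOF when ep == NULL) */
	}
#endif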
3560
3561/*
3562 * Return a pointer to the indirection array entry containing the
3563 * extent record for filesystem block bno. Store the index of the
3564 * target irec in *erp_idxp.
3565 */
3566xfs_ext_irec_t * /* pointer to found extent record */
3567xfs_iext_bno_to_irec(
3568 xfs_ifork_t *ifp, /* inode fork pointer */
3569 xfs_fileoff_t bno, /* block number to search for */
3570 int *erp_idxp) /* irec index of target ext list */
3571{
3572 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
3573 xfs_ext_irec_t *erp_next; /* next indirection array entry */
3574 int erp_idx; /* indirection array index */
3575 int nlists; /* number of extent irec's (lists) */
3576 int high; /* binary search upper limit */
3577 int low; /* binary search lower limit */
3578
3579 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3580 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3581 erp_idx = 0;
3582 low = 0;
3583 high = nlists - 1;
3584 while (low <= high) {
3585 erp_idx = (low + high) >> 1;
3586 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3587 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
3588 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
3589 high = erp_idx - 1;
3590 } else if (erp_next && bno >=
3591 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
3592 low = erp_idx + 1;
3593 } else {
3594 break;
3595 }
3596 }
3597 *erp_idxp = erp_idx;
3598 return erp;
3599}
3600
3601/*
3602 * Return a pointer to the indirection array entry containing the
3603 * extent record at file extent index *idxp. Store the index of the
3604 * target irec in *erp_idxp and store the page index of the target
3605 * extent record in *idxp.
3606 */
3607xfs_ext_irec_t *
3608xfs_iext_idx_to_irec(
3609 xfs_ifork_t *ifp, /* inode fork pointer */
3610 xfs_extnum_t *idxp, /* extent index (file -> page) */
3611 int *erp_idxp, /* pointer to target irec */
3612 int realloc) /* new bytes were just added */
3613{
3614 xfs_ext_irec_t *prev; /* pointer to previous irec */
3615 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
3616 int erp_idx; /* indirection array index */
3617 int nlists; /* number of irec's (ex lists) */
3618 int high; /* binary search upper limit */
3619 int low; /* binary search lower limit */
3620 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
3621
3622 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3623 ASSERT(page_idx >= 0);
3624 ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3625 ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
3626
3627 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3628 erp_idx = 0;
3629 low = 0;
3630 high = nlists - 1;
3631
3632 /* Binary search extent irec's */
3633 while (low <= high) {
3634 erp_idx = (low + high) >> 1;
3635 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3636 prev = erp_idx > 0 ? erp - 1 : NULL;
3637 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
3638 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
3639 high = erp_idx - 1;
3640 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
3641 (page_idx == erp->er_extoff + erp->er_extcount &&
3642 !realloc)) {
3643 low = erp_idx + 1;
3644 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
3645 erp->er_extcount == XFS_LINEAR_EXTS) {
3646 ASSERT(realloc);
3647 page_idx = 0;
3648 erp_idx++;
3649 erp = erp_idx < nlists ? erp + 1 : NULL;
3650 break;
3651 } else {
3652 page_idx -= erp->er_extoff;
3653 break;
3654 }
3655 }
3656 *idxp = page_idx;
3657 *erp_idxp = erp_idx;
3658	return erp;
3659}
3660
3661/*
3662 * Allocate and initialize an indirection array once the space needed
3663 * for incore extents increases above XFS_IEXT_BUFSZ.
3664 */
3665void
3666xfs_iext_irec_init(
3667 xfs_ifork_t *ifp) /* inode fork pointer */
3668{
3669 xfs_ext_irec_t *erp; /* indirection array pointer */
3670 xfs_extnum_t nextents; /* number of extents in file */
3671
3672 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3673 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3674 ASSERT(nextents <= XFS_LINEAR_EXTS);
3675
3676 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
3677
3678 if (nextents == 0) {
3679 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3680 } else if (!ifp->if_real_bytes) {
3681 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
3682 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
3683 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
3684 }
3685 erp->er_extbuf = ifp->if_u1.if_extents;
3686 erp->er_extcount = nextents;
3687 erp->er_extoff = 0;
3688
3689 ifp->if_flags |= XFS_IFEXTIREC;
3690 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
3691 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
3692 ifp->if_u1.if_ext_irec = erp;
3695}
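/*
 * Sketch of the trigger for the switch to indirection format
 * (illustrative only; the real call sites live in the extent list
 * growth paths, and new_size here is the hypothetical byte count
 * after the pending insert): once a direct extent buffer would grow
 * past XFS_IEXT_BUFSZ, convert it before adding more records.
 *
 *	if (!(ifp->if_flags & XFS_IFEXTIREC) &&
 *	    new_size > XFS_IEXT_BUFSZ)
 *		xfs_iext_irec_init(ifp);
 */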
3696
3697/*
3698 * Allocate and initialize a new entry in the indirection array.
3699 */
3700xfs_ext_irec_t *
3701xfs_iext_irec_new(
3702 xfs_ifork_t *ifp, /* inode fork pointer */
3703 int erp_idx) /* index for new irec */
3704{
3705 xfs_ext_irec_t *erp; /* indirection array pointer */
3706 int i; /* loop counter */
3707 int nlists; /* number of irec's (ex lists) */
3708
3709 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3710 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3711
3712 /* Resize indirection array */
3713 xfs_iext_realloc_indirect(ifp, ++nlists *
3714 sizeof(xfs_ext_irec_t));
3715 /*
3716 * Move records down in the array so the
3717 * new page can use erp_idx.
3718 */
3719 erp = ifp->if_u1.if_ext_irec;
3720 for (i = nlists - 1; i > erp_idx; i--) {
3721 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
3722 }
3723 ASSERT(i == erp_idx);
3724
3725 /* Initialize new extent record */
3726 erp = ifp->if_u1.if_ext_irec;
3727 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3728 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3729 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
3730 erp[erp_idx].er_extcount = 0;
3731 erp[erp_idx].er_extoff = erp_idx > 0 ?
3732 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
3733	return &erp[erp_idx];
3734}
3735
3736/*
3737 * Remove a record from the indirection array.
3738 */
3739void
3740xfs_iext_irec_remove(
3741 xfs_ifork_t *ifp, /* inode fork pointer */
3742 int erp_idx) /* irec index to remove */
3743{
3744 xfs_ext_irec_t *erp; /* indirection array pointer */
3745 int i; /* loop counter */
3746 int nlists; /* number of irec's (ex lists) */
3747
3748 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3749 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3750 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3751 if (erp->er_extbuf) {
3752 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
3753 -erp->er_extcount);
3754 kmem_free(erp->er_extbuf);
3755 }
3756 /* Compact extent records */
3757 erp = ifp->if_u1.if_ext_irec;
3758 for (i = erp_idx; i < nlists - 1; i++) {
3759 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
3760 }
3761 /*
3762 * Manually free the last extent record from the indirection
3763 * array. A call to xfs_iext_realloc_indirect() with a size
3764 * of zero would result in a call to xfs_iext_destroy() which
3765 * would in turn call this function again, creating a nasty
3766 * infinite loop.
3767 */
3768 if (--nlists) {
3769 xfs_iext_realloc_indirect(ifp,
3770 nlists * sizeof(xfs_ext_irec_t));
3771 } else {
3772 kmem_free(ifp->if_u1.if_ext_irec);
3773 }
3774 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3775}
3776
3777/*
3778 * This is called to clean up large amounts of unused memory allocated
3779 * by the indirection array. Before compacting anything though, verify
3780 * that the indirection array is still needed and switch back to the
3781 * linear extent list (or even the inline buffer) if possible. The
3782 * compaction policy is as follows:
3783 *
3784 * Full Compaction: Extents fit into a single page (or inline buffer)
3785 * Partial Compaction: Extents occupy less than 50% of allocated space
3786 * No Compaction: Extents occupy at least 50% of allocated space
3787 */
3788void
3789xfs_iext_irec_compact(
3790 xfs_ifork_t *ifp) /* inode fork pointer */
3791{
3792 xfs_extnum_t nextents; /* number of extents in file */
3793 int nlists; /* number of irec's (ex lists) */
3794
3795 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3796 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3797 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3798
3799 if (nextents == 0) {
3800 xfs_iext_destroy(ifp);
3801 } else if (nextents <= XFS_INLINE_EXTS) {
3802 xfs_iext_indirect_to_direct(ifp);
3803 xfs_iext_direct_to_inline(ifp, nextents);
3804 } else if (nextents <= XFS_LINEAR_EXTS) {
3805 xfs_iext_indirect_to_direct(ifp);
3806 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
3807 xfs_iext_irec_compact_pages(ifp);
3808 }
3809}
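/*
 * Worked example of the policy above (illustrative numbers): with
 * nlists = 4 pages allocated, partial compaction runs only while
 *
 *	nextents < (4 * XFS_LINEAR_EXTS) >> 1
 *
 * i.e. while the records would fit in fewer than half the allocated
 * pages; at 50% utilization or better the layout is left alone.
 */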
3810
3811/*
3812 * Combine extents from neighboring extent pages.
3813 */
3814void
3815xfs_iext_irec_compact_pages(
3816 xfs_ifork_t *ifp) /* inode fork pointer */
3817{
3818 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
3819 int erp_idx = 0; /* indirection array index */
3820 int nlists; /* number of irec's (ex lists) */
3821
3822 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3823 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3824 while (erp_idx < nlists - 1) {
3825 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3826 erp_next = erp + 1;
3827 if (erp_next->er_extcount <=
3828 (XFS_LINEAR_EXTS - erp->er_extcount)) {
3829 memcpy(&erp->er_extbuf[erp->er_extcount],
3830 erp_next->er_extbuf, erp_next->er_extcount *
3831 sizeof(xfs_bmbt_rec_t));
3832 erp->er_extcount += erp_next->er_extcount;
3833 /*
3834 * Free page before removing extent record
3835 * so er_extoffs don't get modified in
3836 * xfs_iext_irec_remove.
3837 */
3838 kmem_free(erp_next->er_extbuf);
3839 erp_next->er_extbuf = NULL;
3840 xfs_iext_irec_remove(ifp, erp_idx + 1);
3841 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3842 } else {
3843 erp_idx++;
3844 }
3845 }
3846}
3847
3848/*
3849 * This is called to update the er_extoff field in the indirection
3850 * array when extents have been added or removed from one of the
3851 * extent lists. erp_idx contains the irec index to begin updating
3852 * at and ext_diff contains the number of extents that were added
3853 * or removed.
3854 */
3855void
3856xfs_iext_irec_update_extoffs(
3857 xfs_ifork_t *ifp, /* inode fork pointer */
3858 int erp_idx, /* irec index to update */
3859 int ext_diff) /* number of new extents */
3860{
3861 int i; /* loop counter */
3862	int		nlists;		/* number of irec's (ex lists) */
3863
3864 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3865 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3866 for (i = erp_idx; i < nlists; i++) {
3867 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
3868 }
3869}
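/*
 * Usage sketch (illustrative only): after inserting ext_diff records
 * into the extent list at erp_idx, bump the file-based offsets of all
 * following lists so extent indices remain contiguous:
 *
 *	erp->er_extcount += ext_diff;
 *	xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
 */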
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include <linux/iversion.h>
7
8#include "xfs.h"
9#include "xfs_fs.h"
10#include "xfs_shared.h"
11#include "xfs_format.h"
12#include "xfs_log_format.h"
13#include "xfs_trans_resv.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_inode.h"
17#include "xfs_dir2.h"
18#include "xfs_attr.h"
19#include "xfs_bit.h"
20#include "xfs_trans_space.h"
21#include "xfs_trans.h"
22#include "xfs_buf_item.h"
23#include "xfs_inode_item.h"
24#include "xfs_iunlink_item.h"
25#include "xfs_ialloc.h"
26#include "xfs_bmap.h"
27#include "xfs_bmap_util.h"
28#include "xfs_errortag.h"
29#include "xfs_error.h"
30#include "xfs_quota.h"
31#include "xfs_filestream.h"
32#include "xfs_trace.h"
33#include "xfs_icache.h"
34#include "xfs_symlink.h"
35#include "xfs_trans_priv.h"
36#include "xfs_log.h"
37#include "xfs_bmap_btree.h"
38#include "xfs_reflink.h"
39#include "xfs_ag.h"
40#include "xfs_log_priv.h"
41#include "xfs_health.h"
42#include "xfs_pnfs.h"
43#include "xfs_parent.h"
44#include "xfs_xattr.h"
45#include "xfs_inode_util.h"
46#include "xfs_metafile.h"
47
48struct kmem_cache *xfs_inode_cache;
49
50/*
51 * These two are wrapper routines around the xfs_ilock() routine used to
52 * centralize some grungy code. They are used in places that wish to lock the
53 * inode solely for reading the extents. The reason these places can't just
54 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards
55 * the reading in of the extents from disk for a file in b-tree format. If the
56 * inode is in b-tree format, then we need to lock the inode exclusively until
57 * the extents are read in. Locking it exclusively all the time would limit
58 * our parallelism unnecessarily, though. What we do instead is check to see
59 * if the extents have been read in yet, and only lock the inode exclusively
60 * if they have not.
61 *
62 * The functions return a value which should be given to the corresponding
63 * xfs_iunlock() call.
64 */
65uint
66xfs_ilock_data_map_shared(
67 struct xfs_inode *ip)
68{
69 uint lock_mode = XFS_ILOCK_SHARED;
70
71 if (xfs_need_iread_extents(&ip->i_df))
72 lock_mode = XFS_ILOCK_EXCL;
73 xfs_ilock(ip, lock_mode);
74 return lock_mode;
75}
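/*
 * Usage sketch (illustrative only): the returned lock mode must be
 * saved because it may be shared or exclusive depending on whether
 * the extents still need to be read in:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extents ...
 *	xfs_iunlock(ip, lock_mode);
 */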
76
77uint
78xfs_ilock_attr_map_shared(
79 struct xfs_inode *ip)
80{
81 uint lock_mode = XFS_ILOCK_SHARED;
82
83 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
84 lock_mode = XFS_ILOCK_EXCL;
85 xfs_ilock(ip, lock_mode);
86 return lock_mode;
87}
88
89/*
90 * You can't set both SHARED and EXCL for the same lock,
91 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
92 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
93 * to set in lock_flags.
94 */
95static inline void
96xfs_lock_flags_assert(
97 uint lock_flags)
98{
99 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
100 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
101 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
102 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
103 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
104 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
105 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
106 ASSERT(lock_flags != 0);
107}
108
109/*
110 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
111 * multi-reader locks: invalidate_lock and the i_lock. This routine allows
112 * various combinations of the locks to be obtained.
113 *
114 * The 3 locks should always be ordered so that the IO lock is obtained first,
115 * the mmap lock second and the ilock last in order to prevent deadlock.
116 *
117 * Basic locking order:
118 *
119 * i_rwsem -> invalidate_lock -> page_lock -> i_lock
120 *
121 * mmap_lock locking order:
122 *
123 * i_rwsem -> page lock -> mmap_lock
124 * mmap_lock -> invalidate_lock -> page_lock
125 *
126 * The difference in mmap_lock locking order means that we cannot hold the
127 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
128 * can fault in pages during copy in/out (for buffered IO) or require the
129 * mmap_lock in get_user_pages() to map the user pages into the kernel address
130 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
131 * fault because page faults already hold the mmap_lock.
132 *
133 * Hence to serialise fully against both syscall and mmap based IO, we need to
134 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
135 * both taken in places where we need to invalidate the page cache in a race
136 * free manner (e.g. truncate, hole punch and other extent manipulation
137 * functions).
138 */
139void
140xfs_ilock(
141 xfs_inode_t *ip,
142 uint lock_flags)
143{
144 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
145
146 xfs_lock_flags_assert(lock_flags);
147
148 if (lock_flags & XFS_IOLOCK_EXCL) {
149 down_write_nested(&VFS_I(ip)->i_rwsem,
150 XFS_IOLOCK_DEP(lock_flags));
151 } else if (lock_flags & XFS_IOLOCK_SHARED) {
152 down_read_nested(&VFS_I(ip)->i_rwsem,
153 XFS_IOLOCK_DEP(lock_flags));
154 }
155
156 if (lock_flags & XFS_MMAPLOCK_EXCL) {
157 down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
158 XFS_MMAPLOCK_DEP(lock_flags));
159 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
160 down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
161 XFS_MMAPLOCK_DEP(lock_flags));
162 }
163
164 if (lock_flags & XFS_ILOCK_EXCL)
165 down_write_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
166 else if (lock_flags & XFS_ILOCK_SHARED)
167 down_read_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
168}
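/*
 * Usage sketch (illustrative only): an extent manipulation path such
 * as hole punching takes the IO and mmap locks together, in the order
 * described above, to shut out both syscall and mmap based IO:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache, take XFS_ILOCK_EXCL within the
 *	    transaction, do the work ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */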
169
170/*
171 * This is just like xfs_ilock(), except that the caller
172 * is guaranteed not to sleep. It returns 1 if it gets
173 * the requested locks and 0 otherwise. If the IO lock is
174 * obtained but the inode lock cannot be, then the IO lock
175 * is dropped before returning.
176 *
177 * ip -- the inode being locked
178 * lock_flags -- this parameter indicates the inode's locks to be
179 * locked. See the comment for xfs_ilock() for a list
180 * of valid values.
181 */
182int
183xfs_ilock_nowait(
184 xfs_inode_t *ip,
185 uint lock_flags)
186{
187 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
188
189 xfs_lock_flags_assert(lock_flags);
190
191 if (lock_flags & XFS_IOLOCK_EXCL) {
192 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
193 goto out;
194 } else if (lock_flags & XFS_IOLOCK_SHARED) {
195 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
196 goto out;
197 }
198
199 if (lock_flags & XFS_MMAPLOCK_EXCL) {
200 if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
201 goto out_undo_iolock;
202 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
203 if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
204 goto out_undo_iolock;
205 }
206
207 if (lock_flags & XFS_ILOCK_EXCL) {
208 if (!down_write_trylock(&ip->i_lock))
209 goto out_undo_mmaplock;
210 } else if (lock_flags & XFS_ILOCK_SHARED) {
211 if (!down_read_trylock(&ip->i_lock))
212 goto out_undo_mmaplock;
213 }
214 return 1;
215
216out_undo_mmaplock:
217 if (lock_flags & XFS_MMAPLOCK_EXCL)
218 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
219 else if (lock_flags & XFS_MMAPLOCK_SHARED)
220 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
221out_undo_iolock:
222 if (lock_flags & XFS_IOLOCK_EXCL)
223 up_write(&VFS_I(ip)->i_rwsem);
224 else if (lock_flags & XFS_IOLOCK_SHARED)
225 up_read(&VFS_I(ip)->i_rwsem);
226out:
227 return 0;
228}
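/*
 * Usage sketch (illustrative only): opportunistic callers back off
 * instead of sleeping when the inode is contended:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return -EAGAIN;
 *	... fast path work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */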
229
230/*
231 * xfs_iunlock() is used to drop the inode locks acquired with
232 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
233 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
234 * that we know which locks to drop.
235 *
236 * ip -- the inode being unlocked
237 * lock_flags -- this parameter indicates the inode's locks to be
238 * unlocked. See the comment for xfs_ilock() for a list
239 * of valid values for this parameter.
240 *
241 */
242void
243xfs_iunlock(
244 xfs_inode_t *ip,
245 uint lock_flags)
246{
247 xfs_lock_flags_assert(lock_flags);
248
249 if (lock_flags & XFS_IOLOCK_EXCL)
250 up_write(&VFS_I(ip)->i_rwsem);
251 else if (lock_flags & XFS_IOLOCK_SHARED)
252 up_read(&VFS_I(ip)->i_rwsem);
253
254 if (lock_flags & XFS_MMAPLOCK_EXCL)
255 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
256 else if (lock_flags & XFS_MMAPLOCK_SHARED)
257 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
258
259 if (lock_flags & XFS_ILOCK_EXCL)
260 up_write(&ip->i_lock);
261 else if (lock_flags & XFS_ILOCK_SHARED)
262 up_read(&ip->i_lock);
263
264 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
265}
266
267/*
268 * Give up write locks. The I/O lock cannot be held nested
269 * if it is being demoted.
270 */
271void
272xfs_ilock_demote(
273 xfs_inode_t *ip,
274 uint lock_flags)
275{
276 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
277 ASSERT((lock_flags &
278 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
279
280 if (lock_flags & XFS_ILOCK_EXCL)
281 downgrade_write(&ip->i_lock);
282 if (lock_flags & XFS_MMAPLOCK_EXCL)
283 downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
284 if (lock_flags & XFS_IOLOCK_EXCL)
285 downgrade_write(&VFS_I(ip)->i_rwsem);
286
287 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
288}
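/*
 * Usage sketch (illustrative only): a caller that has finished the
 * exclusive phase of an operation can let readers in without dropping
 * the lock entirely:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... exclusive work ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... shared work ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */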
289
290void
291xfs_assert_ilocked(
292 struct xfs_inode *ip,
293 uint lock_flags)
294{
295 /*
296 * Sometimes we assert the ILOCK is held exclusively, but we're in
297 * a workqueue, so lockdep doesn't know we're the owner.
298 */
299 if (lock_flags & XFS_ILOCK_SHARED)
300 rwsem_assert_held(&ip->i_lock);
301 else if (lock_flags & XFS_ILOCK_EXCL)
302 rwsem_assert_held_write_nolockdep(&ip->i_lock);
303
304 if (lock_flags & XFS_MMAPLOCK_SHARED)
305 rwsem_assert_held(&VFS_I(ip)->i_mapping->invalidate_lock);
306 else if (lock_flags & XFS_MMAPLOCK_EXCL)
307 rwsem_assert_held_write(&VFS_I(ip)->i_mapping->invalidate_lock);
308
309 if (lock_flags & XFS_IOLOCK_SHARED)
310 rwsem_assert_held(&VFS_I(ip)->i_rwsem);
311 else if (lock_flags & XFS_IOLOCK_EXCL)
312 rwsem_assert_held_write(&VFS_I(ip)->i_rwsem);
313}
314
315/*
316 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
317 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
318 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
319 * errors and warnings.
320 */
321#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
322static bool
323xfs_lockdep_subclass_ok(
324 int subclass)
325{
326 return subclass < MAX_LOCKDEP_SUBCLASSES;
327}
328#else
329#define xfs_lockdep_subclass_ok(subclass) (true)
330#endif
331
332/*
333 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
334 * value. This can be called for any type of inode lock combination, including
335 * parent locking. Care must be taken to ensure we don't overrun the subclass
336 * storage fields in the class mask we build.
337 */
338static inline uint
339xfs_lock_inumorder(
340 uint lock_mode,
341 uint subclass)
342{
343 uint class = 0;
344
345 ASSERT(!(lock_mode & XFS_ILOCK_PARENT));
346 ASSERT(xfs_lockdep_subclass_ok(subclass));
347
348 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
349 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
350 class += subclass << XFS_IOLOCK_SHIFT;
351 }
352
353 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
354 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
355 class += subclass << XFS_MMAPLOCK_SHIFT;
356 }
357
358 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
359 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
360 class += subclass << XFS_ILOCK_SHIFT;
361 }
362
363 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
364}
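/*
 * Usage sketch (illustrative only): locking inodes in ascending inode
 * number order with increasing subclasses keeps lockdep happy:
 *
 *	for (i = 0; i < inodes; i++)
 *		xfs_ilock(ips[i], xfs_lock_inumorder(XFS_ILOCK_EXCL, i));
 */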
365
366/*
367 * The following routine will lock n inodes in exclusive mode. We assume the
368 * caller calls us with the inodes in i_ino order.
369 *
370 * We need to detect deadlock where an inode that we lock is in the AIL and we
371 * start waiting for another inode that is locked by a thread in a long running
372 * transaction (such as truncate). This can result in deadlock since the long
373 * running trans might need to wait for the inode we just locked in order to
374 * push the tail and free space in the log.
375 *
376 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
377 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
378 * lock more than one at a time, lockdep will report false positives saying we
379 * have violated locking orders.
380 */
381void
382xfs_lock_inodes(
383 struct xfs_inode **ips,
384 int inodes,
385 uint lock_mode)
386{
387 int attempts = 0;
388 uint i;
389 int j;
390 bool try_lock;
391 struct xfs_log_item *lp;
392
393 /*
394 * Currently supports between 2 and 5 inodes with exclusive locking. We
395 * support an arbitrary depth of locking here, but absolute limits on
396 * inodes depend on the type of locking and the limits placed by
397 * lockdep annotations in xfs_lock_inumorder. These are all checked by
398 * the asserts.
399 */
400 ASSERT(ips && inodes >= 2 && inodes <= 5);
401 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
402 XFS_ILOCK_EXCL));
403 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
404 XFS_ILOCK_SHARED)));
405 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
406 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
407 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
408 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
409
410 if (lock_mode & XFS_IOLOCK_EXCL) {
411 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
412 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
413 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
414
415again:
416 try_lock = false;
417	for (i = 0; i < inodes; i++) {
419 ASSERT(ips[i]);
420
421 if (i && (ips[i] == ips[i - 1])) /* Already locked */
422 continue;
423
424 /*
425 * If try_lock is not set yet, make sure all locked inodes are
426 * not in the AIL. If any are, set try_lock to be used later.
427 */
428 if (!try_lock) {
429 for (j = (i - 1); j >= 0 && !try_lock; j--) {
430 lp = &ips[j]->i_itemp->ili_item;
431 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
432 try_lock = true;
433 }
434 }
435
436 /*
437 * If any of the inodes we have already locked is in the AIL,
438 * we must TRY to get the second and subsequent locks. If
439 * we can't get any, we must release all we have
440 * and try again.
441 */
442 if (!try_lock) {
443 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
444 continue;
445 }
446
447 /* try_lock means we have an inode locked that is in the AIL. */
448 ASSERT(i != 0);
449 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
450 continue;
451
452 /*
453 * Unlock all previous guys and try again. xfs_iunlock will try
454 * to push the tail if the inode is in the AIL.
455 */
456 attempts++;
457 for (j = i - 1; j >= 0; j--) {
458 /*
459 * Check to see if we've already unlocked this one: we're
460 * not at the first one going back and the inode pointer
461 * is the same as the one we just unlocked.
462 */
463 if (j != (i - 1) && ips[j] == ips[j + 1])
464 continue;
465
466 xfs_iunlock(ips[j], lock_mode);
467 }
468
469 if ((attempts % 5) == 0) {
470 delay(1); /* Don't just spin the CPU */
471 }
472 goto again;
473 }
474}
475
476/*
477 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
478 * mmaplock must be double-locked separately since we use i_rwsem and
479 * invalidate_lock for that. We now support taking one lock EXCL and the
480 * other SHARED.
481 */
482void
483xfs_lock_two_inodes(
484 struct xfs_inode *ip0,
485 uint ip0_mode,
486 struct xfs_inode *ip1,
487 uint ip1_mode)
488{
489 int attempts = 0;
490 struct xfs_log_item *lp;
491
492 ASSERT(hweight32(ip0_mode) == 1);
493 ASSERT(hweight32(ip1_mode) == 1);
494 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
495 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
496 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
497 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
498 ASSERT(ip0->i_ino != ip1->i_ino);
499
500 if (ip0->i_ino > ip1->i_ino) {
501 swap(ip0, ip1);
502 swap(ip0_mode, ip1_mode);
503 }
504
505 again:
506 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
507
508 /*
509 * If the first lock we have locked is in the AIL, we must TRY to get
510 * the second lock. If we can't get it, we must release the first one
511 * and try again.
512 */
513 lp = &ip0->i_itemp->ili_item;
514 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
515 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
516 xfs_iunlock(ip0, ip0_mode);
517 if ((++attempts % 5) == 0)
518 delay(1); /* Don't just spin the CPU */
519 goto again;
520 }
521 } else {
522 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
523 }
524}
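/*
 * Usage sketch (illustrative only): one inode can be modified while
 * the other is only read, as the two modes may differ:
 *
 *	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_SHARED);
 *	...
 *	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */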
525
526/*
527 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
528 * is allowed, otherwise it has to be an exact match. If a CI match is found,
529 * ci_name->name will point to the actual name (caller must free) or
530 * will be set to NULL if an exact match is found.
531 */
532int
533xfs_lookup(
534 struct xfs_inode *dp,
535 const struct xfs_name *name,
536 struct xfs_inode **ipp,
537 struct xfs_name *ci_name)
538{
539 xfs_ino_t inum;
540 int error;
541
542 trace_xfs_lookup(dp, name);
543
544 if (xfs_is_shutdown(dp->i_mount))
545 return -EIO;
546 if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
547 return -EIO;
548
549 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
550 if (error)
551 goto out_unlock;
552
553 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
554 if (error)
555 goto out_free_name;
556
557 /*
558 * Fail if a directory entry in the regular directory tree points to
559 * a metadata file.
560 */
561 if (XFS_IS_CORRUPT(dp->i_mount, xfs_is_metadir_inode(*ipp))) {
562 xfs_fs_mark_sick(dp->i_mount, XFS_SICK_FS_METADIR);
563 error = -EFSCORRUPTED;
564 goto out_irele;
565 }
566
567 return 0;
568
569out_irele:
570 xfs_irele(*ipp);
571out_free_name:
572 if (ci_name)
573 kfree(ci_name->name);
574out_unlock:
575 *ipp = NULL;
576 return error;
577}
578
579/*
580 * Initialise a newly allocated inode and return the in-core inode to the
581 * caller locked exclusively.
582 *
583 * Caller is responsible for unlocking the inode manually upon return.
584 */
585int
586xfs_icreate(
587 struct xfs_trans *tp,
588 xfs_ino_t ino,
589 const struct xfs_icreate_args *args,
590 struct xfs_inode **ipp)
591{
592 struct xfs_mount *mp = tp->t_mountp;
593 struct xfs_inode *ip = NULL;
594 int error;
595
596 /*
597 * Get the in-core inode with the lock held exclusively to prevent
598 * others from looking at it until we're done.
599 */
600 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
601 if (error)
602 return error;
603
604 ASSERT(ip != NULL);
605 xfs_trans_ijoin(tp, ip, 0);
606 xfs_inode_init(tp, args, ip);
607
608 /* now that we have an i_mode we can setup the inode structure */
609 xfs_setup_inode(ip);
610
611 *ipp = ip;
612 return 0;
613}
614
615/* Return dquots for the ids that will be assigned to a new file. */
616int
617xfs_icreate_dqalloc(
618 const struct xfs_icreate_args *args,
619 struct xfs_dquot **udqpp,
620 struct xfs_dquot **gdqpp,
621 struct xfs_dquot **pdqpp)
622{
623 struct inode *dir = VFS_I(args->pip);
624 kuid_t uid = GLOBAL_ROOT_UID;
625 kgid_t gid = GLOBAL_ROOT_GID;
626 prid_t prid = 0;
627 unsigned int flags = XFS_QMOPT_QUOTALL;
628
629 if (args->idmap) {
630 /*
631 * The uid/gid computation code must match what the VFS uses to
632 * assign i_[ug]id. INHERIT adjusts the gid computation for
633 * setgid/grpid systems.
634 */
635 uid = mapped_fsuid(args->idmap, i_user_ns(dir));
636 gid = mapped_fsgid(args->idmap, i_user_ns(dir));
637 prid = xfs_get_initial_prid(args->pip);
638 flags |= XFS_QMOPT_INHERIT;
639 }
640
641 *udqpp = *gdqpp = *pdqpp = NULL;
642
643 return xfs_qm_vop_dqalloc(args->pip, uid, gid, prid, flags, udqpp,
644 gdqpp, pdqpp);
645}
646
647int
648xfs_create(
649 const struct xfs_icreate_args *args,
650 struct xfs_name *name,
651 struct xfs_inode **ipp)
652{
653 struct xfs_inode *dp = args->pip;
654 struct xfs_dir_update du = {
655 .dp = dp,
656 .name = name,
657 };
658 struct xfs_mount *mp = dp->i_mount;
659 struct xfs_trans *tp = NULL;
660 struct xfs_dquot *udqp;
661 struct xfs_dquot *gdqp;
662 struct xfs_dquot *pdqp;
663 struct xfs_trans_res *tres;
664 xfs_ino_t ino;
665 bool unlock_dp_on_error = false;
666 bool is_dir = S_ISDIR(args->mode);
667 uint resblks;
668 int error;
669
670 trace_xfs_create(dp, name);
671
672 if (xfs_is_shutdown(mp))
673 return -EIO;
674 if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
675 return -EIO;
676
677 /* Make sure that we have allocated dquot(s) on disk. */
678 error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
679 if (error)
680 return error;
681
682 if (is_dir) {
683 resblks = xfs_mkdir_space_res(mp, name->len);
684 tres = &M_RES(mp)->tr_mkdir;
685 } else {
686 resblks = xfs_create_space_res(mp, name->len);
687 tres = &M_RES(mp)->tr_create;
688 }
689
690 error = xfs_parent_start(mp, &du.ppargs);
691 if (error)
692 goto out_release_dquots;
693
694 /*
695 * Initially assume that the file does not exist and
696 * reserve the resources for that case. If that is not
697 * the case we'll drop the one we have and get a more
698 * appropriate transaction later.
699 */
700 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
701 &tp);
702 if (error == -ENOSPC) {
703 /* flush outstanding delalloc blocks and retry */
704 xfs_flush_inodes(mp);
705 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
706 resblks, &tp);
707 }
708 if (error)
709 goto out_parent;
710
711 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
712 unlock_dp_on_error = true;
713
714 /*
715 * A newly created regular or special file just has one directory
716 * entry pointing to it, but a directory also has the "." entry
717 * pointing to itself.
718 */
719 error = xfs_dialloc(&tp, args, &ino);
720 if (!error)
721 error = xfs_icreate(tp, ino, args, &du.ip);
722 if (error)
723 goto out_trans_cancel;
724
725 /*
726 * Now we join the directory inode to the transaction. We do not do it
727 * earlier because xfs_dialloc might commit the previous transaction
728 * (and release all the locks). An error from here on will result in
729 * the transaction cancel unlocking dp so don't do it explicitly in the
730 * error path.
731 */
732 xfs_trans_ijoin(tp, dp, 0);
733
734 error = xfs_dir_create_child(tp, resblks, &du);
735 if (error)
736 goto out_trans_cancel;
737
738 /*
739 * If this is a synchronous mount, make sure that the
740 * create transaction goes to disk before returning to
741 * the user.
742 */
743 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
744 xfs_trans_set_sync(tp);
745
746 /*
747 * Attach the dquot(s) to the inodes and modify them incore.
748 * The ids of the inode couldn't have changed since the new
749 * inode has been locked ever since it was created.
750 */
751 xfs_qm_vop_create_dqattach(tp, du.ip, udqp, gdqp, pdqp);
752
753 error = xfs_trans_commit(tp);
754 if (error)
755 goto out_release_inode;
756
757 xfs_qm_dqrele(udqp);
758 xfs_qm_dqrele(gdqp);
759 xfs_qm_dqrele(pdqp);
760
761 *ipp = du.ip;
762 xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
763 xfs_iunlock(dp, XFS_ILOCK_EXCL);
764 xfs_parent_finish(mp, du.ppargs);
765 return 0;
766
767 out_trans_cancel:
768 xfs_trans_cancel(tp);
769 out_release_inode:
770 /*
771 * Wait until after the current transaction is aborted to finish the
772 * setup of the inode and release the inode. This prevents recursive
773 * transactions and deadlocks from xfs_inactive.
774 */
775 if (du.ip) {
776 xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
777 xfs_finish_inode_setup(du.ip);
778 xfs_irele(du.ip);
779 }
780 out_parent:
781 xfs_parent_finish(mp, du.ppargs);
782 out_release_dquots:
783 xfs_qm_dqrele(udqp);
784 xfs_qm_dqrele(gdqp);
785 xfs_qm_dqrele(pdqp);
786
787 if (unlock_dp_on_error)
788 xfs_iunlock(dp, XFS_ILOCK_EXCL);
789 return error;
790}
791
792int
793xfs_create_tmpfile(
794 const struct xfs_icreate_args *args,
795 struct xfs_inode **ipp)
796{
797 struct xfs_inode *dp = args->pip;
798 struct xfs_mount *mp = dp->i_mount;
799 struct xfs_inode *ip = NULL;
800 struct xfs_trans *tp = NULL;
801 struct xfs_dquot *udqp;
802 struct xfs_dquot *gdqp;
803 struct xfs_dquot *pdqp;
804 struct xfs_trans_res *tres;
805 xfs_ino_t ino;
806 uint resblks;
807 int error;
808
809 ASSERT(args->flags & XFS_ICREATE_TMPFILE);
810
811 if (xfs_is_shutdown(mp))
812 return -EIO;
813
814 /* Make sure that we have allocated dquot(s) on disk. */
815 error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
816 if (error)
817 return error;
818
819 resblks = XFS_IALLOC_SPACE_RES(mp);
820 tres = &M_RES(mp)->tr_create_tmpfile;
821
822 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
823 &tp);
824 if (error)
825 goto out_release_dquots;
826
827 error = xfs_dialloc(&tp, args, &ino);
828 if (!error)
829 error = xfs_icreate(tp, ino, args, &ip);
830 if (error)
831 goto out_trans_cancel;
832
833 if (xfs_has_wsync(mp))
834 xfs_trans_set_sync(tp);
835
836 /*
837 * Attach the dquot(s) to the inodes and modify them incore.
838 * The ids of the inode couldn't have changed since the new
839 * inode has been locked ever since it was created.
840 */
841 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
842
843 error = xfs_iunlink(tp, ip);
844 if (error)
845 goto out_trans_cancel;
846
847 error = xfs_trans_commit(tp);
848 if (error)
849 goto out_release_inode;
850
851 xfs_qm_dqrele(udqp);
852 xfs_qm_dqrele(gdqp);
853 xfs_qm_dqrele(pdqp);
854
855 *ipp = ip;
856 xfs_iunlock(ip, XFS_ILOCK_EXCL);
857 return 0;
858
859 out_trans_cancel:
860 xfs_trans_cancel(tp);
861 out_release_inode:
862 /*
863 * Wait until after the current transaction is aborted to finish the
864 * setup of the inode and release the inode. This prevents recursive
865 * transactions and deadlocks from xfs_inactive.
866 */
867 if (ip) {
868 xfs_iunlock(ip, XFS_ILOCK_EXCL);
869 xfs_finish_inode_setup(ip);
870 xfs_irele(ip);
871 }
872 out_release_dquots:
873 xfs_qm_dqrele(udqp);
874 xfs_qm_dqrele(gdqp);
875 xfs_qm_dqrele(pdqp);
876
877 return error;
878}
879
880int
881xfs_link(
882 struct xfs_inode *tdp,
883 struct xfs_inode *sip,
884 struct xfs_name *target_name)
885{
886 struct xfs_dir_update du = {
887 .dp = tdp,
888 .name = target_name,
889 .ip = sip,
890 };
891 struct xfs_mount *mp = tdp->i_mount;
892 struct xfs_trans *tp;
893 int error, nospace_error = 0;
894 int resblks;
895
896 trace_xfs_link(tdp, target_name);
897
898 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
899
900 if (xfs_is_shutdown(mp))
901 return -EIO;
902 if (xfs_ifork_zapped(tdp, XFS_DATA_FORK))
903 return -EIO;
904
905 error = xfs_qm_dqattach(sip);
906 if (error)
907 goto std_return;
908
909 error = xfs_qm_dqattach(tdp);
910 if (error)
911 goto std_return;
912
913 error = xfs_parent_start(mp, &du.ppargs);
914 if (error)
915 goto std_return;
916
917 resblks = xfs_link_space_res(mp, target_name->len);
918 error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
919 &tp, &nospace_error);
920 if (error)
921 goto out_parent;
922
923 /*
924 * We don't allow reservationless or quotaless hardlinking when parent
925 * pointers are enabled because we can't back out if the xattrs must
926 * grow.
927 */
928 if (du.ppargs && nospace_error) {
929 error = nospace_error;
930 goto error_return;
931 }
932
933 /*
934 * If we are using project inheritance, we only allow hard link
935 * creation in our tree when the project IDs are the same; else
936 * the tree quota mechanism could be circumvented.
937 */
938 if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
939 tdp->i_projid != sip->i_projid)) {
940 /*
941 * Project quota setup skips special files which can
942 * leave inodes in a PROJINHERIT directory without a
943 * project ID set. We need to allow links to be made
944 * to these "project-less" inodes because userspace
945 * expects them to succeed after project ID setup,
946 * but everything else should be rejected.
947 */
948 if (!special_file(VFS_I(sip)->i_mode) ||
949 sip->i_projid != 0) {
950 error = -EXDEV;
951 goto error_return;
952 }
953 }
954
955 error = xfs_dir_add_child(tp, resblks, &du);
956 if (error)
957 goto error_return;
958
959 /*
960 * If this is a synchronous mount, make sure that the
961 * link transaction goes to disk before returning to
962 * the user.
963 */
964 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
965 xfs_trans_set_sync(tp);
966
967 error = xfs_trans_commit(tp);
968 xfs_iunlock(tdp, XFS_ILOCK_EXCL);
969 xfs_iunlock(sip, XFS_ILOCK_EXCL);
970 xfs_parent_finish(mp, du.ppargs);
971 return error;
972
973 error_return:
974 xfs_trans_cancel(tp);
975 xfs_iunlock(tdp, XFS_ILOCK_EXCL);
976 xfs_iunlock(sip, XFS_ILOCK_EXCL);
977 out_parent:
978 xfs_parent_finish(mp, du.ppargs);
979 std_return:
980 if (error == -ENOSPC && nospace_error)
981 error = nospace_error;
982 return error;
983}
984
985/* Clear the reflink flag and the cowblocks tag if possible. */
986static void
987xfs_itruncate_clear_reflink_flags(
988 struct xfs_inode *ip)
989{
990 struct xfs_ifork *dfork;
991 struct xfs_ifork *cfork;
992
993 if (!xfs_is_reflink_inode(ip))
994 return;
995 dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
996 cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
997 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
998 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
999 if (cfork->if_bytes == 0)
1000 xfs_inode_clear_cowblocks_tag(ip);
1001}
1002
1003/*
1004 * Free up the underlying blocks past new_size. The new size must be smaller
1005 * than the current size. This routine can be used both for the attribute and
1006 * data fork, and does not modify the inode size, which is left to the caller.
1007 *
1008 * The transaction passed to this routine must have made a permanent log
1009 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1010 * given transaction and start new ones, so make sure everything involved in
1011 * the transaction is tidy before calling here. Some transaction will be
1012 * returned to the caller to be committed. The incoming transaction must
1013 * already include the inode, and both inode locks must be held exclusively.
1014 * The inode must also be "held" within the transaction. On return the inode
1015 * will be "held" within the returned transaction. This routine does NOT
1016 * require any disk space to be reserved for it within the transaction.
1017 *
1018 * If we get an error, we must return with the inode locked and linked into the
1019 * current transaction. This keeps things simple for the higher level code,
1020 * because it always knows that the inode is locked and held in the transaction
1021 * that returns to it whether errors occur or not. We don't mark the inode
1022 * dirty on error so that transactions can be easily aborted if possible.
1023 */
1024int
1025xfs_itruncate_extents_flags(
1026 struct xfs_trans **tpp,
1027 struct xfs_inode *ip,
1028 int whichfork,
1029 xfs_fsize_t new_size,
1030 int flags)
1031{
1032 struct xfs_mount *mp = ip->i_mount;
1033 struct xfs_trans *tp = *tpp;
1034 xfs_fileoff_t first_unmap_block;
1035 int error = 0;
1036
1037 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1038 if (atomic_read(&VFS_I(ip)->i_count))
1039 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
1040 ASSERT(new_size <= XFS_ISIZE(ip));
1041 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1042 ASSERT(ip->i_itemp != NULL);
1043 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1044 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1045
1046 trace_xfs_itruncate_extents_start(ip, new_size);
1047
1048 flags |= xfs_bmapi_aflag(whichfork);
1049
1050 /*
1051 * Since it is possible for space to become allocated beyond
1052 * the end of the file (in a crash where the space is allocated
1053 * but the inode size is not yet updated), simply remove any
1054 * blocks which show up between the new EOF and the maximum
1055 * possible file size.
1056 *
1057 * We have to free all the blocks to the bmbt maximum offset, even if
1058 * the page cache can't scale that far.
1059 */
1060 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1061 if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1062 WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1063 return 0;
1064 }
1065
1066 error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
1067 XFS_MAX_FILEOFF);
1068 if (error)
1069 goto out;
1070
1071 if (whichfork == XFS_DATA_FORK) {
1072 /* Remove all pending CoW reservations. */
1073 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1074 first_unmap_block, XFS_MAX_FILEOFF, true);
1075 if (error)
1076 goto out;
1077
1078 xfs_itruncate_clear_reflink_flags(ip);
1079 }
1080
1081 /*
1082 * Always re-log the inode so that our permanent transaction can keep
1083 * on rolling it forward in the log.
1084 */
1085 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1086
1087 trace_xfs_itruncate_extents_end(ip, new_size);
1088
1089out:
1090 *tpp = tp;
1091 return error;
1092}
1093
1094/*
1095 * Mark all the buffers attached to this directory stale. In theory we should
1096 * never be freeing a directory with any blocks at all, but this covers the
1097 * case where we've recovered a directory swap with a "temporary" directory
1098 * created by online repair and now need to dump it.
1099 */
1100STATIC void
1101xfs_inactive_dir(
1102 struct xfs_inode *dp)
1103{
1104 struct xfs_iext_cursor icur;
1105 struct xfs_bmbt_irec got;
1106 struct xfs_mount *mp = dp->i_mount;
1107 struct xfs_da_geometry *geo = mp->m_dir_geo;
1108 struct xfs_ifork *ifp = xfs_ifork_ptr(dp, XFS_DATA_FORK);
1109 xfs_fileoff_t off;
1110
1111 /*
1112 * Invalidate each directory block. All directory blocks are of
1113 * fsbcount length and alignment, so we only need to walk those same
1114 * offsets. We hold the only reference to this inode, so we must wait
1115 * for the buffer locks.
1116 */
1117 for_each_xfs_iext(ifp, &icur, &got) {
1118 for (off = round_up(got.br_startoff, geo->fsbcount);
1119 off < got.br_startoff + got.br_blockcount;
1120 off += geo->fsbcount) {
1121 struct xfs_buf *bp = NULL;
1122 xfs_fsblock_t fsbno;
1123 int error;
1124
1125 fsbno = (off - got.br_startoff) + got.br_startblock;
1126 error = xfs_buf_incore(mp->m_ddev_targp,
1127 XFS_FSB_TO_DADDR(mp, fsbno),
1128 XFS_FSB_TO_BB(mp, geo->fsbcount),
1129 XBF_LIVESCAN, &bp);
1130 if (error)
1131 continue;
1132
1133 xfs_buf_stale(bp);
1134 xfs_buf_relse(bp);
1135 }
1136 }
1137}
1138
1139/*
1140 * xfs_inactive_truncate
1141 *
1142 * Called to perform a truncate when an inode becomes unlinked.
1143 */
1144STATIC int
1145xfs_inactive_truncate(
1146 struct xfs_inode *ip)
1147{
1148 struct xfs_mount *mp = ip->i_mount;
1149 struct xfs_trans *tp;
1150 int error;
1151
1152 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1153 if (error) {
1154 ASSERT(xfs_is_shutdown(mp));
1155 return error;
1156 }
1157 xfs_ilock(ip, XFS_ILOCK_EXCL);
1158 xfs_trans_ijoin(tp, ip, 0);
1159
1160 /*
1161 * Log the inode size first to prevent stale data exposure in the event
1162 * of a system crash before the truncate completes. See the related
1163 * comment in xfs_vn_setattr_size() for details.
1164 */
1165 ip->i_disk_size = 0;
1166 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1167
1168 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1169 if (error)
1170 goto error_trans_cancel;
1171
1172 ASSERT(ip->i_df.if_nextents == 0);
1173
1174 error = xfs_trans_commit(tp);
1175 if (error)
1176 goto error_unlock;
1177
1178 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1179 return 0;
1180
1181error_trans_cancel:
1182 xfs_trans_cancel(tp);
1183error_unlock:
1184 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1185 return error;
1186}
1187
1188/*
1189 * xfs_inactive_ifree()
1190 *
1191 * Perform the inode free when an inode is unlinked.
1192 */
1193STATIC int
1194xfs_inactive_ifree(
1195 struct xfs_inode *ip)
1196{
1197 struct xfs_mount *mp = ip->i_mount;
1198 struct xfs_trans *tp;
1199 int error;
1200
1201 /*
1202 * We try to use a per-AG reservation for any block needed by the finobt
1203 * tree, but as the finobt feature predates the per-AG reservation
1204 * support, a degraded file system might not have enough space for the
1205 * reservation at mount time. In that case try to dip into the reserved
1206 * pool and pray.
1207 *
1208 * Send a warning if the reservation does happen to fail, as the inode
1209 * now remains allocated and sits on the unlinked list until the fs is
1210 * repaired.
1211 */
1212 if (unlikely(mp->m_finobt_nores)) {
1213 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1214 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1215 &tp);
1216 } else {
1217 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1218 }
1219 if (error) {
1220 if (error == -ENOSPC) {
1221 xfs_warn_ratelimited(mp,
1222 "Failed to remove inode(s) from unlinked list. "
1223 "Please free space, unmount and run xfs_repair.");
1224 } else {
1225 ASSERT(xfs_is_shutdown(mp));
1226 }
1227 return error;
1228 }
1229
1230 /*
1231 * We do not hold the inode locked across the entire rolling transaction
1232 * here. We only need to hold it for the first transaction that
1233 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1234 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1235 * here breaks the relationship between cluster buffer invalidation and
1236 * stale inode invalidation on cluster buffer item journal commit
1237 * completion, and can result in leaving dirty stale inodes hanging
1238 * around in memory.
1239 *
1240 * We have no need for serialising this inode operation against other
1241 * operations - we freed the inode and hence reallocation is required
1242 * and that will serialise on reallocating the space the deferops need
1243 * to free. Hence we can unlock the inode on the first commit of
1244 * the transaction rather than roll it right through the deferops. This
1245 * avoids relogging the XFS_ISTALE inode.
1246 *
1247 * We check that xfs_ifree() hasn't grown an internal transaction roll
1248 * by asserting that the inode is still locked when it returns.
1249 */
1250 xfs_ilock(ip, XFS_ILOCK_EXCL);
1251 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1252
1253 error = xfs_ifree(tp, ip);
1254 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1255 if (error) {
1256 /*
1257 * If we fail to free the inode, shut down. The cancel
1258 * might do that, but we need to make sure. Otherwise the
1259 * inode might be lost for a long time or forever.
1260 */
1261 if (!xfs_is_shutdown(mp)) {
1262 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1263 __func__, error);
1264 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1265 }
1266 xfs_trans_cancel(tp);
1267 return error;
1268 }
1269
1270 /*
1271 * Credit the quota account(s). The inode is gone.
1272 */
1273 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1274
1275 return xfs_trans_commit(tp);
1276}
1277
1278/*
1279 * Returns true if we need to update the on-disk metadata before we can free
1280 * the memory used by this inode. Updates include freeing post-eof
1281 * preallocations; freeing COW staging extents; and marking the inode free in
1282 * the inobt if it is on the unlinked list.
1283 */
1284bool
1285xfs_inode_needs_inactive(
1286 struct xfs_inode *ip)
1287{
1288 struct xfs_mount *mp = ip->i_mount;
1289 struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1290
1291 /*
1292 * If the inode is already free, then there can be nothing
1293 * to clean up here.
1294 */
1295 if (VFS_I(ip)->i_mode == 0)
1296 return false;
1297
1298 /*
1299 * If this is a read-only mount, don't do this (would generate I/O)
1300 * unless we're in log recovery and cleaning the iunlinked list.
1301 */
1302 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1303 return false;
1304
1305 /* If the log isn't running, push inodes straight to reclaim. */
1306 if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1307 return false;
1308
1309 /* Metadata inodes require explicit resource cleanup. */
1310 if (xfs_is_internal_inode(ip))
1311 return false;
1312
1313 /* Want to clean out the cow blocks if there are any. */
1314 if (cow_ifp && cow_ifp->if_bytes > 0)
1315 return true;
1316
1317 /* Unlinked files must be freed. */
1318 if (VFS_I(ip)->i_nlink == 0)
1319 return true;
1320
1321 /*
1322 * This file isn't being freed, so check if there are post-eof blocks
1323 * to free.
1324 *
1325 * Note: don't bother with iolock here since lockdep complains about
1326 * acquiring it in reclaim context. We have the only reference to the
1327 * inode at this point anyways.
1328 */
1329 return xfs_can_free_eofblocks(ip);
1330}
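/*
 * Usage sketch (illustrative only): inode eviction uses this predicate
 * to decide between queueing the inode for inactivation work (which
 * ends up in xfs_inactive() below) and reclaiming it directly:
 *
 *	if (xfs_inode_needs_inactive(ip))
 *		... queue for deferred inactivation ...
 *	else
 *		... mark the inode reclaimable now ...
 */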
1331
1332/*
1333 * Save health status somewhere, if we're dumping an inode with uncorrected
1334 * errors and online repair isn't running.
1335 */
1336static inline void
1337xfs_inactive_health(
1338 struct xfs_inode *ip)
1339{
1340 struct xfs_mount *mp = ip->i_mount;
1341 struct xfs_perag *pag;
1342 unsigned int sick;
1343 unsigned int checked;
1344
1345 xfs_inode_measure_sickness(ip, &sick, &checked);
1346 if (!sick)
1347 return;
1348
1349 trace_xfs_inode_unfixed_corruption(ip, sick);
1350
1351 if (sick & XFS_SICK_INO_FORGET)
1352 return;
1353
1354 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1355 if (!pag) {
1356 /* There had better still be a perag structure! */
1357 ASSERT(0);
1358 return;
1359 }
1360
1361 xfs_ag_mark_sick(pag, XFS_SICK_AG_INODES);
1362 xfs_perag_put(pag);
1363}
1364
1365/*
1366 * xfs_inactive
1367 *
1368 * This is called when the reference count for the vnode
1369 * goes to zero. If the file has been unlinked, then it must
1370 * now be truncated. Also, we clear all of the read-ahead state
1371 * kept for the inode here since the file is now closed.
1372 */
1373int
1374xfs_inactive(
1375 xfs_inode_t *ip)
1376{
1377 struct xfs_mount *mp;
1378 int error = 0;
1379 int truncate = 0;
1380
1381 /*
1382 * If the inode is already free, then there can be nothing
1383 * to clean up here.
1384 */
1385 if (VFS_I(ip)->i_mode == 0) {
1386 ASSERT(ip->i_df.if_broot_bytes == 0);
1387 goto out;
1388 }
1389
1390 mp = ip->i_mount;
1391 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1392
1393 xfs_inactive_health(ip);
1394
1395 /*
1396 * If this is a read-only mount, don't do this (would generate I/O)
1397 * unless we're in log recovery and cleaning the iunlinked list.
1398 */
1399 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1400 goto out;
1401
1402 /* Metadata inodes require explicit resource cleanup. */
1403 if (xfs_is_internal_inode(ip))
1404 goto out;
1405
1406 /* Try to clean out the cow blocks if there are any. */
1407 if (xfs_inode_has_cow_data(ip)) {
1408 error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1409 if (error)
1410 goto out;
1411 }
1412
1413 if (VFS_I(ip)->i_nlink != 0) {
1414 /*
1415 * Note: don't bother with iolock here since lockdep complains
1416 * about acquiring it in reclaim context. We have the only
1417 * reference to the inode at this point anyways.
1418 */
1419 if (xfs_can_free_eofblocks(ip))
1420 error = xfs_free_eofblocks(ip);
1421
1422 goto out;
1423 }
1424
1425 if (S_ISREG(VFS_I(ip)->i_mode) &&
1426 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1427 xfs_inode_has_filedata(ip)))
1428 truncate = 1;
1429
1430 if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1431 /*
1432 * If this inode is being inactivated during a quotacheck and
1433 * has not yet been scanned by quotacheck, we /must/ remove
1434 * the dquots from the inode before inactivation changes the
1435 * block and inode counts. Most probably this is a result of
1436 * reloading the incore iunlinked list to purge unrecovered
1437 * unlinked inodes.
1438 */
1439 xfs_qm_dqdetach(ip);
1440 } else {
1441 error = xfs_qm_dqattach(ip);
1442 if (error)
1443 goto out;
1444 }
1445
1446 if (S_ISDIR(VFS_I(ip)->i_mode) && ip->i_df.if_nextents > 0) {
1447 xfs_inactive_dir(ip);
1448 truncate = 1;
1449 }
1450
1451 if (S_ISLNK(VFS_I(ip)->i_mode))
1452 error = xfs_inactive_symlink(ip);
1453 else if (truncate)
1454 error = xfs_inactive_truncate(ip);
1455 if (error)
1456 goto out;
1457
1458 /*
1459 * If there are attributes associated with the file then blow them away
1460 * now. The code calls a routine that recursively deconstructs the
1461 * attribute fork. It also blows away the in-core attribute fork.
1462 */
1463 if (xfs_inode_has_attr_fork(ip)) {
1464 error = xfs_attr_inactive(ip);
1465 if (error)
1466 goto out;
1467 }
1468
1469 ASSERT(ip->i_forkoff == 0);
1470
1471 /*
1472 * Free the inode.
1473 */
1474 error = xfs_inactive_ifree(ip);
1475
1476out:
1477 /*
1478 * We're done making metadata updates for this inode, so we can release
1479 * the attached dquots.
1480 */
1481 xfs_qm_dqdetach(ip);
1482 return error;
1483}
1484
1485/*
1486 * Find an inode on the unlinked list. This does not take references to the
1487 * inode, as holding the AGI buffer lock guarantees its existence and only
1488 * unlinked, referenced inodes can be on the unlinked inode list. If we
1489 * don't find the inode in cache, then let the caller handle the situation.
1490 */
1491struct xfs_inode *
1492xfs_iunlink_lookup(
1493 struct xfs_perag *pag,
1494 xfs_agino_t agino)
1495{
1496 struct xfs_inode *ip;
1497
1498 rcu_read_lock();
1499 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1500 if (!ip) {
1501 /* Caller can handle inode not being in memory. */
1502 rcu_read_unlock();
1503 return NULL;
1504 }
1505
1506 /*
1507 * Inode in RCU freeing limbo should not happen. Warn about this and
1508 * let the caller handle the failure.
1509 */
1510 if (WARN_ON_ONCE(!ip->i_ino)) {
1511 rcu_read_unlock();
1512 return NULL;
1513 }
1514 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1515 rcu_read_unlock();
1516 return ip;
1517}

/*
 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
 * to @prev_agino. Caller must hold the AGI to synchronize with other changes
 * to the unlinked list.
 */
int
xfs_iunlink_reload_next(
	struct xfs_trans	*tp,
	struct xfs_buf		*agibp,
	xfs_agino_t		prev_agino,
	xfs_agino_t		next_agino)
{
	struct xfs_perag	*pag = agibp->b_pag;
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_inode	*next_ip = NULL;
	int			error;

	ASSERT(next_agino != NULLAGINO);

#ifdef DEBUG
	rcu_read_lock();
	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
	ASSERT(next_ip == NULL);
	rcu_read_unlock();
#endif

	xfs_info_ratelimited(mp,
 "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
			next_agino, pag_agno(pag));

	/*
	 * Use an untrusted lookup just to be cautious in case the AGI has been
	 * corrupted and now points at a free inode. That shouldn't happen,
	 * but we'd rather shut down now since we're already running in a weird
	 * situation.
	 */
	error = xfs_iget(mp, tp, xfs_agino_to_ino(pag, next_agino),
			XFS_IGET_UNTRUSTED, 0, &next_ip);
	if (error) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
		return error;
	}

	/* If this is not an unlinked inode, something is very wrong. */
	if (VFS_I(next_ip)->i_nlink != 0) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
		error = -EFSCORRUPTED;
		goto rele;
	}

	next_ip->i_prev_unlinked = prev_agino;
	trace_xfs_iunlink_reload_next(next_ip);
rele:
	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
	if (xfs_is_quotacheck_running(mp) && next_ip)
		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
	xfs_irele(next_ip);
	return error;
}

/*
 * Look up the inode number specified and, if it is not already marked
 * XFS_ISTALE, mark it stale. We should only find clean inodes in this
 * lookup that aren't already stale.
 */
static void
xfs_ifree_mark_inode_stale(
	struct xfs_perag	*pag,
	struct xfs_inode	*free_ip,
	xfs_ino_t		inum)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_inode_log_item *iip;
	struct xfs_inode	*ip;

retry:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));

	/* Inode not in memory, nothing to do */
	if (!ip) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Because this is an RCU protected lookup, we could find a recently
	 * freed or even reallocated inode during the lookup. We need to check
	 * under the i_flags_lock for a valid inode here. Skip it if it is not
	 * valid, the wrong inode or stale.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
		goto out_iflags_unlock;

	/*
	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
	 * other inodes that we did not find in the list attached to the buffer
	 * and are not already marked stale. If we can't lock it, back off and
	 * retry.
	 */
	if (ip != free_ip) {
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
			spin_unlock(&ip->i_flags_lock);
			rcu_read_unlock();
			delay(1);
			goto retry;
		}
	}
	ip->i_flags |= XFS_ISTALE;

	/*
	 * If the inode is flushing, it is already attached to the buffer. All
	 * we need to do here is mark the inode stale so buffer IO completion
	 * will remove it from the AIL.
	 */
	iip = ip->i_itemp;
	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
		ASSERT(iip->ili_last_fields);
		goto out_iunlock;
	}

	/*
	 * Inodes not attached to the buffer can be released immediately.
	 * Everything else has to go through xfs_iflush_abort() on journal
	 * commit as the flock synchronises removal of the inode from the
	 * cluster buffer against inode reclaim.
	 */
	if (!iip || list_empty(&iip->ili_item.li_bio_list))
		goto out_iunlock;

	__xfs_iflags_set(ip, XFS_IFLUSHING);
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	/* We have a dirty inode in memory that has not yet been flushed. */
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	spin_unlock(&iip->ili_lock);
	ASSERT(iip->ili_last_fields);

	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return;

out_iunlock:
	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_iflags_unlock:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
static int
xfs_ifree_cluster(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_inode	*free_ip,
	struct xfs_icluster	*xic)
{
	struct xfs_mount	*mp = free_ip->i_mount;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	struct xfs_buf		*bp;
	xfs_daddr_t		blkno;
	xfs_ino_t		inum = xic->first_ino;
	int			nbufs;
	int			i, j;
	int			ioffset;
	int			error;

	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;

	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
		/*
		 * The allocation bitmap tells us which inodes of the chunk were
		 * physically allocated. Skip the cluster if an inode falls into
		 * a sparse region.
		 */
		ioffset = inum - xic->first_ino;
		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
			continue;
		}

		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
				XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here to ensure dirty inodes attached to the buffer remain in
		 * the flushing state while we mark them stale.
		 *
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
				mp->m_bsize * igeo->blocks_per_cluster,
				XBF_UNMAPPED, &bp);
		if (error)
			return error;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk. That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes to it.
		 *
		 * For the inode that triggered the cluster freeing, this
		 * attachment may occur in xfs_inode_item_precommit() after we
		 * have marked this buffer stale. If this buffer was not in
		 * memory before xfs_ifree_cluster() started, it will not be
		 * marked XBF_DONE and this will cause problems later in
		 * xfs_inode_item_precommit() when we trip over a (stale, !done)
		 * buffer attached to the transaction.
		 *
		 * Hence we have to mark the buffer as XBF_DONE here. This is
		 * safe because we are also marking the buffer as XBF_STALE and
		 * XFS_BLI_STALE. That means it will never be dispatched for
		 * IO and it won't be unlocked until the cluster freeing has
		 * been committed to the journal and the buffer unpinned. If it
		 * is written, we want to know about it, and we want it to
		 * fail. We can achieve this by adding a write verifier to the
		 * buffer.
		 */
		bp->b_flags |= XBF_DONE;
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
		 * too. This requires lookups, and will skip inodes that we've
		 * already marked XFS_ISTALE.
		 */
		for (i = 0; i < igeo->inodes_per_cluster; i++)
			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}
	return 0;
}

/*
 * This is called to return an inode to the inode free list. The inode should
 * already be truncated to 0 length and have no pages associated with it. This
 * routine also assumes that the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list of unlinked
 * inodes in the AGI. We need to remove the inode from that list atomically with
 * respect to freeing it here.
 */
int
xfs_ifree(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	struct xfs_icluster	xic = { 0 };
	struct xfs_inode_log_item *iip = ip->i_itemp;
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(ip->i_df.if_nextents == 0);
	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
	ASSERT(ip->i_nblocks == 0);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	error = xfs_inode_uninit(tp, pag, ip, &xic);
	if (error)
		goto out;

	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);

	/* Don't attempt to replay owner changes for a deleted inode */
	spin_lock(&iip->ili_lock);
	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
	spin_unlock(&iip->ili_lock);

	if (xic.deleted)
		error = xfs_ifree_cluster(tp, pag, ip, &xic);
out:
	xfs_perag_put(pag);
	return error;
}

/*
 * This is called to unpin an inode. The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t	*wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

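	/*
	 * Open-coded wait-on-bit loop: sleep until the pin count drops to
	 * zero. io_schedule() is used (rather than schedule()) so the sleep
	 * is accounted as I/O wait, matching the log force just issued to
	 * push the pinning transactions to disk.
	 */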
	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wq_entry);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * Removing an inode from the namespace involves removing the directory entry
 * and dropping the link count on the inode. Removing the directory entry can
 * result in locking an AGF (directory blocks were freed) and removing a link
 * count can result in placing the inode on an unlinked list which results in
 * locking an AGI.
 *
 * The big problem here is that we have an ordering constraint on AGF and AGI
 * locking - inode allocation locks the AGI, then can allocate a new extent for
 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
 * removes the inode from the unlinked list, requiring that we lock the AGI
 * first, and then freeing the inode can result in an inode chunk being freed
 * and hence freeing disk space requiring that we lock an AGF.
 *
 * Hence the ordering that is imposed by other parts of the code is AGI before
 * AGF. This means we cannot remove the directory entry before we drop the inode
 * reference count and put it on the unlinked list as this results in a lock
 * order of AGF then AGI, and this can deadlock against inode allocation and
 * freeing. Therefore we must drop the link counts before we remove the
 * directory entry.
 *
 * This is still safe from a transactional point of view - it is not until we
 * get to xfs_defer_finish() that we have the possibility of multiple
 * transactions in this operation. Hence as long as we remove the directory
 * entry and drop the link count in the first transaction of the remove
 * operation, there are no transactional constraints on the ordering here.
 */
int
xfs_remove(
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	struct xfs_inode	*ip)
{
	struct xfs_dir_update	du = {
		.dp		= dp,
		.name		= name,
		.ip		= ip,
	};
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
	int			dontcare;
	int			error = 0;
	uint			resblks;

	trace_xfs_remove(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;
	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
		return -EIO;

	error = xfs_qm_dqattach(dp);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto std_return;

	error = xfs_parent_start(mp, &du.ppargs);
	if (error)
		goto std_return;

	/*
	 * We try to get the real space reservation first, allowing for
	 * directory btree deletion(s) implying possible bmap insert(s). If we
	 * can't get the space reservation then we use 0 instead, and avoid the
	 * bmap btree insert(s) in the directory code by, if the bmap insert
	 * tries to happen, instead trimming the LAST block from the directory.
	 *
	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
	 * the directory code can handle a reservationless update and we don't
	 * want to prevent a user from trying to free space by deleting things.
	 */
	resblks = xfs_remove_space_res(mp, name->len);
	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
			&tp, &dontcare);
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_parent;
	}

	error = xfs_dir_remove_child(tp, resblks, &du);
	if (error)
		goto out_trans_cancel;

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	if (is_dir && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	xfs_parent_finish(mp, du.ppargs);
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 out_parent:
	xfs_parent_finish(mp, du.ppargs);
 std_return:
	return error;
}
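
/*
 * Compact illustration of the AGI-before-AGF ordering discussed above
 * xfs_remove() (a sketch, not code):
 *
 *	drop link count	-> put inode on unlinked list	-> lock AGI
 *	remove dirent	-> maybe free directory blocks	-> lock AGF
 *
 * Doing the link count drop first keeps every path in this function locking
 * the AGI before the AGF, matching inode allocation and freeing.
 */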

static inline void
xfs_iunlock_rename(
	struct xfs_inode	**i_tab,
	int			num_inodes)
{
	int			i;

	for (i = num_inodes - 1; i >= 0; i--) {
		/* Skip duplicate inodes if src and target dps are the same */
		if (!i_tab[i] || (i > 0 && i_tab[i] == i_tab[i - 1]))
			continue;
		xfs_iunlock(i_tab[i], XFS_ILOCK_EXCL);
	}
}

/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
#define __XFS_SORT_INODES	5
STATIC void
xfs_sort_for_rename(
	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
	struct xfs_inode	*ip1,	/* in: inode of old entry */
	struct xfs_inode	*ip2,	/* in: inode of new entry */
	struct xfs_inode	*wip,	/* in: whiteout inode */
	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
	int			*num_inodes) /* in/out: inodes in array */
{
	int			i;

	ASSERT(*num_inodes == __XFS_SORT_INODES);
	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));

	/*
	 * i_tab contains a list of pointers to inodes. We initialize
	 * the table here & we'll sort it. We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates. e.g., dp1 == dp2.
	 */
	i = 0;
	i_tab[i++] = dp1;
	i_tab[i++] = dp2;
	i_tab[i++] = ip1;
	if (ip2)
		i_tab[i++] = ip2;
	if (wip)
		i_tab[i++] = wip;
	*num_inodes = i;

	xfs_sort_inodes(i_tab, *num_inodes);
}

void
xfs_sort_inodes(
	struct xfs_inode	**i_tab,
	unsigned int		num_inodes)
{
	int			i, j;

	ASSERT(num_inodes <= __XFS_SORT_INODES);

	/*
	 * Sort the elements via bubble sort. (Remember, there are at
	 * most 5 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < num_inodes; i++) {
		for (j = 1; j < num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino)
				swap(i_tab[j], i_tab[j - 1]);
		}
	}
}
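
/*
 * Example (illustrative only; the inode pointers below are hypothetical):
 * sort a small table by inode number, then take the locks in that order to
 * avoid ABBA deadlocks between concurrent multi-inode operations.
 *
 *	struct xfs_inode	*tab[3] = { ip_c, ip_a, ip_b };
 *
 *	xfs_sort_inodes(tab, 3);
 *	xfs_lock_inodes(tab, 3, XFS_ILOCK_EXCL);
 */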

/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 * crash between allocating the inode and linking it into the rename
 * transaction, recovery will free the inode and we won't leak it.
 */
static int
xfs_rename_alloc_whiteout(
	struct mnt_idmap	*idmap,
	struct xfs_name		*src_name,
	struct xfs_inode	*dp,
	struct xfs_inode	**wip)
{
	struct xfs_icreate_args	args = {
		.idmap		= idmap,
		.pip		= dp,
		.mode		= S_IFCHR | WHITEOUT_MODE,
		.flags		= XFS_ICREATE_TMPFILE,
	};
	struct xfs_inode	*tmpfile;
	struct qstr		name;
	int			error;

	error = xfs_create_tmpfile(&args, &tmpfile);
	if (error)
		return error;

	name.name = src_name->name;
	name.len = src_name->len;
	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
	if (error) {
		xfs_finish_inode_setup(tmpfile);
		xfs_irele(tmpfile);
		return error;
	}

	/*
	 * Prepare the tmpfile inode as if it were created through the VFS.
	 * Complete the inode setup and flag it as linkable. nlink is already
	 * zero, so we can skip the drop_nlink.
	 */
	xfs_setup_iops(tmpfile);
	xfs_finish_inode_setup(tmpfile);
	VFS_I(tmpfile)->i_state |= I_LINKABLE;

	*wip = tmpfile;
	return 0;
}

/*
 * xfs_rename
 */
int
xfs_rename(
	struct mnt_idmap	*idmap,
	struct xfs_inode	*src_dp,
	struct xfs_name		*src_name,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*target_dp,
	struct xfs_name		*target_name,
	struct xfs_inode	*target_ip,
	unsigned int		flags)
{
	struct xfs_dir_update	du_src = {
		.dp		= src_dp,
		.name		= src_name,
		.ip		= src_ip,
	};
	struct xfs_dir_update	du_tgt = {
		.dp		= target_dp,
		.name		= target_name,
		.ip		= target_ip,
	};
	struct xfs_dir_update	du_wip = { };
	struct xfs_mount	*mp = src_dp->i_mount;
	struct xfs_trans	*tp;
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			i;
	int			num_inodes = __XFS_SORT_INODES;
	bool			new_parent = (src_dp != target_dp);
	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
	int			spaceres;
	bool			retried = false;
	int			error, nospace_error = 0;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	if ((flags & RENAME_EXCHANGE) && !target_ip)
		return -EINVAL;

	/*
	 * If we are doing a whiteout operation, allocate the whiteout inode
	 * we will be placing at the target and ensure the type is set
	 * appropriately.
	 */
	if (flags & RENAME_WHITEOUT) {
		error = xfs_rename_alloc_whiteout(idmap, src_name, target_dp,
				&du_wip.ip);
		if (error)
			return error;

		/* setup target dirent info as whiteout */
		src_name->type = XFS_DIR3_FT_CHRDEV;
	}

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, du_wip.ip,
			inodes, &num_inodes);

	error = xfs_parent_start(mp, &du_src.ppargs);
	if (error)
		goto out_release_wip;

	if (du_wip.ip) {
		error = xfs_parent_start(mp, &du_wip.ppargs);
		if (error)
			goto out_src_ppargs;
	}

	if (target_ip) {
		error = xfs_parent_start(mp, &du_tgt.ppargs);
		if (error)
			goto out_wip_ppargs;
	}

retry:
	nospace_error = 0;
	spaceres = xfs_rename_space_res(mp, src_name->len, target_ip != NULL,
			target_name->len, du_wip.ip != NULL);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
	if (error == -ENOSPC) {
		nospace_error = error;
		spaceres = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_tgt_ppargs;

	/*
	 * We don't allow reservationless renaming when parent pointers are
	 * enabled because we can't back out if the xattrs must grow.
	 */
	if (du_src.ppargs && nospace_error) {
		error = nospace_error;
		xfs_trans_cancel(tp);
		goto out_tgt_ppargs;
	}

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_tgt_ppargs;
	}

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 5 inodes.
	 */
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction.
	 */
	xfs_trans_ijoin(tp, src_dp, 0);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, 0);
	xfs_trans_ijoin(tp, src_ip, 0);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, 0);
	if (du_wip.ip)
		xfs_trans_ijoin(tp, du_wip.ip, 0);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     target_dp->i_projid != src_ip->i_projid)) {
		error = -EXDEV;
		goto out_trans_cancel;
	}

	/* RENAME_EXCHANGE is unique from here on. */
	if (flags & RENAME_EXCHANGE) {
		error = xfs_dir_exchange_children(tp, &du_src, &du_tgt,
				spaceres);
		if (error)
			goto out_trans_cancel;
		goto out_commit;
	}

	/*
	 * Try to reserve quota to handle an expansion of the target directory.
	 * We'll allow the rename to continue in reservationless mode if we hit
	 * a space usage constraint. If we trigger reservationless mode, save
	 * the errno if there isn't any free space in the target directory.
	 */
	if (spaceres != 0) {
		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
				0, false);
		if (error == -EDQUOT || error == -ENOSPC) {
			if (!retried) {
				xfs_trans_cancel(tp);
				xfs_iunlock_rename(inodes, num_inodes);
				xfs_blockgc_free_quota(target_dp, 0);
				retried = true;
				goto retry;
			}

			nospace_error = error;
			spaceres = 0;
			error = 0;
		}
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * We don't allow quotaless renaming when parent pointers are enabled
	 * because we can't back out if the xattrs must grow.
	 */
	if (du_src.ppargs && nospace_error) {
		error = nospace_error;
		goto out_trans_cancel;
	}

	/*
	 * Lock the AGI buffers we need to handle bumping the nlink of the
	 * whiteout inode off the unlinked list and to handle dropping the
	 * nlink of the target inode. Per locking order rules, do this in
	 * increasing AG order and before directory block allocation tries to
	 * grab AGFs because we grab AGIs before AGFs.
	 *
	 * The (vfs) caller must ensure that if src is a directory then
	 * target_ip is either null or an empty directory.
	 */
	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
		if (inodes[i] == du_wip.ip ||
		    (inodes[i] == target_ip &&
		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
			struct xfs_perag	*pag;
			struct xfs_buf		*bp;

			pag = xfs_perag_get(mp,
					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
			error = xfs_read_agi(pag, tp, 0, &bp);
			xfs_perag_put(pag);
			if (error)
				goto out_trans_cancel;
		}
	}

	error = xfs_dir_rename_children(tp, &du_src, &du_tgt, spaceres,
			&du_wip);
	if (error)
		goto out_trans_cancel;

	if (du_wip.ip) {
		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused in
		 * future.
		 */
		VFS_I(du_wip.ip)->i_state &= ~I_LINKABLE;
	}

out_commit:
	/*
	 * If this is a synchronous mount, make sure that the rename
	 * transaction goes to disk before returning to the user.
	 */
	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
	nospace_error = 0;
	goto out_unlock;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock_rename(inodes, num_inodes);
out_tgt_ppargs:
	xfs_parent_finish(mp, du_tgt.ppargs);
out_wip_ppargs:
	xfs_parent_finish(mp, du_wip.ppargs);
out_src_ppargs:
	xfs_parent_finish(mp, du_src.ppargs);
out_release_wip:
	if (du_wip.ip)
		xfs_irele(du_wip.ip);
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

static int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip->ili_item.li_buf == bp);

	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * We don't flush the inode if any of the following checks fail, but we
	 * do still update the log item and attach to the backing buffer as if
	 * the flush happened. This is a formality to facilitate predictable
	 * error handling as the caller will shutdown and fail the buffer.
	 */
	error = -EFSCORRUPTED;
	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto flush_out;
	}
	if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
		    mp, XFS_ERRTAG_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %llu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto flush_out;
		}
	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
		    mp, XFS_ERRTAG_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %llu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto flush_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %llu, "
			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
			__func__, ip->i_ino,
			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
			ip->i_nblocks, ip);
		goto flush_out;
	}
	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, ip->i_forkoff, ip);
		goto flush_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the flushiter
	 * count for correct sequencing. We bump the flush iteration count so
	 * we can detect flushes which postdate a log record during recovery.
	 * This is redundant as we now log every change and hence this can't
	 * happen, but we still need to do it to ensure backwards compatibility
	 * with old kernels that predate logging all inode changes.
	 */
	if (!xfs_has_v3inodes(mp))
		ip->i_flushiter++;

	/*
	 * If there are inline format data / attr forks attached to this inode,
	 * make sure they are not corrupt.
	 */
	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
	    xfs_ifork_verify_local_data(ip))
		goto flush_out;
	if (xfs_inode_has_attr_fork(ip) &&
	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
	    xfs_ifork_verify_local_attr(ip))
		goto flush_out;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode. We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (!xfs_has_v3inodes(mp)) {
		if (ip->i_flushiter == DI_MAX_FLUSH)
			ip->i_flushiter = 0;
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (xfs_inode_has_attr_fork(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk. If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field. When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
	 * we know that the information those bits represent is permanently on
	 * disk. As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 */
	error = 0;
flush_out:
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	set_bit(XFS_LI_FLUSHING, &iip->ili_item.li_flags);
	spin_unlock(&iip->ili_lock);

	/*
	 * Store the current LSN of the inode so that we can tell whether the
	 * item has moved in the AIL from xfs_buf_inode_iodone().
	 */
	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);
	if (error)
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
	return error;
}
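
/*
 * Timeline sketch of the ili_fields/ili_last_fields handoff described above
 * (illustrative, simplified):
 *
 *	flush:		ili_last_fields = ili_fields; ili_fields = 0;
 *	relog:		ili_fields |= ili_last_fields;	(inode dirtied again)
 *	flush done:	ili_last_fields = 0;	(xfs_buf_inode_iodone)
 *
 * Until the buffer write completes, the union of the two fields still
 * describes everything that must be logged for recovery to be correct.
 */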

/*
 * Non-blocking flush of dirty inode metadata into the backing buffer.
 *
 * The caller must have a reference to the inode and hold the cluster buffer
 * locked. The function will walk all the inodes attached to the cluster
 * buffer that it can find and lock without blocking, and flush them to the
 * cluster buffer.
 *
 * On successful flushing of at least one inode, the caller must write out the
 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned
 * and the caller needs to release the buffer. On failure, the filesystem will
 * be shut down, the buffer will have been unlocked and released, and
 * EFSCORRUPTED will be returned.
 */
int
xfs_iflush_cluster(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_log_item	*lip, *n;
	struct xfs_inode	*ip;
	struct xfs_inode_log_item *iip;
	int			clcount = 0;
	int			error = 0;

	/*
	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
	 * will remove itself from the list.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		iip = (struct xfs_inode_log_item *)lip;
		ip = iip->ili_inode;

		/*
		 * Quick and dirty check to avoid locks if possible.
		 */
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
			continue;
		if (xfs_ipincount(ip))
			continue;

		/*
		 * The inode is still attached to the buffer, which means it is
		 * dirty but reclaim might try to grab it. Check carefully for
		 * that, and grab the ilock while still holding the i_flags_lock
		 * to guarantee reclaim will not be able to reclaim this inode
		 * once we drop the i_flags_lock.
		 */
		spin_lock(&ip->i_flags_lock);
		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}

		/*
		 * ILOCK will pin the inode against reclaim and prevent
		 * concurrent transactions modifying the inode while we are
		 * flushing the inode. If we get the lock, set the flushing
		 * state before we drop the i_flags_lock.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		__xfs_iflags_set(ip, XFS_IFLUSHING);
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Abort flushing this inode if we are shut down because the
		 * inode may not currently be in the AIL. This can occur when
		 * log I/O failure unpins the inode without inserting into the
		 * AIL, leaving a dirty/unpinned inode attached to the buffer
		 * that otherwise looks like it should be flushed.
		 */
		if (xlog_is_shutdown(mp->m_log)) {
			xfs_iunpin_wait(ip);
			xfs_iflush_abort(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = -EIO;
			continue;
		}

		/* don't block waiting on a log force to unpin dirty inodes */
		if (xfs_ipincount(ip)) {
			xfs_iflags_clear(ip, XFS_IFLUSHING);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			continue;
		}

		if (!xfs_inode_clean(ip))
			error = xfs_iflush(ip, bp);
		else
			xfs_iflags_clear(ip, XFS_IFLUSHING);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error)
			break;
		clcount++;
	}

	if (error) {
		/*
		 * Shutdown first so we kill the log before we release this
		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
		 * of the log, failing it before the _log_ is shut down can
		 * result in the log tail being moved forward in the journal
		 * on disk because log writes can still be taking place. Hence
		 * unpinning the tail will allow the ICREATE intent to be
		 * removed from the log, and recovery will fail with
		 * uninitialised inode cluster buffers.
		 */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
		return error;
	}

	if (!clcount)
		return -EAGAIN;

	XFS_STATS_INC(mp, xs_icluster_flushcnt);
	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	return 0;
}
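
/*
 * Sketch of the caller contract described above (hypothetical caller; error
 * handling abbreviated): only write the buffer if something was flushed.
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (error == -EAGAIN)
 *		xfs_buf_relse(bp);	(nothing flushed; just release)
 *	else if (!error)
 *		... submit the buffer for write, then release it ...
 *	(on other errors the buffer has already been released)
 */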

/* Release an inode. */
void
xfs_irele(
	struct xfs_inode	*ip)
{
	trace_xfs_irele(ip, _RET_IP_);
	iput(VFS_I(ip));
}

/*
 * Ensure all committed transactions touching the inode are written to the log.
 */
int
xfs_log_force_inode(
	struct xfs_inode	*ip)
{
	xfs_csn_t		seq = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		seq = ip->i_itemp->ili_commit_seq;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!seq)
		return 0;
	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
}

/*
 * Grab the exclusive iolock for a data copy from src to dest, making sure to
 * abide by the VFS locking order (lowest pointer value goes first) and
 * breaking the layout leases before proceeding. The loop is needed because we
 * cannot call the blocking break_layout() with the iolocks held, and
 * therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

	if (src > dest)
		swap(src, dest);

retry:
	/* Wait to break both inodes' layouts before we start locking. */
	error = break_layout(src, true);
	if (error)
		return error;
	if (src != dest) {
		error = break_layout(dest, true);
		if (error)
			return error;
	}

	/* Lock one inode and make sure nobody got in and leased it. */
	inode_lock(src);
	error = break_layout(src, false);
	if (error) {
		inode_unlock(src);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	if (src == dest)
		return 0;

	/* Lock the other inode and make sure nobody got in and leased it. */
	inode_lock_nested(dest, I_MUTEX_NONDIR2);
	error = break_layout(dest, false);
	if (error) {
		inode_unlock(src);
		inode_unlock(dest);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	return 0;
}

static int
xfs_mmaplock_two_inodes_and_break_dax_layout(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;
	bool			retry;
	struct page		*page;

	if (ip1->i_ino > ip2->i_ino)
		swap(ip1, ip2);

again:
	retry = false;
	/* Lock the first inode */
	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
	if (error || retry) {
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		if (error == 0 && retry)
			goto again;
		return error;
	}

	if (ip1 == ip2)
		return 0;

	/* Nested lock the second inode */
	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
	/*
	 * We cannot use xfs_break_dax_layouts() directly here because it may
	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
	 * for this nested lock case.
	 */
	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
	if (page && page_ref_count(page) != 1) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		goto again;
	}

	return 0;
}

/*
 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
 * mmap activity.
 */
int
xfs_ilock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			ret;

	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
	if (ret)
		return ret;

	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
		if (ret) {
			inode_unlock(VFS_I(ip2));
			if (ip1 != ip2)
				inode_unlock(VFS_I(ip1));
			return ret;
		}
	} else
		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
					    VFS_I(ip2)->i_mapping);

	return 0;
}

/* Unlock both inodes to allow IO and mmap activity. */
void
xfs_iunlock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		if (ip1 != ip2)
			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
	} else
		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
					      VFS_I(ip2)->i_mapping);

	inode_unlock(VFS_I(ip2));
	if (ip1 != ip2)
		inode_unlock(VFS_I(ip1));
}
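
/*
 * Example (illustrative; ip1/ip2 and the work in the middle are
 * hypothetical): a two-file operation bracketing its work with the pair of
 * helpers above.
 *
 *	error = xfs_ilock2_io_mmap(ip1, ip2);
 *	if (error)
 *		return error;
 *	... modify both files ...
 *	xfs_iunlock2_io_mmap(ip1, ip2);
 */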

/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
void
xfs_iunlock2_remapping(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	xfs_iflags_clear(ip1, XFS_IREMAPPING);

	if (ip1 != ip2)
		xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);

	if (ip1 != ip2)
		inode_unlock_shared(VFS_I(ip1));
	inode_unlock(VFS_I(ip2));
}

/*
 * Reload the incore unlinked inode list for this inode. Caller should ensure
 * that the link count cannot change, either by taking ILOCK_SHARED or
 * otherwise preventing other threads from executing.
 */
int
xfs_inode_reload_unlinked_bucket(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agibp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	xfs_agino_t		prev_agino, next_agino;
	unsigned int		bucket;
	bool			foundit = false;
	int			error;

	/* Grab the first inode in the list */
	pag = xfs_perag_get(mp, agno);
	error = xfs_ialloc_read_agi(pag, tp, 0, &agibp);
	xfs_perag_put(pag);
	if (error)
		return error;

	/*
	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
	 * incore unlinked list pointers for this inode. Check once more to
	 * see if we raced with anyone else to reload the unlinked list.
	 */
	if (!xfs_inode_unlinked_incomplete(ip)) {
		foundit = true;
		goto out_agibp;
	}

	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
	agi = agibp->b_addr;

	trace_xfs_inode_reload_unlinked_bucket(ip);

	xfs_info_ratelimited(mp,
 "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
			agino, agno);

	prev_agino = NULLAGINO;
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
	while (next_agino != NULLAGINO) {
		struct xfs_inode	*next_ip = NULL;

		/* Found this caller's inode, set its backlink. */
		if (next_agino == agino) {
			next_ip = ip;
			next_ip->i_prev_unlinked = prev_agino;
			foundit = true;
			goto next_inode;
		}

		/* Try in-memory lookup first. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (next_ip)
			goto next_inode;

		/* Inode not in memory, try reloading it. */
		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
				next_agino);
		if (error)
			break;

		/* Grab the reloaded inode. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (!next_ip) {
			/* No incore inode at all? We reloaded it... */
			ASSERT(next_ip != NULL);
			error = -EFSCORRUPTED;
			break;
		}

next_inode:
		prev_agino = next_agino;
		next_agino = next_ip->i_next_unlinked;
	}

out_agibp:
	xfs_trans_brelse(tp, agibp);
	/* Should have found this inode somewhere in the iunlinked bucket. */
	if (!error && !foundit)
		error = -EFSCORRUPTED;
	return error;
}

/* Decide if this inode is missing its unlinked list and reload it. */
int
xfs_inode_reload_unlinked(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_unlinked_incomplete(ip))
		error = xfs_inode_reload_unlinked_bucket(tp, ip);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_trans_cancel(tp);

	return error;
}

/* Has this inode fork been zapped by repair? */
bool
xfs_ifork_zapped(
	const struct xfs_inode	*ip,
	int			whichfork)
{
	unsigned int		datamask = 0;

	switch (whichfork) {
	case XFS_DATA_FORK:
		switch (ip->i_vnode.i_mode & S_IFMT) {
		case S_IFDIR:
			datamask = XFS_SICK_INO_DIR_ZAPPED;
			break;
		case S_IFLNK:
			datamask = XFS_SICK_INO_SYMLINK_ZAPPED;
			break;
		}
		return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
	case XFS_ATTR_FORK:
		return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
	default:
		return false;
	}
}

/* Compute the number of data and realtime blocks used by a file. */
void
xfs_inode_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_filblks_t		*dblocks,
	xfs_filblks_t		*rblocks)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

	*rblocks = 0;
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_bmap_count_leaves(ifp, rblocks);
	*dblocks = ip->i_nblocks - *rblocks;
}
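
/*
 * Worked example (illustrative numbers): for a realtime file with
 * i_nblocks == 100 whose data fork extents cover 96 realtime blocks, this
 * returns *rblocks = 96 and *dblocks = 4; the remainder is metadata such as
 * bmbt blocks, which always lives on the data device.
 */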

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL);

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	xfs_assert_ilocked(XFS_I(inode), XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL);

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}
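
/*
 * Typical call pattern (a hedged sketch; "ip" and the surrounding operation
 * are hypothetical): take the IOLOCK first, then break layouts before an
 * operation that removes mappings, noting that *iolock may be cycled while
 * leases are broken.
 *
 *	uint	iolock = XFS_IOLOCK_EXCL;
 *
 *	xfs_ilock(ip, iolock);
 *	error = xfs_break_layouts(VFS_I(ip), &iolock, BREAK_UNMAP);
 *	if (error)
 *		goto out_unlock;
 */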

/* Returns the size of the fundamental allocation unit for a file, in bytes. */
unsigned int
xfs_inode_alloc_unitsize(
	struct xfs_inode	*ip)
{
	unsigned int		blocks = 1;

	if (XFS_IS_REALTIME_INODE(ip))
		blocks = ip->i_mount->m_sb.sb_rextsize;

	return XFS_FSB_TO_B(ip->i_mount, blocks);
}
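
/*
 * Worked example (illustrative numbers): on a filesystem with 4096-byte
 * blocks and sb_rextsize = 16, a realtime file's allocation unit is
 * 16 * 4096 = 65536 bytes, while a data-device file's allocation unit is a
 * single 4096-byte block.
 */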

/* Should we always be using copy on write for file writes? */
bool
xfs_is_always_cow_inode(
	const struct xfs_inode	*ip)
{
	return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount);
}