/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
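
/*
 * Minimal usage sketch (not taken from any specific caller): the value
 * returned by these helpers must be passed back to xfs_iunlock() unchanged,
 * because the helper may have taken either the shared or the exclusive lock.
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */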

/*
 * The xfs inode contains 3 multi-reader locks: the i_iolock, the i_mmap_lock
 * and the i_lock.  This routine allows various combinations of the locks to
 * be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_iolock -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO.  These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_sem in get_user_pages() to map the user pages into the kernel address
 * space for direct IO.  Similarly the i_iolock cannot be taken inside a page
 * fault because page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_iolock and the i_mmap_lock.  These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
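
/*
 * Sketch of a combined-lock caller (illustrative only; the flag combination
 * is assumed from the ordering rules above, not copied from a real caller):
 * an extent manipulation path that must serialise against both syscall and
 * mmap IO would take all three locks in one call and drop them the same way:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... invalidate the page cache and manipulate extents ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 */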

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
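
/*
 * Minimal trylock sketch (hypothetical caller, not taken from this file):
 * attempt the lock and back off on failure instead of sleeping.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return -EAGAIN;
 *	... read under the shared ilock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */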

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set.  Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value.  This can be called for any type of inode lock combination, including
 * parent locking.  Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		ASSERT(xfs_lockdep_subclass_ok(subclass +
						XFS_IOLOCK_PARENT_VAL));
		class += subclass << XFS_IOLOCK_SHIFT;
		if (lock_mode & XFS_IOLOCK_PARENT)
			class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
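
/*
 * Worked example (illustrative only): xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 * returns XFS_ILOCK_EXCL with lockdep subclass 2 encoded into the ILOCK
 * subclass bits, i.e. XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT), so that
 * xfs_lock_inodes() can take the same lock class on several inodes without
 * lockdep reporting recursive locking.
 */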

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate).  This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
		inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks.  If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
			xfs_lock_delays++;
#endif
		}
		i = 0;
		try_lock = 0;
		goto again;
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5) xfs_small_retries++;
		else if (attempts < 100) xfs_middle_retries++;
		else xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t		*ip0,
	xfs_inode_t		*ip1,
	uint			lock_mode)
{
	xfs_inode_t		*temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	} else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock.  If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}

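/*
 * Slow path for taking the inode flush lock: the xfs_iflock() wrapper tries
 * xfs_iflock_nowait() first and falls back here to sleep, waiting on the
 * flush bit in i_flags until the current holder drops the lock.
 */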
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}

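/*
 * Convert on-disk inode flags (di_flags/di_flags2) and the presence of an
 * attribute fork into the FS_XFLAG_* values reported to userspace.
 */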
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

uint
xfs_dic2xflags(
	struct xfs_dinode	*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
				be64_to_cpu(dip->di_flags2), XFS_DFORK_Q(dip));
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	xfs_ilock(dp, XFS_IOLOCK_SHARED);
	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
	*ipp = NULL;
	return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec	tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);

	/*
	 * We always convert v1 inodes to v2 now - we only support filesystems
	 * with >= v2 inode capability, so there is no reason for ever leaving
	 * an inode in v1 format.
	 */
	if (ip->i_d.di_version == 1)
		ip->i_d.di_version = 2;

	inode->i_mode = mode;
	set_nlink(inode, nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (inode->i_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_fs_time(mp->m_super);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		inode->i_version = 1;
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
		ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint64_t	di_flags2 = 0;
			uint		di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				di_flags2 |= XFS_DIFLAG2_DAX;

			ip->i_d.di_flags |= di_flags;
			ip->i_d.di_flags2 |= di_flags2;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
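
/*
 * Caller-side sketch of the two-phase protocol described above (see
 * xfs_dir_ialloc() below for the real implementation): if xfs_ialloc()
 * hands back an ialloc_context, hold that buffer over a transaction roll
 * and call xfs_ialloc() a second time.  Error handling elided.
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ialloc_context) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		error = xfs_trans_roll(&tp, 0);
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *				   &ialloc_context, &ip);
 *	}
 */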

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy.  This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */
	int		*committed)
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list.  Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context.  We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one.  So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp, 0);
		if (committed != NULL)
			*committed = 1;

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again.  Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to the AGI unlinked list so that it
 * can be freed when the last active reference goes away via xfs_inactive().
 */
int				/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
int
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_version > 1);
	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return 0;
}

1121xfs_create(
1122 xfs_inode_t *dp,
1123 struct xfs_name *name,
1124 umode_t mode,
1125 xfs_dev_t rdev,
1126 xfs_inode_t **ipp)
1127{
1128 int is_dir = S_ISDIR(mode);
1129 struct xfs_mount *mp = dp->i_mount;
1130 struct xfs_inode *ip = NULL;
1131 struct xfs_trans *tp = NULL;
1132 int error;
1133 xfs_bmap_free_t free_list;
1134 xfs_fsblock_t first_block;
1135 bool unlock_dp_on_error = false;
1136 prid_t prid;
1137 struct xfs_dquot *udqp = NULL;
1138 struct xfs_dquot *gdqp = NULL;
1139 struct xfs_dquot *pdqp = NULL;
1140 struct xfs_trans_res *tres;
1141 uint resblks;
1142
1143 trace_xfs_create(dp, name);
1144
1145 if (XFS_FORCED_SHUTDOWN(mp))
1146 return -EIO;
1147
1148 prid = xfs_get_initial_prid(dp);
1149
1150 /*
1151 * Make sure that we have allocated dquot(s) on disk.
1152 */
1153 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1154 xfs_kgid_to_gid(current_fsgid()), prid,
1155 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1156 &udqp, &gdqp, &pdqp);
1157 if (error)
1158 return error;
1159
1160 if (is_dir) {
1161 rdev = 0;
1162 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1163 tres = &M_RES(mp)->tr_mkdir;
1164 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
1165 } else {
1166 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1167 tres = &M_RES(mp)->tr_create;
1168 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
1169 }
1170
1171 /*
1172 * Initially assume that the file does not exist and
1173 * reserve the resources for that case. If that is not
1174 * the case we'll drop the one we have and get a more
1175 * appropriate transaction later.
1176 */
1177 error = xfs_trans_reserve(tp, tres, resblks, 0);
1178 if (error == -ENOSPC) {
1179 /* flush outstanding delalloc blocks and retry */
1180 xfs_flush_inodes(mp);
1181 error = xfs_trans_reserve(tp, tres, resblks, 0);
1182 }
1183 if (error == -ENOSPC) {
1184 /* No space at all so try a "no-allocation" reservation */
1185 resblks = 0;
1186 error = xfs_trans_reserve(tp, tres, 0, 0);
1187 }
1188 if (error)
1189 goto out_trans_cancel;
1190
1191
1192 xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
1193 XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
1194 unlock_dp_on_error = true;
1195
1196 xfs_bmap_init(&free_list, &first_block);
1197
1198 /*
1199 * Reserve disk quota and the inode.
1200 */
1201 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1202 pdqp, resblks, 1, 0);
1203 if (error)
1204 goto out_trans_cancel;
1205
1206 if (!resblks) {
1207 error = xfs_dir_canenter(tp, dp, name);
1208 if (error)
1209 goto out_trans_cancel;
1210 }
1211
	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   &first_block, &free_list, resblks ?
					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bumplink(tp, dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		IRELE(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	struct dentry		*dentry,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
				xfs_kgid_to_gid(current_fsgid()), prid,
				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);

	tres = &M_RES(mp)->tr_create_tmpfile;
	error = xfs_trans_reserve(tp, tres, resblks, 0);
	if (error == -ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, tres, 0, 0);
	}
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
				prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

1366 * Attach the dquot(s) to the inodes and modify them incore.
1367 * These ids of the inode couldn't have changed since the new
1368 * inode has been locked ever since it was created.
1369 */
1370 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1371
1372 error = xfs_iunlink(tp, ip);
1373 if (error)
1374 goto out_trans_cancel;
1375
1376 error = xfs_trans_commit(tp);
1377 if (error)
1378 goto out_release_inode;
1379
1380 xfs_qm_dqrele(udqp);
1381 xfs_qm_dqrele(gdqp);
1382 xfs_qm_dqrele(pdqp);
1383
1384 *ipp = ip;
1385 return 0;
1386
1387 out_trans_cancel:
1388 xfs_trans_cancel(tp);
1389 out_release_inode:
1390 /*
1391 * Wait until after the current transaction is aborted to finish the
1392 * setup of the inode and release the inode. This prevents recursive
1393 * transactions and deadlocks from xfs_inactive.
1394 */
1395 if (ip) {
1396 xfs_finish_inode_setup(ip);
1397 IRELE(ip);
1398 }
1399
1400 xfs_qm_dqrele(udqp);
1401 xfs_qm_dqrele(gdqp);
1402 xfs_qm_dqrele(pdqp);
1403
1404 return error;
1405}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp, 0);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
	}
	if (error)
		goto error_return;

	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   &first_block, &free_list, resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	error = xfs_bumplink(tp, sip);
	if (error)
		goto error_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto error_return;
	}

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  A transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (i.e. it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, ip);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_roll(&tp, ip);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
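
/*
 * xfs_inactive_truncate() below is a representative caller of the contract
 * described above: it owns a permanent transaction, joins the inode to it,
 * and commits whatever transaction xfs_itruncate_extents() hands back.
 */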

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, either, but we have to
		 * be careful how we detect this case.  Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release.  Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != -EAGAIN)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes.  See the related
	 * comment in xfs_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

	/*
	 * The ifree transaction might need to allocate blocks for record
	 * insertion to the finobt.  We don't want to fail here at ENOSPC, so
	 * allow ifree to dip into the reserved block pool if necessary.
	 *
	 * Freeing large sets of inodes generally means freeing inode chunks,
	 * directory and file data blocks, so this should be relatively safe.
	 * Only under severe circumstances should it be possible to free enough
	 * inodes to exhaust the reserve block pool via finobt expansion while
	 * at the same time not creating free space in the filesystem.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	tp->t_flags |= XFS_TRANS_RESERVE;
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree,
				  XFS_IFREE_SPACE_RES(mp), 0);
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that; we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * Credit the quota account(s).  The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going.  Make sure it's not a silent error.
	 */
	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error) {
		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
			__func__, error);
		xfs_bmap_cancel(&free_list);
	}
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache.  Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(mp, ip, false);

		return;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

1904 /*
1905 * If there are attributes associated with the file then blow them away
1906 * now. The code calls a routine that recursively deconstructs the
1907	 * attribute fork. It also blows away the in-core attribute fork.
1908 */
1909 if (XFS_IFORK_Q(ip)) {
1910 error = xfs_attr_inactive(ip);
1911 if (error)
1912 return;
1913 }
1914
1915 ASSERT(!ip->i_afp);
1916 ASSERT(ip->i_d.di_anextents == 0);
1917 ASSERT(ip->i_d.di_forkoff == 0);
1918
1919 /*
1920 * Free the inode.
1921 */
1922 error = xfs_inactive_ifree(ip);
1923 if (error)
1924 return;
1925
1926 /*
1927 * Release the dquots held by inode, if any.
1928 */
1929 xfs_qm_dqdetach(ip);
1930}
1931
1932/*
1933 * This is called when the inode's link count goes to 0 or we are creating a
1934 * tmpfile via O_TMPFILE. In the case of a tmpfile, the link count is
1935 * dropped to zero by the VFS after we've created the file successfully,
1936 * so we have to add the inode to the unlinked list while its link count
1937 * is still non-zero.
1938 *
1939 * We place the on-disk inode on a list in the AGI. It will be pulled from this
1940 * list when the inode is freed.
1941 */
1942STATIC int
1943xfs_iunlink(
1944 struct xfs_trans *tp,
1945 struct xfs_inode *ip)
1946{
1947 xfs_mount_t *mp = tp->t_mountp;
1948 xfs_agi_t *agi;
1949 xfs_dinode_t *dip;
1950 xfs_buf_t *agibp;
1951 xfs_buf_t *ibp;
1952 xfs_agino_t agino;
1953 short bucket_index;
1954 int offset;
1955 int error;
1956
1957 ASSERT(VFS_I(ip)->i_mode != 0);
1958
1959 /*
1960 * Get the agi buffer first. It ensures lock ordering
1961 * on the list.
1962 */
1963 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1964 if (error)
1965 return error;
1966 agi = XFS_BUF_TO_AGI(agibp);
1967
1968 /*
1969 * Get the index into the agi hash table for the
1970 * list this inode will go on.
1971 */
1972 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1973 ASSERT(agino != 0);
1974 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1975 ASSERT(agi->agi_unlinked[bucket_index]);
1976 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1977
1978 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1979 /*
1980 * There is already another inode in the bucket we need
1981 * to add ourselves to. Add us at the front of the list.
1982 * Here we put the head pointer into our next pointer,
1983 * and then we fall through to point the head at us.
1984 */
1985 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1986 0, 0);
1987 if (error)
1988 return error;
1989
1990 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1991 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1992 offset = ip->i_imap.im_boffset +
1993 offsetof(xfs_dinode_t, di_next_unlinked);
1994
1995 /* need to recalc the inode CRC if appropriate */
1996 xfs_dinode_calc_crc(mp, dip);
1997
1998 xfs_trans_inode_buf(tp, ibp);
1999 xfs_trans_log_buf(tp, ibp, offset,
2000 (offset + sizeof(xfs_agino_t) - 1));
2001 xfs_inobp_check(mp, ibp);
2002 }
2003
2004 /*
2005 * Point the bucket head pointer at the inode being inserted.
2006 */
2007 ASSERT(agino != 0);
2008 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
2009 offset = offsetof(xfs_agi_t, agi_unlinked) +
2010 (sizeof(xfs_agino_t) * bucket_index);
2011 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
2012 xfs_trans_log_buf(tp, agibp, offset,
2013 (offset + sizeof(xfs_agino_t) - 1));
2014 return 0;
2015}
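
/*
 * Pictorially, xfs_iunlink() is a head insertion into a singly linked list
 * whose head lives in one of the AGI hash buckets and whose next pointers
 * live in the on-disk inodes (di_next_unlinked). With illustrative agino
 * values, inserting agino 99 into a bucket currently holding 37 -> 12:
 *
 *	before:	agi_unlinked[b] -> 37 -> 12 -> NULLAGINO
 *	after:	agi_unlinked[b] -> 99 -> 37 -> 12 -> NULLAGINO
 *
 * where b = agino % XFS_AGI_UNLINKED_BUCKETS, as computed above.
 */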
2016
2017/*
2018 * Pull the on-disk inode from the AGI unlinked list.
2019 */
2020STATIC int
2021xfs_iunlink_remove(
2022 xfs_trans_t *tp,
2023 xfs_inode_t *ip)
2024{
2025 xfs_ino_t next_ino;
2026 xfs_mount_t *mp;
2027 xfs_agi_t *agi;
2028 xfs_dinode_t *dip;
2029 xfs_buf_t *agibp;
2030 xfs_buf_t *ibp;
2031 xfs_agnumber_t agno;
2032 xfs_agino_t agino;
2033 xfs_agino_t next_agino;
2034 xfs_buf_t *last_ibp;
2035 xfs_dinode_t *last_dip = NULL;
2036 short bucket_index;
2037 int offset, last_offset = 0;
2038 int error;
2039
2040 mp = tp->t_mountp;
2041 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2042
2043 /*
2044 * Get the agi buffer first. It ensures lock ordering
2045 * on the list.
2046 */
2047 error = xfs_read_agi(mp, tp, agno, &agibp);
2048 if (error)
2049 return error;
2050
2051 agi = XFS_BUF_TO_AGI(agibp);
2052
2053 /*
2054 * Get the index into the agi hash table for the
2055	 * list this inode is on.
2056 */
2057 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2058 ASSERT(agino != 0);
2059 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2060 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
2061 ASSERT(agi->agi_unlinked[bucket_index]);
2062
2063 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2064 /*
2065 * We're at the head of the list. Get the inode's on-disk
2066 * buffer to see if there is anyone after us on the list.
2067 * Only modify our next pointer if it is not already NULLAGINO.
2068 * This saves us the overhead of dealing with the buffer when
2069 * there is no need to change it.
2070 */
2071 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2072 0, 0);
2073 if (error) {
2074 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2075 __func__, error);
2076 return error;
2077 }
2078 next_agino = be32_to_cpu(dip->di_next_unlinked);
2079 ASSERT(next_agino != 0);
2080 if (next_agino != NULLAGINO) {
2081 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2082 offset = ip->i_imap.im_boffset +
2083 offsetof(xfs_dinode_t, di_next_unlinked);
2084
2085 /* need to recalc the inode CRC if appropriate */
2086 xfs_dinode_calc_crc(mp, dip);
2087
2088 xfs_trans_inode_buf(tp, ibp);
2089 xfs_trans_log_buf(tp, ibp, offset,
2090 (offset + sizeof(xfs_agino_t) - 1));
2091 xfs_inobp_check(mp, ibp);
2092 } else {
2093 xfs_trans_brelse(tp, ibp);
2094 }
2095 /*
2096 * Point the bucket head pointer at the next inode.
2097 */
2098 ASSERT(next_agino != 0);
2099 ASSERT(next_agino != agino);
2100 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2101 offset = offsetof(xfs_agi_t, agi_unlinked) +
2102 (sizeof(xfs_agino_t) * bucket_index);
2103 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
2104 xfs_trans_log_buf(tp, agibp, offset,
2105 (offset + sizeof(xfs_agino_t) - 1));
2106 } else {
2107 /*
2108 * We need to search the list for the inode being freed.
2109 */
2110 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2111 last_ibp = NULL;
2112 while (next_agino != agino) {
2113 struct xfs_imap imap;
2114
2115 if (last_ibp)
2116 xfs_trans_brelse(tp, last_ibp);
2117
2118 imap.im_blkno = 0;
2119 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2120
2121 error = xfs_imap(mp, tp, next_ino, &imap, 0);
2122 if (error) {
2123 xfs_warn(mp,
2124 "%s: xfs_imap returned error %d.",
2125 __func__, error);
2126 return error;
2127 }
2128
2129 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2130 &last_ibp, 0, 0);
2131 if (error) {
2132 xfs_warn(mp,
2133 "%s: xfs_imap_to_bp returned error %d.",
2134 __func__, error);
2135 return error;
2136 }
2137
2138 last_offset = imap.im_boffset;
2139 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2140 ASSERT(next_agino != NULLAGINO);
2141 ASSERT(next_agino != 0);
2142 }
2143
2144 /*
2145 * Now last_ibp points to the buffer previous to us on the
2146 * unlinked list. Pull us from the list.
2147 */
2148 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2149 0, 0);
2150 if (error) {
2151 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
2152 __func__, error);
2153 return error;
2154 }
2155 next_agino = be32_to_cpu(dip->di_next_unlinked);
2156 ASSERT(next_agino != 0);
2157 ASSERT(next_agino != agino);
2158 if (next_agino != NULLAGINO) {
2159 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2160 offset = ip->i_imap.im_boffset +
2161 offsetof(xfs_dinode_t, di_next_unlinked);
2162
2163 /* need to recalc the inode CRC if appropriate */
2164 xfs_dinode_calc_crc(mp, dip);
2165
2166 xfs_trans_inode_buf(tp, ibp);
2167 xfs_trans_log_buf(tp, ibp, offset,
2168 (offset + sizeof(xfs_agino_t) - 1));
2169 xfs_inobp_check(mp, ibp);
2170 } else {
2171 xfs_trans_brelse(tp, ibp);
2172 }
2173 /*
2174 * Point the previous inode on the list to the next inode.
2175 */
2176 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2177 ASSERT(next_agino != 0);
2178 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2179
2180 /* need to recalc the inode CRC if appropriate */
2181 xfs_dinode_calc_crc(mp, last_dip);
2182
2183 xfs_trans_inode_buf(tp, last_ibp);
2184 xfs_trans_log_buf(tp, last_ibp, offset,
2185 (offset + sizeof(xfs_agino_t) - 1));
2186 xfs_inobp_check(mp, last_ibp);
2187 }
2188 return 0;
2189}
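
/*
 * Continuing the illustration above: removing agino 37 from a bucket
 * holding 99 -> 37 -> 12 means walking from the bucket head to find the
 * predecessor (the list is singly linked, so there are no back pointers)
 * and then splicing around the victim:
 *
 *	before:	agi_unlinked[b] -> 99 -> 37 -> 12 -> NULLAGINO
 *	after:	agi_unlinked[b] -> 99 -> 12 -> NULLAGINO
 *
 * Removal from the head is the cheap case handled first, since it only
 * touches the AGI and, if needed, our own di_next_unlinked.
 */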
2190
2191/*
2192 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2193 * inodes that are in memory - they all must be marked stale and attached to
2194 * the cluster buffer.
2195 */
2196STATIC int
2197xfs_ifree_cluster(
2198 xfs_inode_t *free_ip,
2199 xfs_trans_t *tp,
2200 struct xfs_icluster *xic)
2201{
2202 xfs_mount_t *mp = free_ip->i_mount;
2203 int blks_per_cluster;
2204 int inodes_per_cluster;
2205 int nbufs;
2206 int i, j;
2207 int ioffset;
2208 xfs_daddr_t blkno;
2209 xfs_buf_t *bp;
2210 xfs_inode_t *ip;
2211 xfs_inode_log_item_t *iip;
2212 xfs_log_item_t *lip;
2213 struct xfs_perag *pag;
2214 xfs_ino_t inum;
2215
2216 inum = xic->first_ino;
2217 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2218 blks_per_cluster = xfs_icluster_size_fsb(mp);
2219 inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2220 nbufs = mp->m_ialloc_blks / blks_per_cluster;
2221
2222 for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
2223 /*
2224 * The allocation bitmap tells us which inodes of the chunk were
2225 * physically allocated. Skip the cluster if an inode falls into
2226 * a sparse region.
2227 */
2228 ioffset = inum - xic->first_ino;
2229 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2230 ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
2231 continue;
2232 }
2233
2234 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2235 XFS_INO_TO_AGBNO(mp, inum));
2236
2237 /*
2238 * We obtain and lock the backing buffer first in the process
2239 * here, as we have to ensure that any dirty inode that we
2240 * can't get the flush lock on is attached to the buffer.
2241 * If we scan the in-memory inodes first, then buffer IO can
2242 * complete before we get a lock on it, and hence we may fail
2243 * to mark all the active inodes on the buffer stale.
2244 */
2245 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2246 mp->m_bsize * blks_per_cluster,
2247 XBF_UNMAPPED);
2248
2249 if (!bp)
2250 return -ENOMEM;
2251
2252 /*
2253 * This buffer may not have been correctly initialised as we
2254 * didn't read it from disk. That's not important because we are
2255		 * only using it to mark the buffer as stale in the log, and to
2256		 * attach stale cached inodes on it. That means it will never be
2257		 * dispatched for IO. If it is, we want to know about it, and we
2258		 * want it to fail. We can achieve this by adding a write
2259 * verifier to the buffer.
2260 */
2261 bp->b_ops = &xfs_inode_buf_ops;
2262
2263 /*
2264 * Walk the inodes already attached to the buffer and mark them
2265 * stale. These will all have the flush locks held, so an
2266 * in-memory inode walk can't lock them. By marking them all
2267 * stale first, we will not attempt to lock them in the loop
2268 * below as the XFS_ISTALE flag will be set.
2269 */
2270 lip = bp->b_fspriv;
2271 while (lip) {
2272 if (lip->li_type == XFS_LI_INODE) {
2273 iip = (xfs_inode_log_item_t *)lip;
2274 ASSERT(iip->ili_logged == 1);
2275 lip->li_cb = xfs_istale_done;
2276 xfs_trans_ail_copy_lsn(mp->m_ail,
2277 &iip->ili_flush_lsn,
2278 &iip->ili_item.li_lsn);
2279 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2280 }
2281 lip = lip->li_bio_list;
2282 }
2283
2285 /*
2286 * For each inode in memory attempt to add it to the inode
2287 * buffer and set it up for being staled on buffer IO
2288 * completion. This is safe as we've locked out tail pushing
2289 * and flushing by locking the buffer.
2290 *
2291 * We have already marked every inode that was part of a
2292 * transaction stale above, which means there is no point in
2293 * even trying to lock them.
2294 */
2295 for (i = 0; i < inodes_per_cluster; i++) {
2296retry:
2297 rcu_read_lock();
2298 ip = radix_tree_lookup(&pag->pag_ici_root,
2299 XFS_INO_TO_AGINO(mp, (inum + i)));
2300
2301 /* Inode not in memory, nothing to do */
2302 if (!ip) {
2303 rcu_read_unlock();
2304 continue;
2305 }
2306
2307 /*
2308 * because this is an RCU protected lookup, we could
2309 * find a recently freed or even reallocated inode
2310 * during the lookup. We need to check under the
2311 * i_flags_lock for a valid inode here. Skip it if it
2312 * is not valid, the wrong inode or stale.
2313 */
2314 spin_lock(&ip->i_flags_lock);
2315 if (ip->i_ino != inum + i ||
2316 __xfs_iflags_test(ip, XFS_ISTALE)) {
2317 spin_unlock(&ip->i_flags_lock);
2318 rcu_read_unlock();
2319 continue;
2320 }
2321 spin_unlock(&ip->i_flags_lock);
2322
2323 /*
2324 * Don't try to lock/unlock the current inode, but we
2325 * _cannot_ skip the other inodes that we did not find
2326 * in the list attached to the buffer and are not
2327 * already marked stale. If we can't lock it, back off
2328 * and retry.
2329 */
2330 if (ip != free_ip &&
2331 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2332 rcu_read_unlock();
2333 delay(1);
2334 goto retry;
2335 }
2336 rcu_read_unlock();
2337
2338 xfs_iflock(ip);
2339 xfs_iflags_set(ip, XFS_ISTALE);
2340
2341 /*
2342 * we don't need to attach clean inodes or those only
2343 * with unlogged changes (which we throw away, anyway).
2344 */
2345 iip = ip->i_itemp;
2346 if (!iip || xfs_inode_clean(ip)) {
2347 ASSERT(ip != free_ip);
2348 xfs_ifunlock(ip);
2349 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2350 continue;
2351 }
2352
2353 iip->ili_last_fields = iip->ili_fields;
2354 iip->ili_fields = 0;
2355 iip->ili_fsync_fields = 0;
2356 iip->ili_logged = 1;
2357 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2358 &iip->ili_item.li_lsn);
2359
2360 xfs_buf_attach_iodone(bp, xfs_istale_done,
2361 &iip->ili_item);
2362
2363 if (ip != free_ip)
2364 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2365 }
2366
2367 xfs_trans_stale_inode_buf(tp, bp);
2368 xfs_trans_binval(tp, bp);
2369 }
2370
2371 xfs_perag_put(pag);
2372 return 0;
2373}
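
/*
 * A worked example of the geometry used above, with plausible (not
 * required) values: 4k blocks, 512 byte inodes (sb_inopblog = 3), an 8k
 * inode cluster and an 8 block (64 inode) inode allocation chunk:
 *
 *	blks_per_cluster	= 8192 / 4096		= 2
 *	inodes_per_cluster	= 2 << 3		= 16
 *	nbufs			= m_ialloc_blks (8) / 2	= 4
 *
 * i.e. the 64 inode chunk is staled through four 2-block cluster buffers.
 */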
2374
2375/*
2376 * This is called to return an inode to the inode free list.
2377 * The inode should already be truncated to 0 length and have
2378 * no pages associated with it. This routine also assumes that
2379 * the inode is already a part of the transaction.
2380 *
2381 * The on-disk copy of the inode will have been added to the list
2382 * of unlinked inodes in the AGI. We need to remove the inode from
2383 * that list atomically with respect to freeing it here.
2384 */
2385int
2386xfs_ifree(
2387 xfs_trans_t *tp,
2388 xfs_inode_t *ip,
2389 xfs_bmap_free_t *flist)
2390{
2391 int error;
2392 struct xfs_icluster xic = { 0 };
2393
2394 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2395 ASSERT(VFS_I(ip)->i_nlink == 0);
2396 ASSERT(ip->i_d.di_nextents == 0);
2397 ASSERT(ip->i_d.di_anextents == 0);
2398 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2399 ASSERT(ip->i_d.di_nblocks == 0);
2400
2401 /*
2402 * Pull the on-disk inode from the AGI unlinked list.
2403 */
2404 error = xfs_iunlink_remove(tp, ip);
2405 if (error)
2406 return error;
2407
2408 error = xfs_difree(tp, ip->i_ino, flist, &xic);
2409 if (error)
2410 return error;
2411
2412 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2413 ip->i_d.di_flags = 0;
2414 ip->i_d.di_dmevmask = 0;
2415 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2416 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2417 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2418 /*
2419 * Bump the generation count so no one will be confused
2420 * by reincarnations of this inode.
2421 */
2422 VFS_I(ip)->i_generation++;
2423 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2424
2425 if (xic.deleted)
2426 error = xfs_ifree_cluster(ip, tp, &xic);
2427
2428 return error;
2429}
2430
2431/*
2432 * This is called to unpin an inode. The caller must have the inode locked
2433 * in at least shared mode so that the buffer cannot be subsequently pinned
2434 * once someone is waiting for it to be unpinned.
2435 */
2436static void
2437xfs_iunpin(
2438 struct xfs_inode *ip)
2439{
2440 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2441
2442 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2443
2444 /* Give the log a push to start the unpinning I/O */
2445 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2446
2447}
2448
2449static void
2450__xfs_iunpin_wait(
2451 struct xfs_inode *ip)
2452{
2453 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2454 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2455
2456 xfs_iunpin(ip);
2457
2458 do {
2459 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2460 if (xfs_ipincount(ip))
2461 io_schedule();
2462 } while (xfs_ipincount(ip));
2463 finish_wait(wq, &wait.wait);
2464}
2465
2466void
2467xfs_iunpin_wait(
2468 struct xfs_inode *ip)
2469{
2470 if (xfs_ipincount(ip))
2471 __xfs_iunpin_wait(ip);
2472}
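
/*
 * The wait above uses the standard wait-bit handshake. The pin count is
 * held in the inode log item, and the unpin side (in xfs_inode_item.c)
 * does roughly the following when log I/O completion drops the last pin:
 *
 *	if (atomic_dec_and_test(&ip->i_pincount))
 *		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 *
 * which pairs with the prepare_to_wait()/io_schedule() loop in
 * __xfs_iunpin_wait() above.
 */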
2473
2474/*
2475 * Removing an inode from the namespace involves removing the directory entry
2476 * and dropping the link count on the inode. Removing the directory entry can
2477 * result in locking an AGF (directory blocks were freed) and removing a link
2478 * count can result in placing the inode on an unlinked list which results in
2479 * locking an AGI.
2480 *
2481 * The big problem here is that we have an ordering constraint on AGF and AGI
2482 * locking - inode allocation locks the AGI, then can allocate a new extent for
2483 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2484 * removes the inode from the unlinked list, requiring that we lock the AGI
2485 * first, and then freeing the inode can result in an inode chunk being freed
2486 * and hence freeing disk space requiring that we lock an AGF.
2487 *
2488 * Hence the ordering that is imposed by other parts of the code is AGI before
2489 * AGF. This means we cannot remove the directory entry before we drop the inode
2490 * reference count and put it on the unlinked list as this results in a lock
2491 * order of AGF then AGI, and this can deadlock against inode allocation and
2492 * freeing. Therefore we must drop the link counts before we remove the
2493 * directory entry.
2494 *
2495 * This is still safe from a transactional point of view - it is not until we
2496 * get to xfs_bmap_finish() that we have the possibility of multiple
2497 * transactions in this operation. Hence as long as we remove the directory
2498 * entry and drop the link count in the first transaction of the remove
2499 * operation, there are no transactional constraints on the ordering here.
2500 */
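/*
 * As an illustrative summary of the orderings described above:
 *
 *	inode allocation:	AGI (grab inode)    then AGF (grow chunk)
 *	inode freeing:		AGI (unlinked list) then AGF (free chunk)
 *	dir entry removal:	AGF (free dir block)
 *
 * Dropping the link count first means any AGI lock in this transaction is
 * taken before the directory code can touch an AGF, preserving the
 * AGI -> AGF order everywhere.
 */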
2501int
2502xfs_remove(
2503 xfs_inode_t *dp,
2504 struct xfs_name *name,
2505 xfs_inode_t *ip)
2506{
2507 xfs_mount_t *mp = dp->i_mount;
2508 xfs_trans_t *tp = NULL;
2509 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2510 int error = 0;
2511 xfs_bmap_free_t free_list;
2512 xfs_fsblock_t first_block;
2513 uint resblks;
2514
2515 trace_xfs_remove(dp, name);
2516
2517 if (XFS_FORCED_SHUTDOWN(mp))
2518 return -EIO;
2519
2520 error = xfs_qm_dqattach(dp, 0);
2521 if (error)
2522 goto std_return;
2523
2524 error = xfs_qm_dqattach(ip, 0);
2525 if (error)
2526 goto std_return;
2527
2528 if (is_dir)
2529 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
2530 else
2531 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
2532
2533 /*
2534 * We try to get the real space reservation first,
2535 * allowing for directory btree deletion(s) implying
2536 * possible bmap insert(s). If we can't get the space
2537 * reservation then we use 0 instead, and avoid the bmap
2538 * btree insert(s) in the directory code by, if the bmap
2539 * insert tries to happen, instead trimming the LAST
2540 * block from the directory.
2541 */
2542 resblks = XFS_REMOVE_SPACE_RES(mp);
2543 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
2544 if (error == -ENOSPC) {
2545 resblks = 0;
2546 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
2547 }
2548 if (error) {
2549 ASSERT(error != -ENOSPC);
2550 goto out_trans_cancel;
2551 }
2552
2553 xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
2554 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2555
2556 xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
2557 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2558
2559 /*
2560 * If we're removing a directory perform some additional validation.
2561 */
2562 if (is_dir) {
2563 ASSERT(VFS_I(ip)->i_nlink >= 2);
2564 if (VFS_I(ip)->i_nlink != 2) {
2565 error = -ENOTEMPTY;
2566 goto out_trans_cancel;
2567 }
2568 if (!xfs_dir_isempty(ip)) {
2569 error = -ENOTEMPTY;
2570 goto out_trans_cancel;
2571 }
2572
2573 /* Drop the link from ip's "..". */
2574 error = xfs_droplink(tp, dp);
2575 if (error)
2576 goto out_trans_cancel;
2577
2578 /* Drop the "." link from ip to self. */
2579 error = xfs_droplink(tp, ip);
2580 if (error)
2581 goto out_trans_cancel;
2582 } else {
2583 /*
2584 * When removing a non-directory we need to log the parent
2585 * inode here. For a directory this is done implicitly
2586 * by the xfs_droplink call for the ".." entry.
2587 */
2588 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2589 }
2590 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2591
2592 /* Drop the link from dp to ip. */
2593 error = xfs_droplink(tp, ip);
2594 if (error)
2595 goto out_trans_cancel;
2596
2597 xfs_bmap_init(&free_list, &first_block);
2598 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2599 &first_block, &free_list, resblks);
2600 if (error) {
2601 ASSERT(error != -ENOENT);
2602 goto out_bmap_cancel;
2603 }
2604
2605 /*
2606 * If this is a synchronous mount, make sure that the
2607 * remove transaction goes to disk before returning to
2608 * the user.
2609 */
2610 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2611 xfs_trans_set_sync(tp);
2612
2613 error = xfs_bmap_finish(&tp, &free_list, NULL);
2614 if (error)
2615 goto out_bmap_cancel;
2616
2617 error = xfs_trans_commit(tp);
2618 if (error)
2619 goto std_return;
2620
2621 if (is_dir && xfs_inode_is_filestream(ip))
2622 xfs_filestream_deassociate(ip);
2623
2624 return 0;
2625
2626 out_bmap_cancel:
2627 xfs_bmap_cancel(&free_list);
2628 out_trans_cancel:
2629 xfs_trans_cancel(tp);
2630 std_return:
2631 return error;
2632}
2633
2634/*
2635 * Enter all inodes for a rename transaction into a sorted array.
2636 */
2637#define __XFS_SORT_INODES 5
2638STATIC void
2639xfs_sort_for_rename(
2640 struct xfs_inode *dp1, /* in: old (source) directory inode */
2641 struct xfs_inode *dp2, /* in: new (target) directory inode */
2642 struct xfs_inode *ip1, /* in: inode of old entry */
2643 struct xfs_inode *ip2, /* in: inode of new entry */
2644 struct xfs_inode *wip, /* in: whiteout inode */
2645 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2646 int *num_inodes) /* in/out: inodes in array */
2647{
2648 int i, j;
2649
2650 ASSERT(*num_inodes == __XFS_SORT_INODES);
2651 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2652
2653 /*
2654 * i_tab contains a list of pointers to inodes. We initialize
2655 * the table here & we'll sort it. We will then use it to
2656 * order the acquisition of the inode locks.
2657 *
2658 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2659 */
2660 i = 0;
2661 i_tab[i++] = dp1;
2662 i_tab[i++] = dp2;
2663 i_tab[i++] = ip1;
2664 if (ip2)
2665 i_tab[i++] = ip2;
2666 if (wip)
2667 i_tab[i++] = wip;
2668 *num_inodes = i;
2669
2670 /*
2671 * Sort the elements via bubble sort. (Remember, there are at
2672 * most 5 elements to sort, so this is adequate.)
2673 */
2674 for (i = 0; i < *num_inodes; i++) {
2675 for (j = 1; j < *num_inodes; j++) {
2676 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2677 struct xfs_inode *temp = i_tab[j];
2678 i_tab[j] = i_tab[j-1];
2679 i_tab[j-1] = temp;
2680 }
2681 }
2682 }
2683}
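
/*
 * For example (illustrative inode numbers): a rename within a single
 * directory with an existing target -- dp1 == dp2 == 17, ip1 == 42,
 * ip2 == 9, no whiteout -- gives num_inodes = 4 and
 *
 *	i_tab before sort:	{ 17, 17, 42, 9 }
 *	i_tab after sort:	{ 9, 17, 17, 42 }
 *
 * xfs_lock_inodes() then takes the ILOCKs in this ascending order
 * (skipping adjacent duplicates), which is what prevents ABBA deadlocks
 * between concurrent renames.
 */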
2684
2685static int
2686xfs_finish_rename(
2687 struct xfs_trans *tp,
2688 struct xfs_bmap_free *free_list)
2689{
2690 int error;
2691
2692 /*
2693 * If this is a synchronous mount, make sure that the rename transaction
2694 * goes to disk before returning to the user.
2695 */
2696 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2697 xfs_trans_set_sync(tp);
2698
2699 error = xfs_bmap_finish(&tp, free_list, NULL);
2700 if (error) {
2701 xfs_bmap_cancel(free_list);
2702 xfs_trans_cancel(tp);
2703 return error;
2704 }
2705
2706 return xfs_trans_commit(tp);
2707}
2708
2709/*
2710 * xfs_cross_rename()
2711 *
2712 * responsible for handling RENAME_EXCHANGE flag in renameat2() sytemcall
2713 */
2714STATIC int
2715xfs_cross_rename(
2716 struct xfs_trans *tp,
2717 struct xfs_inode *dp1,
2718 struct xfs_name *name1,
2719 struct xfs_inode *ip1,
2720 struct xfs_inode *dp2,
2721 struct xfs_name *name2,
2722 struct xfs_inode *ip2,
2723 struct xfs_bmap_free *free_list,
2724 xfs_fsblock_t *first_block,
2725 int spaceres)
2726{
2727 int error = 0;
2728 int ip1_flags = 0;
2729 int ip2_flags = 0;
2730 int dp2_flags = 0;
2731
2732 /* Swap inode number for dirent in first parent */
2733 error = xfs_dir_replace(tp, dp1, name1,
2734 ip2->i_ino,
2735 first_block, free_list, spaceres);
2736 if (error)
2737 goto out_trans_abort;
2738
2739 /* Swap inode number for dirent in second parent */
2740 error = xfs_dir_replace(tp, dp2, name2,
2741 ip1->i_ino,
2742 first_block, free_list, spaceres);
2743 if (error)
2744 goto out_trans_abort;
2745
2746 /*
2747 * If we're renaming one or more directories across different parents,
2748 * update the respective ".." entries (and link counts) to match the new
2749 * parents.
2750 */
2751 if (dp1 != dp2) {
2752 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2753
2754 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2755 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2756 dp1->i_ino, first_block,
2757 free_list, spaceres);
2758 if (error)
2759 goto out_trans_abort;
2760
2761 /* transfer ip2 ".." reference to dp1 */
2762 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2763 error = xfs_droplink(tp, dp2);
2764 if (error)
2765 goto out_trans_abort;
2766 error = xfs_bumplink(tp, dp1);
2767 if (error)
2768 goto out_trans_abort;
2769 }
2770
2771 /*
2772			 * Although ip1 isn't changed here, userspace needs
2773			 * to be notified about the change, so that applications
2774			 * relying on it (like backup tools) will properly
2775			 * notice the change.
2776 */
2777 ip1_flags |= XFS_ICHGTIME_CHG;
2778 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2779 }
2780
2781 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2782 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2783 dp2->i_ino, first_block,
2784 free_list, spaceres);
2785 if (error)
2786 goto out_trans_abort;
2787
2788 /* transfer ip1 ".." reference to dp2 */
2789 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2790 error = xfs_droplink(tp, dp1);
2791 if (error)
2792 goto out_trans_abort;
2793 error = xfs_bumplink(tp, dp2);
2794 if (error)
2795 goto out_trans_abort;
2796 }
2797
2798 /*
2799			 * Although ip2 isn't changed here, userspace needs
2800			 * to be notified about the change, so that applications
2801			 * relying on it (like backup tools) will properly
2802			 * notice the change.
2803 */
2804 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2805 ip2_flags |= XFS_ICHGTIME_CHG;
2806 }
2807 }
2808
2809 if (ip1_flags) {
2810 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2811 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2812 }
2813 if (ip2_flags) {
2814 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2815 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2816 }
2817 if (dp2_flags) {
2818 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2819 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2820 }
2821 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2822 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2823 return xfs_finish_rename(tp, free_list);
2824
2825out_trans_abort:
2826 xfs_bmap_cancel(free_list);
2827 xfs_trans_cancel(tp);
2828 return error;
2829}
2830
2831/*
2832 * xfs_rename_alloc_whiteout()
2833 *
2834 * Return a referenced, unlinked, unlocked inode that can be used as a
2835 * whiteout in a rename transaction. We use a tmpfile inode here so that if
2836 * we crash between allocating the inode and linking it into the rename
2837 * transaction, recovery will free the inode and we won't leak it.
2838 */
2839static int
2840xfs_rename_alloc_whiteout(
2841 struct xfs_inode *dp,
2842 struct xfs_inode **wip)
2843{
2844 struct xfs_inode *tmpfile;
2845 int error;
2846
2847 error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
2848 if (error)
2849 return error;
2850
2851 /*
2852 * Prepare the tmpfile inode as if it were created through the VFS.
2853 * Otherwise, the link increment paths will complain about nlink 0->1.
2854 * Drop the link count as done by d_tmpfile(), complete the inode setup
2855 * and flag it as linkable.
2856 */
2857 drop_nlink(VFS_I(tmpfile));
2858 xfs_finish_inode_setup(tmpfile);
2859 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2860
2861 *wip = tmpfile;
2862 return 0;
2863}
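
/*
 * Lifecycle of the whiteout inode, in the order the pieces appear in this
 * file (sketch):
 *
 *	xfs_create_tmpfile()	allocated, placed on the AGI unlinked list
 *	drop_nlink()		VFS nlink 1 -> 0, as d_tmpfile() would do
 *	...rename transaction runs...
 *	xfs_bumplink()		nlink 0 -> 1, now reachable via src_name
 *	xfs_iunlink_remove()	taken off the unlinked list
 *
 * A crash before the last two steps leaves the inode on the unlinked
 * list, so log recovery frees it rather than leaking it.
 */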
2864
2865/*
2866 * xfs_rename
2867 */
2868int
2869xfs_rename(
2870 struct xfs_inode *src_dp,
2871 struct xfs_name *src_name,
2872 struct xfs_inode *src_ip,
2873 struct xfs_inode *target_dp,
2874 struct xfs_name *target_name,
2875 struct xfs_inode *target_ip,
2876 unsigned int flags)
2877{
2878 struct xfs_mount *mp = src_dp->i_mount;
2879 struct xfs_trans *tp;
2880 struct xfs_bmap_free free_list;
2881 xfs_fsblock_t first_block;
2882 struct xfs_inode *wip = NULL; /* whiteout inode */
2883 struct xfs_inode *inodes[__XFS_SORT_INODES];
2884 int num_inodes = __XFS_SORT_INODES;
2885 bool new_parent = (src_dp != target_dp);
2886 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2887 int spaceres;
2888 int error;
2889
2890 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2891
2892 if ((flags & RENAME_EXCHANGE) && !target_ip)
2893 return -EINVAL;
2894
2895 /*
2896	 * If we are doing a whiteout operation, allocate the whiteout inode
2897	 * that will be left in place of the source entry and ensure its type
2898	 * is set appropriately.
2899 */
2900 if (flags & RENAME_WHITEOUT) {
2901 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
2902 error = xfs_rename_alloc_whiteout(target_dp, &wip);
2903 if (error)
2904 return error;
2905
2906 /* setup target dirent info as whiteout */
2907 src_name->type = XFS_DIR3_FT_CHRDEV;
2908 }
2909
2910 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2911 inodes, &num_inodes);
2912
2913 tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
2914 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2915 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
2916 if (error == -ENOSPC) {
2917 spaceres = 0;
2918 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
2919 }
2920 if (error)
2921 goto out_trans_cancel;
2922
2923 /*
2924 * Attach the dquots to the inodes
2925 */
2926 error = xfs_qm_vop_rename_dqattach(inodes);
2927 if (error)
2928 goto out_trans_cancel;
2929
2930 /*
2931 * Lock all the participating inodes. Depending upon whether
2932 * the target_name exists in the target directory, and
2933 * whether the target directory is the same as the source
2934	 * directory, we can lock anywhere from 2 to 5 inodes.
2935 */
2936 if (!new_parent)
2937 xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
2938 else
2939 xfs_lock_two_inodes(src_dp, target_dp,
2940 XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
2941
2942 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2943
2944 /*
2945 * Join all the inodes to the transaction. From this point on,
2946 * we can rely on either trans_commit or trans_cancel to unlock
2947 * them.
2948 */
2949 xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
2950 if (new_parent)
2951 xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
2952 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2953 if (target_ip)
2954 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2955 if (wip)
2956 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2957
2958 /*
2959 * If we are using project inheritance, we only allow renames
2960 * into our tree when the project IDs are the same; else the
2961 * tree quota mechanism would be circumvented.
2962 */
2963 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
2964 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
2965 error = -EXDEV;
2966 goto out_trans_cancel;
2967 }
2968
2969 xfs_bmap_init(&free_list, &first_block);
2970
2971 /* RENAME_EXCHANGE is unique from here on. */
2972 if (flags & RENAME_EXCHANGE)
2973 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2974 target_dp, target_name, target_ip,
2975 &free_list, &first_block, spaceres);
2976
2977 /*
2978 * Set up the target.
2979 */
2980 if (target_ip == NULL) {
2981 /*
2982 * If there's no space reservation, check the entry will
2983 * fit before actually inserting it.
2984 */
2985 if (!spaceres) {
2986 error = xfs_dir_canenter(tp, target_dp, target_name);
2987 if (error)
2988 goto out_trans_cancel;
2989 }
2990 /*
2991 * If target does not exist and the rename crosses
2992 * directories, adjust the target directory link count
2993 * to account for the ".." reference from the new entry.
2994 */
2995 error = xfs_dir_createname(tp, target_dp, target_name,
2996 src_ip->i_ino, &first_block,
2997 &free_list, spaceres);
2998 if (error)
2999 goto out_bmap_cancel;
3000
3001 xfs_trans_ichgtime(tp, target_dp,
3002 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3003
3004 if (new_parent && src_is_directory) {
3005 error = xfs_bumplink(tp, target_dp);
3006 if (error)
3007 goto out_bmap_cancel;
3008 }
3009 } else { /* target_ip != NULL */
3010 /*
3011 * If target exists and it's a directory, check that both
3012 * target and source are directories and that target can be
3013 * destroyed, or that neither is a directory.
3014 */
3015 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
3016 /*
3017 * Make sure target dir is empty.
3018 */
3019 if (!(xfs_dir_isempty(target_ip)) ||
3020 (VFS_I(target_ip)->i_nlink > 2)) {
3021 error = -EEXIST;
3022 goto out_trans_cancel;
3023 }
3024 }
3025
3026 /*
3027 * Link the source inode under the target name.
3028 * If the source inode is a directory and we are moving
3029 * it across directories, its ".." entry will be
3030 * inconsistent until we replace that down below.
3031 *
3032 * In case there is already an entry with the same
3033 * name at the destination directory, remove it first.
3034 */
3035 error = xfs_dir_replace(tp, target_dp, target_name,
3036 src_ip->i_ino,
3037 &first_block, &free_list, spaceres);
3038 if (error)
3039 goto out_bmap_cancel;
3040
3041 xfs_trans_ichgtime(tp, target_dp,
3042 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3043
3044 /*
3045 * Decrement the link count on the target since the target
3046 * dir no longer points to it.
3047 */
3048 error = xfs_droplink(tp, target_ip);
3049 if (error)
3050 goto out_bmap_cancel;
3051
3052 if (src_is_directory) {
3053 /*
3054 * Drop the link from the old "." entry.
3055 */
3056 error = xfs_droplink(tp, target_ip);
3057 if (error)
3058 goto out_bmap_cancel;
3059 }
3060 } /* target_ip != NULL */
3061
3062 /*
3063 * Remove the source.
3064 */
3065 if (new_parent && src_is_directory) {
3066 /*
3067 * Rewrite the ".." entry to point to the new
3068 * directory.
3069 */
3070 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3071 target_dp->i_ino,
3072 &first_block, &free_list, spaceres);
3073 ASSERT(error != -EEXIST);
3074 if (error)
3075 goto out_bmap_cancel;
3076 }
3077
3078 /*
3079 * We always want to hit the ctime on the source inode.
3080 *
3081 * This isn't strictly required by the standards since the source
3082 * inode isn't really being changed, but old unix file systems did
3083 * it and some incremental backup programs won't work without it.
3084 */
3085 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3086 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3087
3088 /*
3089 * Adjust the link count on src_dp. This is necessary when
3090 * renaming a directory, either within one parent when
3091 * the target existed, or across two parent directories.
3092 */
3093 if (src_is_directory && (new_parent || target_ip != NULL)) {
3094
3095 /*
3096 * Decrement link count on src_directory since the
3097 * entry that's moved no longer points to it.
3098 */
3099 error = xfs_droplink(tp, src_dp);
3100 if (error)
3101 goto out_bmap_cancel;
3102 }
3103
3104 /*
3105 * For whiteouts, we only need to update the source dirent with the
3106 * inode number of the whiteout inode rather than removing it
3107 * altogether.
3108 */
3109 if (wip) {
3110 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3111 &first_block, &free_list, spaceres);
3112 } else
3113 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3114 &first_block, &free_list, spaceres);
3115 if (error)
3116 goto out_bmap_cancel;
3117
3118 /*
3119 * For whiteouts, we need to bump the link count on the whiteout inode.
3120 * This means that failures all the way up to this point leave the inode
3121 * on the unlinked list and so cleanup is a simple matter of dropping
3122 * the remaining reference to it. If we fail here after bumping the link
3123 * count, we're shutting down the filesystem so we'll never see the
3124 * intermediate state on disk.
3125 */
3126 if (wip) {
3127 ASSERT(VFS_I(wip)->i_nlink == 0);
3128 error = xfs_bumplink(tp, wip);
3129 if (error)
3130 goto out_bmap_cancel;
3131 error = xfs_iunlink_remove(tp, wip);
3132 if (error)
3133 goto out_bmap_cancel;
3134 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3135
3136 /*
3137 * Now we have a real link, clear the "I'm a tmpfile" state
3138 * flag from the inode so it doesn't accidentally get misused in
3139 * future.
3140 */
3141 VFS_I(wip)->i_state &= ~I_LINKABLE;
3142 }
3143
3144 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3145 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3146 if (new_parent)
3147 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3148
3149 error = xfs_finish_rename(tp, &free_list);
3150 if (wip)
3151 IRELE(wip);
3152 return error;
3153
3154out_bmap_cancel:
3155 xfs_bmap_cancel(&free_list);
3156out_trans_cancel:
3157 xfs_trans_cancel(tp);
3158 if (wip)
3159 IRELE(wip);
3160 return error;
3161}
3162
3163STATIC int
3164xfs_iflush_cluster(
3165 xfs_inode_t *ip,
3166 xfs_buf_t *bp)
3167{
3168 xfs_mount_t *mp = ip->i_mount;
3169 struct xfs_perag *pag;
3170 unsigned long first_index, mask;
3171 unsigned long inodes_per_cluster;
3172 int ilist_size;
3173 xfs_inode_t **ilist;
3174 xfs_inode_t *iq;
3175 int nr_found;
3176 int clcount = 0;
3177 int bufwasdelwri;
3178 int i;
3179
3180 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3181
3182 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
3183 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3184 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
3185 if (!ilist)
3186 goto out_put;
3187
3188 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
3189 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3190 rcu_read_lock();
3191 /* really need a gang lookup range call here */
3192 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
3193 first_index, inodes_per_cluster);
3194 if (nr_found == 0)
3195 goto out_free;
3196
3197 for (i = 0; i < nr_found; i++) {
3198 iq = ilist[i];
3199 if (iq == ip)
3200 continue;
3201
3202 /*
3203 * because this is an RCU protected lookup, we could find a
3204 * recently freed or even reallocated inode during the lookup.
3205 * We need to check under the i_flags_lock for a valid inode
3206 * here. Skip it if it is not valid or the wrong inode.
3207 */
3208		spin_lock(&iq->i_flags_lock);
3209		if (!iq->i_ino ||
3210		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
3211			spin_unlock(&iq->i_flags_lock);
3212			continue;
3213		}
3214		spin_unlock(&iq->i_flags_lock);
3215
3216 /*
3217 * Do an un-protected check to see if the inode is dirty and
3218 * is a candidate for flushing. These checks will be repeated
3219 * later after the appropriate locks are acquired.
3220 */
3221 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
3222 continue;
3223
3224 /*
3225 * Try to get locks. If any are unavailable or it is pinned,
3226 * then this inode cannot be flushed and is skipped.
3227 */
3228
3229 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
3230 continue;
3231 if (!xfs_iflock_nowait(iq)) {
3232 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3233 continue;
3234 }
3235 if (xfs_ipincount(iq)) {
3236 xfs_ifunlock(iq);
3237 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3238 continue;
3239 }
3240
3241 /*
3242 * arriving here means that this inode can be flushed. First
3243 * re-check that it's dirty before flushing.
3244 */
3245 if (!xfs_inode_clean(iq)) {
3246 int error;
3247 error = xfs_iflush_int(iq, bp);
3248 if (error) {
3249 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3250 goto cluster_corrupt_out;
3251 }
3252 clcount++;
3253 } else {
3254 xfs_ifunlock(iq);
3255 }
3256 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3257 }
3258
3259 if (clcount) {
3260 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3261 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3262 }
3263
3264out_free:
3265 rcu_read_unlock();
3266 kmem_free(ilist);
3267out_put:
3268 xfs_perag_put(pag);
3269 return 0;
3270
3272cluster_corrupt_out:
3273 /*
3274 * Corruption detected in the clustering loop. Invalidate the
3275 * inode buffer and shut down the filesystem.
3276 */
3277 rcu_read_unlock();
3278 /*
3279 * Clean up the buffer. If it was delwri, just release it --
3280 * brelse can handle it with no problems. If not, shut down the
3281 * filesystem before releasing the buffer.
3282 */
3283 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
3284 if (bufwasdelwri)
3285 xfs_buf_relse(bp);
3286
3287 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3288
3289 if (!bufwasdelwri) {
3290 /*
3291 * Just like incore_relse: if we have b_iodone functions,
3292 * mark the buffer as an error and call them. Otherwise
3293 * mark it as stale and brelse.
3294 */
3295 if (bp->b_iodone) {
3296 bp->b_flags &= ~XBF_DONE;
3297 xfs_buf_stale(bp);
3298 xfs_buf_ioerror(bp, -EIO);
3299 xfs_buf_ioend(bp);
3300 } else {
3301 xfs_buf_stale(bp);
3302 xfs_buf_relse(bp);
3303 }
3304 }
3305
3306 /*
3307 * Unlocks the flush lock
3308 */
3309 xfs_iflush_abort(iq, false);
3310 kmem_free(ilist);
3311 xfs_perag_put(pag);
3312 return -EFSCORRUPTED;
3313}
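
/*
 * The mask arithmetic above selects exactly the inodes sharing ip's
 * cluster buffer. For example, with inodes_per_cluster = 16 the mask is
 * ~0xf, so for agino 0x123 first_index is 0x120 and the gang lookup
 * returns at most aginos 0x120..0x12f -- the inodes whose on-disk copies
 * live in the buffer being flushed.
 */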
3314
3315/*
3316 * Flush dirty inode metadata into the backing buffer.
3317 *
3318 * The caller must have the inode lock and the inode flush lock held. The
3319 * inode lock will still be held upon return to the caller, and the inode
3320 * flush lock will be released after the inode has reached the disk.
3321 *
3322 * The caller must write out the buffer returned in *bpp and release it.
3323 */
3324int
3325xfs_iflush(
3326 struct xfs_inode *ip,
3327 struct xfs_buf **bpp)
3328{
3329 struct xfs_mount *mp = ip->i_mount;
3330 struct xfs_buf *bp;
3331 struct xfs_dinode *dip;
3332 int error;
3333
3334 XFS_STATS_INC(mp, xs_iflush_count);
3335
3336 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3337 ASSERT(xfs_isiflocked(ip));
3338 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3339 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3340
3341 *bpp = NULL;
3342
3343 xfs_iunpin_wait(ip);
3344
3345 /*
3346 * For stale inodes we cannot rely on the backing buffer remaining
3347 * stale in cache for the remaining life of the stale inode and so
3348	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
3349	 * inodes. We have to check this after ensuring the inode is
3350 * unpinned so that it is safe to reclaim the stale inode after the
3351 * flush call.
3352 */
3353 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3354 xfs_ifunlock(ip);
3355 return 0;
3356 }
3357
3358 /*
3359 * This may have been unpinned because the filesystem is shutting
3360 * down forcibly. If that's the case we must not write this inode
3361 * to disk, because the log record didn't make it to disk.
3362 *
3363 * We also have to remove the log item from the AIL in this case,
3364 * as we wait for an empty AIL as part of the unmount process.
3365 */
3366 if (XFS_FORCED_SHUTDOWN(mp)) {
3367 error = -EIO;
3368 goto abort_out;
3369 }
3370
3371 /*
3372 * Get the buffer containing the on-disk inode.
3373 */
3374 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3375 0);
3376 if (error || !bp) {
3377 xfs_ifunlock(ip);
3378 return error;
3379 }
3380
3381 /*
3382 * First flush out the inode that xfs_iflush was called with.
3383 */
3384 error = xfs_iflush_int(ip, bp);
3385 if (error)
3386 goto corrupt_out;
3387
3388 /*
3389 * If the buffer is pinned then push on the log now so we won't
3390 * get stuck waiting in the write for too long.
3391 */
3392 if (xfs_buf_ispinned(bp))
3393 xfs_log_force(mp, 0);
3394
3395 /*
3396 * inode clustering:
3397 * see if other inodes can be gathered into this write
3398 */
3399 error = xfs_iflush_cluster(ip, bp);
3400 if (error)
3401 goto cluster_corrupt_out;
3402
3403 *bpp = bp;
3404 return 0;
3405
3406corrupt_out:
3407 xfs_buf_relse(bp);
3408 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3409cluster_corrupt_out:
3410 error = -EFSCORRUPTED;
3411abort_out:
3412 /*
3413 * Unlocks the flush lock
3414 */
3415 xfs_iflush_abort(ip, false);
3416 return error;
3417}
3418
3419STATIC int
3420xfs_iflush_int(
3421 struct xfs_inode *ip,
3422 struct xfs_buf *bp)
3423{
3424 struct xfs_inode_log_item *iip = ip->i_itemp;
3425 struct xfs_dinode *dip;
3426 struct xfs_mount *mp = ip->i_mount;
3427
3428 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3429 ASSERT(xfs_isiflocked(ip));
3430 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3431 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3432 ASSERT(iip != NULL && iip->ili_fields != 0);
3433 ASSERT(ip->i_d.di_version > 1);
3434
3435 /* set *dip = inode's place in the buffer */
3436 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3437
3438 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3439 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3440 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3441 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3442 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3443 goto corrupt_out;
3444 }
3445 if (S_ISREG(VFS_I(ip)->i_mode)) {
3446 if (XFS_TEST_ERROR(
3447 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3448 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3449 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3450 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3451 "%s: Bad regular inode %Lu, ptr 0x%p",
3452 __func__, ip->i_ino, ip);
3453 goto corrupt_out;
3454 }
3455 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3456 if (XFS_TEST_ERROR(
3457 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3458 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3459 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3460 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3461 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3462 "%s: Bad directory inode %Lu, ptr 0x%p",
3463 __func__, ip->i_ino, ip);
3464 goto corrupt_out;
3465 }
3466 }
3467 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3468 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3469 XFS_RANDOM_IFLUSH_5)) {
3470 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3471 "%s: detected corrupt incore inode %Lu, "
3472 "total extents = %d, nblocks = %Ld, ptr 0x%p",
3473 __func__, ip->i_ino,
3474 ip->i_d.di_nextents + ip->i_d.di_anextents,
3475 ip->i_d.di_nblocks, ip);
3476 goto corrupt_out;
3477 }
3478 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3479 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3480 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3481 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3482 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3483 goto corrupt_out;
3484 }
3485
3486 /*
3487	 * Inode item log recovery for v2 inodes is dependent on the
3488	 * di_flushiter count for correct sequencing. We bump the flush
3489	 * iteration count so we can detect flushes which postdate a log record
3490	 * during recovery. This is redundant as we now log every change and
3491	 * hence this can't happen, but we still need to do it to ensure
3492 * backwards compatibility with old kernels that predate logging all
3493 * inode changes.
3494 */
3495 if (ip->i_d.di_version < 3)
3496 ip->i_d.di_flushiter++;
3497
3498 /*
3499 * Copy the dirty parts of the inode into the on-disk inode. We always
3500 * copy out the core of the inode, because if the inode is dirty at all
3501 * the core must be.
3502 */
3503 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3504
3505 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3506 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3507 ip->i_d.di_flushiter = 0;
3508
3509 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3510 if (XFS_IFORK_Q(ip))
3511 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3512 xfs_inobp_check(mp, bp);
3513
3514 /*
3515 * We've recorded everything logged in the inode, so we'd like to clear
3516 * the ili_fields bits so we don't log and flush things unnecessarily.
3517 * However, we can't stop logging all this information until the data
3518 * we've copied into the disk buffer is written to disk. If we did we
3519 * might overwrite the copy of the inode in the log with all the data
3520 * after re-logging only part of it, and in the face of a crash we
3521 * wouldn't have all the data we need to recover.
3522 *
3523 * What we do is move the bits to the ili_last_fields field. When
3524 * logging the inode, these bits are moved back to the ili_fields field.
3525 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3526 * know that the information those bits represent is permanently on
3527 * disk. As long as the flush completes before the inode is logged
3528 * again, then both ili_fields and ili_last_fields will be cleared.
3529 *
3530 * We can play with the ili_fields bits here, because the inode lock
3531 * must be held exclusively in order to set bits there and the flush
3532 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3533 * done routine can tell whether or not to look in the AIL. Also, store
3534 * the current LSN of the inode so that we can tell whether the item has
3535 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3536 * need the AIL lock, because it is a 64 bit value that cannot be read
3537 * atomically.
3538 */
3539 iip->ili_last_fields = iip->ili_fields;
3540 iip->ili_fields = 0;
3541 iip->ili_fsync_fields = 0;
3542 iip->ili_logged = 1;
3543
3544 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3545 &iip->ili_item.li_lsn);
3546
3547 /*
3548 * Attach the function xfs_iflush_done to the inode's
3549 * buffer. This will remove the inode from the AIL
3550 * and unlock the inode's flush lock when the inode is
3551 * completely written to disk.
3552 */
3553 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3554
3555 /* generate the checksum. */
3556 xfs_dinode_calc_crc(mp, dip);
3557
3558 ASSERT(bp->b_fspriv != NULL);
3559 ASSERT(bp->b_iodone != NULL);
3560 return 0;
3561
3562corrupt_out:
3563 return -EFSCORRUPTED;
3564}
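
/*
 * A sketch of the ili_fields hand-off described above:
 *
 *	xfs_trans_log_inode()	ili_fields |= XFS_ILOG_...
 *	xfs_iflush_int()	ili_last_fields = ili_fields; ili_fields = 0
 *	relog during flush	ili_fields |= ...	(inode stays dirty)
 *	xfs_iflush_done()	ili_last_fields = 0
 *
 * Only when both fields are zero is the inode clean, so an inode that is
 * relogged while a flush is in flight is never mistaken for clean.
 */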
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include <linux/iversion.h>
7
8#include "xfs.h"
9#include "xfs_fs.h"
10#include "xfs_shared.h"
11#include "xfs_format.h"
12#include "xfs_log_format.h"
13#include "xfs_trans_resv.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_inode.h"
17#include "xfs_dir2.h"
18#include "xfs_attr.h"
19#include "xfs_trans_space.h"
20#include "xfs_trans.h"
21#include "xfs_buf_item.h"
22#include "xfs_inode_item.h"
23#include "xfs_iunlink_item.h"
24#include "xfs_ialloc.h"
25#include "xfs_bmap.h"
26#include "xfs_bmap_util.h"
27#include "xfs_errortag.h"
28#include "xfs_error.h"
29#include "xfs_quota.h"
30#include "xfs_filestream.h"
31#include "xfs_trace.h"
32#include "xfs_icache.h"
33#include "xfs_symlink.h"
34#include "xfs_trans_priv.h"
35#include "xfs_log.h"
36#include "xfs_bmap_btree.h"
37#include "xfs_reflink.h"
38#include "xfs_ag.h"
39#include "xfs_log_priv.h"
40#include "xfs_health.h"
41
42struct kmem_cache *xfs_inode_cache;
43
44STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
45STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
46 struct xfs_inode *);
47
48/*
49 * helper function to extract extent size hint from inode
50 */
51xfs_extlen_t
52xfs_get_extsz_hint(
53 struct xfs_inode *ip)
54{
55 /*
56 * No point in aligning allocations if we need to COW to actually
57 * write to them.
58 */
59 if (xfs_is_always_cow_inode(ip))
60 return 0;
61 if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
62 return ip->i_extsize;
63 if (XFS_IS_REALTIME_INODE(ip))
64 return ip->i_mount->m_sb.sb_rextsize;
65 return 0;
66}
67
68/*
69 * Helper function to extract CoW extent size hint from inode.
70 * Between the extent size hint and the CoW extent size hint, we
71 * return the greater of the two. If the value is zero (automatic),
72 * use the default size.
73 */
74xfs_extlen_t
75xfs_get_cowextsz_hint(
76 struct xfs_inode *ip)
77{
78 xfs_extlen_t a, b;
79
80 a = 0;
81 if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
82 a = ip->i_cowextsize;
83 b = xfs_get_extsz_hint(ip);
84
85 a = max(a, b);
86 if (a == 0)
87 return XFS_DEFAULT_COWEXTSZ_HINT;
88 return a;
89}
90
91/*
92 * These two are wrapper routines around the xfs_ilock() routine used to
93 * centralize some grungy code. They are used in places that wish to lock the
94 * inode solely for reading the extents. The reason these places can't just
95 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
96 * bringing in of the extents from disk for a file in b-tree format. If the
97 * inode is in b-tree format, then we need to lock the inode exclusively until
98 * the extents are read in. Locking it exclusively all the time would limit
99 * our parallelism unnecessarily, though. What we do instead is check to see
100 * if the extents have been read in yet, and only lock the inode exclusively
101 * if they have not.
102 *
103 * The functions return a value which should be given to the corresponding
104 * xfs_iunlock() call.
105 */
106uint
107xfs_ilock_data_map_shared(
108 struct xfs_inode *ip)
109{
110 uint lock_mode = XFS_ILOCK_SHARED;
111
112 if (xfs_need_iread_extents(&ip->i_df))
113 lock_mode = XFS_ILOCK_EXCL;
114 xfs_ilock(ip, lock_mode);
115 return lock_mode;
116}
117
118uint
119xfs_ilock_attr_map_shared(
120 struct xfs_inode *ip)
121{
122 uint lock_mode = XFS_ILOCK_SHARED;
123
124 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
125 lock_mode = XFS_ILOCK_EXCL;
126 xfs_ilock(ip, lock_mode);
127 return lock_mode;
128}
129
130/*
131 * You can't set both SHARED and EXCL for the same lock,
132 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
133 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
134 * to set in lock_flags.
135 */
136static inline void
137xfs_lock_flags_assert(
138 uint lock_flags)
139{
140 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
141 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
142 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
143 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
144 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
145 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
146 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
147 ASSERT(lock_flags != 0);
148}
149
150/*
151 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
152 * multi-reader locks: invalidate_lock and the i_lock. This routine allows
153 * various combinations of the locks to be obtained.
154 *
155 * The 3 locks should always be ordered so that the IO lock is obtained first,
156 * the mmap lock second and the ilock last in order to prevent deadlock.
157 *
158 * Basic locking order:
159 *
160 * i_rwsem -> invalidate_lock -> page_lock -> i_lock
161 *
162 * mmap_lock locking order:
163 *
164 * i_rwsem -> page_lock -> mmap_lock
165 * mmap_lock -> invalidate_lock -> page_lock
166 *
167 * The difference in mmap_lock locking order means that we cannot hold the
168 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
169 * can fault in pages during copy in/out (for buffered IO) or require the
170 * mmap_lock in get_user_pages() to map the user pages into the kernel address
171 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
172 * fault because page faults already hold the mmap_lock.
173 *
174 * Hence to serialise fully against both syscall and mmap based IO, we need to
175 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
176 * both taken in places where we need to invalidate the page cache in a race
177 * free manner (e.g. truncate, hole punch and other extent manipulation
178 * functions).
179 */
180void
181xfs_ilock(
182 xfs_inode_t *ip,
183 uint lock_flags)
184{
185 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
186
187 xfs_lock_flags_assert(lock_flags);
188
189 if (lock_flags & XFS_IOLOCK_EXCL) {
190 down_write_nested(&VFS_I(ip)->i_rwsem,
191 XFS_IOLOCK_DEP(lock_flags));
192 } else if (lock_flags & XFS_IOLOCK_SHARED) {
193 down_read_nested(&VFS_I(ip)->i_rwsem,
194 XFS_IOLOCK_DEP(lock_flags));
195 }
196
197 if (lock_flags & XFS_MMAPLOCK_EXCL) {
198 down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
199 XFS_MMAPLOCK_DEP(lock_flags));
200 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
201 down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
202 XFS_MMAPLOCK_DEP(lock_flags));
203 }
204
205 if (lock_flags & XFS_ILOCK_EXCL)
206 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
207 else if (lock_flags & XFS_ILOCK_SHARED)
208 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
209}
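
/*
 * Illustrative sketch, not from the original source: an extent manipulation
 * path that must invalidate the page cache race-free takes both the i_rwsem
 * and the invalidate_lock, in the documented order, with a single call:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... truncate or hole punch work ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */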
210
211/*
212 * This is just like xfs_ilock(), except that the caller
213 * is guaranteed not to sleep. It returns 1 if it gets
214 * the requested locks and 0 otherwise. If the IO lock is
215 * obtained but the inode lock cannot be, then the IO lock
216 * is dropped before returning.
217 *
218 * ip -- the inode being locked
219 * lock_flags -- this parameter indicates the inode's locks to be
220 * locked. See the comment for xfs_ilock() for a list
221 * of valid values.
222 */
223int
224xfs_ilock_nowait(
225 xfs_inode_t *ip,
226 uint lock_flags)
227{
228 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
229
230 xfs_lock_flags_assert(lock_flags);
231
232 if (lock_flags & XFS_IOLOCK_EXCL) {
233 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
234 goto out;
235 } else if (lock_flags & XFS_IOLOCK_SHARED) {
236 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
237 goto out;
238 }
239
240 if (lock_flags & XFS_MMAPLOCK_EXCL) {
241 if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
242 goto out_undo_iolock;
243 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
244 if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
245 goto out_undo_iolock;
246 }
247
248 if (lock_flags & XFS_ILOCK_EXCL) {
249 if (!mrtryupdate(&ip->i_lock))
250 goto out_undo_mmaplock;
251 } else if (lock_flags & XFS_ILOCK_SHARED) {
252 if (!mrtryaccess(&ip->i_lock))
253 goto out_undo_mmaplock;
254 }
255 return 1;
256
257out_undo_mmaplock:
258 if (lock_flags & XFS_MMAPLOCK_EXCL)
259 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
260 else if (lock_flags & XFS_MMAPLOCK_SHARED)
261 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
262out_undo_iolock:
263 if (lock_flags & XFS_IOLOCK_EXCL)
264 up_write(&VFS_I(ip)->i_rwsem);
265 else if (lock_flags & XFS_IOLOCK_SHARED)
266 up_read(&VFS_I(ip)->i_rwsem);
267out:
268 return 0;
269}
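
/*
 * Illustrative sketch only: a caller that must not block backs off when the
 * trylock fails instead of waiting. The function name and the -EAGAIN policy
 * are hypothetical, not taken from the original file.
 */
static inline int
xfs_example_trylock_shared(
	struct xfs_inode	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
		return -EAGAIN;
	/* ... short read-only inspection of the inode ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return 0;
}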
270
271/*
272 * xfs_iunlock() is used to drop the inode locks acquired with
273 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
274 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
275 * that we know which locks to drop.
276 *
277 * ip -- the inode being unlocked
278 * lock_flags -- this parameter indicates the inode's locks to be
279 * unlocked. See the comment for xfs_ilock() for a list
280 * of valid values for this parameter.
281 *
282 */
283void
284xfs_iunlock(
285 xfs_inode_t *ip,
286 uint lock_flags)
287{
288 xfs_lock_flags_assert(lock_flags);
289
290 if (lock_flags & XFS_IOLOCK_EXCL)
291 up_write(&VFS_I(ip)->i_rwsem);
292 else if (lock_flags & XFS_IOLOCK_SHARED)
293 up_read(&VFS_I(ip)->i_rwsem);
294
295 if (lock_flags & XFS_MMAPLOCK_EXCL)
296 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
297 else if (lock_flags & XFS_MMAPLOCK_SHARED)
298 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
299
300 if (lock_flags & XFS_ILOCK_EXCL)
301 mrunlock_excl(&ip->i_lock);
302 else if (lock_flags & XFS_ILOCK_SHARED)
303 mrunlock_shared(&ip->i_lock);
304
305 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
306}
307
308/*
309 * Give up write locks. The I/O lock cannot be held nested
310 * if it is being demoted.
311 */
312void
313xfs_ilock_demote(
314 xfs_inode_t *ip,
315 uint lock_flags)
316{
317 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
318 ASSERT((lock_flags &
319 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
320
321 if (lock_flags & XFS_ILOCK_EXCL)
322 mrdemote(&ip->i_lock);
323 if (lock_flags & XFS_MMAPLOCK_EXCL)
324 downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
325 if (lock_flags & XFS_IOLOCK_EXCL)
326 downgrade_write(&VFS_I(ip)->i_rwsem);
327
328 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
329}
330
331#if defined(DEBUG) || defined(XFS_WARN)
332static inline bool
333__xfs_rwsem_islocked(
334 struct rw_semaphore *rwsem,
335 bool shared)
336{
337 if (!debug_locks)
338 return rwsem_is_locked(rwsem);
339
340 if (!shared)
341 return lockdep_is_held_type(rwsem, 0);
342
343 /*
344 * We are checking that the lock is held at least in shared
345 * mode but don't care that it might be held exclusively
346 * (i.e. shared | excl). Hence we check if the lock is held
347 * in any mode rather than an explicit shared mode.
348 */
349 return lockdep_is_held_type(rwsem, -1);
350}
351
352bool
353xfs_isilocked(
354 struct xfs_inode *ip,
355 uint lock_flags)
356{
357 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
358 if (!(lock_flags & XFS_ILOCK_SHARED))
359 return !!ip->i_lock.mr_writer;
360 return rwsem_is_locked(&ip->i_lock.mr_lock);
361 }
362
363 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
364 return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
365 (lock_flags & XFS_MMAPLOCK_SHARED));
366 }
367
368 if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
369 return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
370 (lock_flags & XFS_IOLOCK_SHARED));
371 }
372
373 ASSERT(0);
374 return false;
375}
376#endif
377
378/*
379 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
380 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
381 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
382 * errors and warnings.
383 */
384#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
385static bool
386xfs_lockdep_subclass_ok(
387 int subclass)
388{
389 return subclass < MAX_LOCKDEP_SUBCLASSES;
390}
391#else
392#define xfs_lockdep_subclass_ok(subclass) (true)
393#endif
394
395/*
396 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
397 * value. This can be called for any type of inode lock combination, including
398 * parent locking. Care must be taken to ensure we don't overrun the subclass
399 * storage fields in the class mask we build.
400 */
401static inline uint
402xfs_lock_inumorder(
403 uint lock_mode,
404 uint subclass)
405{
406 uint class = 0;
407
408 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
409 XFS_ILOCK_RTSUM)));
410 ASSERT(xfs_lockdep_subclass_ok(subclass));
411
412 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
413 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
414 class += subclass << XFS_IOLOCK_SHIFT;
415 }
416
417 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
418 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
419 class += subclass << XFS_MMAPLOCK_SHIFT;
420 }
421
422 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
423 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
424 class += subclass << XFS_ILOCK_SHIFT;
425 }
426
427 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
428}
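
/*
 * Editorial worked example: locking the third inode of an ordered set
 * (subclass 2) with XFS_ILOCK_EXCL yields
 *
 *	XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * so lockdep sees a distinct class for each position in the set and does not
 * flag the nested acquisitions as self-deadlock.
 */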
429
430/*
431 * The following routine will lock n inodes in exclusive mode. We assume the
432 * caller calls us with the inodes in i_ino order.
433 *
434 * We need to detect deadlock where an inode that we lock is in the AIL and we
435 * start waiting for another inode that is locked by a thread in a long running
436 * transaction (such as truncate). This can result in deadlock since the long
437 * running trans might need to wait for the inode we just locked in order to
438 * push the tail and free space in the log.
439 *
440 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
441 * the iolock, the mmaplock or the ilock - never a combination. If we lock
442 * more than one type at a time, lockdep will report false positives saying we
443 * have violated locking orders.
444 */
445static void
446xfs_lock_inodes(
447 struct xfs_inode **ips,
448 int inodes,
449 uint lock_mode)
450{
451 int attempts = 0;
452 uint i;
453 int j;
454 bool try_lock;
455 struct xfs_log_item *lp;
456
457 /*
458 * Currently supports between 2 and 5 inodes with exclusive locking. We
459 * support an arbitrary depth of locking here, but absolute limits on
460 * inodes depend on the type of locking and the limits placed by
461 * lockdep annotations in xfs_lock_inumorder. These are all checked by
462 * the asserts.
463 */
464 ASSERT(ips && inodes >= 2 && inodes <= 5);
465 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
466 XFS_ILOCK_EXCL));
467 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
468 XFS_ILOCK_SHARED)));
469 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
470 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
471 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
472 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
473
474 if (lock_mode & XFS_IOLOCK_EXCL) {
475 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
476 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
477 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
478
479again:
480 try_lock = false;
481 i = 0;
482 for (; i < inodes; i++) {
483 ASSERT(ips[i]);
484
485 if (i && (ips[i] == ips[i - 1])) /* Already locked */
486 continue;
487
488 /*
489 * If try_lock is not set yet, make sure all locked inodes are
490 * not in the AIL. If any are, set try_lock to be used later.
491 */
492 if (!try_lock) {
493 for (j = (i - 1); j >= 0 && !try_lock; j--) {
494 lp = &ips[j]->i_itemp->ili_item;
495 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
496 try_lock = true;
497 }
498 }
499
500 /*
501 * If any of the previous locks we have locked is in the AIL,
502 * we must TRY to get the second and subsequent locks. If
503 * we can't get any, we must release all we have
504 * and try again.
505 */
506 if (!try_lock) {
507 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
508 continue;
509 }
510
511 /* try_lock means we have an inode locked that is in the AIL. */
512 ASSERT(i != 0);
513 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
514 continue;
515
516 /*
517 * Unlock all previous guys and try again. xfs_iunlock will try
518 * to push the tail if the inode is in the AIL.
519 */
520 attempts++;
521 for (j = i - 1; j >= 0; j--) {
522 /*
523 * Check to see if we've already unlocked this one. Not
524 * the first one going back, and the inode ptr is the
525 * same.
526 */
527 if (j != (i - 1) && ips[j] == ips[j + 1])
528 continue;
529
530 xfs_iunlock(ips[j], lock_mode);
531 }
532
533 if ((attempts % 5) == 0) {
534 delay(1); /* Don't just spin the CPU */
535 }
536 goto again;
537 }
538}
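
/*
 * Illustrative sketch (inode names hypothetical): callers sort the array by
 * i_ino before calling, e.g. for a rename-style operation on four inodes:
 *
 *	struct xfs_inode *ips[4] = { dp1, dp2, ip1, ip2 };
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */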
539
540/*
541 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
542 * mmaplock must be double-locked separately since we use i_rwsem and
543 * invalidate_lock for that. We now support taking one lock EXCL and the
544 * other SHARED.
545 */
546void
547xfs_lock_two_inodes(
548 struct xfs_inode *ip0,
549 uint ip0_mode,
550 struct xfs_inode *ip1,
551 uint ip1_mode)
552{
553 int attempts = 0;
554 struct xfs_log_item *lp;
555
556 ASSERT(hweight32(ip0_mode) == 1);
557 ASSERT(hweight32(ip1_mode) == 1);
558 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
559 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
560 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
561 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
562 ASSERT(ip0->i_ino != ip1->i_ino);
563
564 if (ip0->i_ino > ip1->i_ino) {
565 swap(ip0, ip1);
566 swap(ip0_mode, ip1_mode);
567 }
568
569 again:
570 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
571
572 /*
573 * If the first lock we have locked is in the AIL, we must TRY to get
574 * the second lock. If we can't get it, we must release the first one
575 * and try again.
576 */
577 lp = &ip0->i_itemp->ili_item;
578 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
579 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
580 xfs_iunlock(ip0, ip0_mode);
581 if ((++attempts % 5) == 0)
582 delay(1); /* Don't just spin the CPU */
583 goto again;
584 }
585 } else {
586 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
587 }
588}
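
/*
 * Illustrative sketch: lock a pair of distinct inodes, one exclusively and
 * one shared, as permitted above; the function handles i_ino ordering
 * internally by swapping its arguments (inode names hypothetical):
 *
 *	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_SHARED);
 */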
589
590uint
591xfs_ip2xflags(
592 struct xfs_inode *ip)
593{
594 uint flags = 0;
595
596 if (ip->i_diflags & XFS_DIFLAG_ANY) {
597 if (ip->i_diflags & XFS_DIFLAG_REALTIME)
598 flags |= FS_XFLAG_REALTIME;
599 if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
600 flags |= FS_XFLAG_PREALLOC;
601 if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
602 flags |= FS_XFLAG_IMMUTABLE;
603 if (ip->i_diflags & XFS_DIFLAG_APPEND)
604 flags |= FS_XFLAG_APPEND;
605 if (ip->i_diflags & XFS_DIFLAG_SYNC)
606 flags |= FS_XFLAG_SYNC;
607 if (ip->i_diflags & XFS_DIFLAG_NOATIME)
608 flags |= FS_XFLAG_NOATIME;
609 if (ip->i_diflags & XFS_DIFLAG_NODUMP)
610 flags |= FS_XFLAG_NODUMP;
611 if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
612 flags |= FS_XFLAG_RTINHERIT;
613 if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
614 flags |= FS_XFLAG_PROJINHERIT;
615 if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
616 flags |= FS_XFLAG_NOSYMLINKS;
617 if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
618 flags |= FS_XFLAG_EXTSIZE;
619 if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
620 flags |= FS_XFLAG_EXTSZINHERIT;
621 if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
622 flags |= FS_XFLAG_NODEFRAG;
623 if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
624 flags |= FS_XFLAG_FILESTREAM;
625 }
626
627 if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
628 if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
629 flags |= FS_XFLAG_DAX;
630 if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
631 flags |= FS_XFLAG_COWEXTSIZE;
632 }
633
634 if (xfs_inode_has_attr_fork(ip))
635 flags |= FS_XFLAG_HASATTR;
636 return flags;
637}
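
/*
 * Editorial worked example (hypothetical inode state): a realtime file with
 * XFS_DIFLAG_REALTIME and XFS_DIFLAG_EXTSIZE set plus an attribute fork maps
 * to FS_XFLAG_REALTIME | FS_XFLAG_EXTSIZE | FS_XFLAG_HASATTR.
 */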
638
639/*
640 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
641 * is allowed, otherwise it has to be an exact match. If a CI match is found,
642 * ci_name->name will point to the actual name (caller must free) or
643 * will be set to NULL if an exact match is found.
644 */
645int
646xfs_lookup(
647 struct xfs_inode *dp,
648 const struct xfs_name *name,
649 struct xfs_inode **ipp,
650 struct xfs_name *ci_name)
651{
652 xfs_ino_t inum;
653 int error;
654
655 trace_xfs_lookup(dp, name);
656
657 if (xfs_is_shutdown(dp->i_mount))
658 return -EIO;
659 if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
660 return -EIO;
661
662 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
663 if (error)
664 goto out_unlock;
665
666 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
667 if (error)
668 goto out_free_name;
669
670 return 0;
671
672out_free_name:
673 if (ci_name)
674 kmem_free(ci_name->name);
675out_unlock:
676 *ipp = NULL;
677 return error;
678}
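
/*
 * Illustrative sketch, not from the original file: an exact-match caller
 * passes a NULL ci_name, while a case-insensitive caller must free
 * ci_name->name after a CI match. The wrapper name is hypothetical.
 */
static inline int
xfs_example_exact_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp)
{
	return xfs_lookup(dp, name, ipp, NULL);
}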
679
680/* Propagate di_flags from a parent inode to a child inode. */
681static void
682xfs_inode_inherit_flags(
683 struct xfs_inode *ip,
684 const struct xfs_inode *pip)
685{
686 unsigned int di_flags = 0;
687 xfs_failaddr_t failaddr;
688 umode_t mode = VFS_I(ip)->i_mode;
689
690 if (S_ISDIR(mode)) {
691 if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
692 di_flags |= XFS_DIFLAG_RTINHERIT;
693 if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
694 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
695 ip->i_extsize = pip->i_extsize;
696 }
697 if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
698 di_flags |= XFS_DIFLAG_PROJINHERIT;
699 } else if (S_ISREG(mode)) {
700 if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
701 xfs_has_realtime(ip->i_mount))
702 di_flags |= XFS_DIFLAG_REALTIME;
703 if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
704 di_flags |= XFS_DIFLAG_EXTSIZE;
705 ip->i_extsize = pip->i_extsize;
706 }
707 }
708 if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
709 xfs_inherit_noatime)
710 di_flags |= XFS_DIFLAG_NOATIME;
711 if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
712 xfs_inherit_nodump)
713 di_flags |= XFS_DIFLAG_NODUMP;
714 if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
715 xfs_inherit_sync)
716 di_flags |= XFS_DIFLAG_SYNC;
717 if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
718 xfs_inherit_nosymlinks)
719 di_flags |= XFS_DIFLAG_NOSYMLINKS;
720 if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
721 xfs_inherit_nodefrag)
722 di_flags |= XFS_DIFLAG_NODEFRAG;
723 if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
724 di_flags |= XFS_DIFLAG_FILESTREAM;
725
726 ip->i_diflags |= di_flags;
727
728 /*
729 * Inode verifiers on older kernels only checked that the extent size
730 * hint is an integer multiple of the rt extent size on realtime files.
731 * They did not check the hint alignment on a directory with both
732 * rtinherit and extszinherit flags set. If the misaligned hint is
733 * propagated from a directory into a new realtime file, new file
734 * allocations will fail due to math errors in the rt allocator and/or
735 * trip the verifiers. Validate the hint settings in the new file so
736 * that we don't let broken hints propagate.
737 */
738 failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
739 VFS_I(ip)->i_mode, ip->i_diflags);
740 if (failaddr) {
741 ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
742 XFS_DIFLAG_EXTSZINHERIT);
743 ip->i_extsize = 0;
744 }
745}
746
747/* Propagate di_flags2 from a parent inode to a child inode. */
748static void
749xfs_inode_inherit_flags2(
750 struct xfs_inode *ip,
751 const struct xfs_inode *pip)
752{
753 xfs_failaddr_t failaddr;
754
755 if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
756 ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
757 ip->i_cowextsize = pip->i_cowextsize;
758 }
759 if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
760 ip->i_diflags2 |= XFS_DIFLAG2_DAX;
761
762 /* Don't let invalid cowextsize hints propagate. */
763 failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
764 VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
765 if (failaddr) {
766 ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
767 ip->i_cowextsize = 0;
768 }
769}
770
771/*
772 * Initialise a newly allocated inode and return the in-core inode to the
773 * caller locked exclusively.
774 */
775int
776xfs_init_new_inode(
777 struct mnt_idmap *idmap,
778 struct xfs_trans *tp,
779 struct xfs_inode *pip,
780 xfs_ino_t ino,
781 umode_t mode,
782 xfs_nlink_t nlink,
783 dev_t rdev,
784 prid_t prid,
785 bool init_xattrs,
786 struct xfs_inode **ipp)
787{
788 struct inode *dir = pip ? VFS_I(pip) : NULL;
789 struct xfs_mount *mp = tp->t_mountp;
790 struct xfs_inode *ip;
791 unsigned int flags;
792 int error;
793 struct timespec64 tv;
794 struct inode *inode;
795
796 /*
797 * Protect against obviously corrupt allocation btree records. Later
798 * xfs_iget checks will catch re-allocation of other active in-memory
799 * and on-disk inodes. If we don't catch reallocating the parent inode
800 * here we will deadlock in xfs_iget() so we have to do these checks
801 * first.
802 */
803 if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
804 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
805 return -EFSCORRUPTED;
806 }
807
808 /*
809 * Get the in-core inode with the lock held exclusively to prevent
810 * others from looking at it until we're done.
811 */
812 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
813 if (error)
814 return error;
815
816 ASSERT(ip != NULL);
817 inode = VFS_I(ip);
818 set_nlink(inode, nlink);
819 inode->i_rdev = rdev;
820 ip->i_projid = prid;
821
822 if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
823 inode_fsuid_set(inode, idmap);
824 inode->i_gid = dir->i_gid;
825 inode->i_mode = mode;
826 } else {
827 inode_init_owner(idmap, inode, dir, mode);
828 }
829
830 /*
831 * If the group ID of the new file does not match the effective group
832 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
833 * (and only if the irix_sgid_inherit compatibility variable is set).
834 */
835 if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
836 !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
837 inode->i_mode &= ~S_ISGID;
838
839 ip->i_disk_size = 0;
840 ip->i_df.if_nextents = 0;
841 ASSERT(ip->i_nblocks == 0);
842
843 tv = inode_set_ctime_current(inode);
844 inode_set_mtime_to_ts(inode, tv);
845 inode_set_atime_to_ts(inode, tv);
846
847 ip->i_extsize = 0;
848 ip->i_diflags = 0;
849
850 if (xfs_has_v3inodes(mp)) {
851 inode_set_iversion(inode, 1);
852 ip->i_cowextsize = 0;
853 ip->i_crtime = tv;
854 }
855
856 flags = XFS_ILOG_CORE;
857 switch (mode & S_IFMT) {
858 case S_IFIFO:
859 case S_IFCHR:
860 case S_IFBLK:
861 case S_IFSOCK:
862 ip->i_df.if_format = XFS_DINODE_FMT_DEV;
863 flags |= XFS_ILOG_DEV;
864 break;
865 case S_IFREG:
866 case S_IFDIR:
867 if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
868 xfs_inode_inherit_flags(ip, pip);
869 if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
870 xfs_inode_inherit_flags2(ip, pip);
871 fallthrough;
872 case S_IFLNK:
873 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
874 ip->i_df.if_bytes = 0;
875 ip->i_df.if_data = NULL;
876 break;
877 default:
878 ASSERT(0);
879 }
880
881 /*
882 * If we need to create attributes immediately after allocating the
883 * inode, initialise an empty attribute fork right now. We use the
884 * default fork offset for attributes here as we don't know exactly what
885 * size or how many attributes we might be adding. We can do this
886 * safely here because we know the data fork is completely empty and
887 * this saves us from needing to run a separate transaction to set the
888 * fork offset in the immediate future.
889 */
890 if (init_xattrs && xfs_has_attr(mp)) {
891 ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
892 xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
893 }
894
895 /*
896 * Log the new values stuffed into the inode.
897 */
898 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
899 xfs_trans_log_inode(tp, ip, flags);
900
901 /* now that we have an i_mode we can setup the inode structure */
902 xfs_setup_inode(ip);
903
904 *ipp = ip;
905 return 0;
906}
907
908/*
909 * Decrement the link count on an inode & log the change. If this causes the
910 * link count to go to zero, move the inode to the AGI unlinked list so it can
911 * be freed when the last active reference goes away via xfs_inactive().
912 */
913static int /* error */
914xfs_droplink(
915 xfs_trans_t *tp,
916 xfs_inode_t *ip)
917{
918 if (VFS_I(ip)->i_nlink == 0) {
919 xfs_alert(ip->i_mount,
920 "%s: Attempt to drop inode (%llu) with nlink zero.",
921 __func__, ip->i_ino);
922 return -EFSCORRUPTED;
923 }
924
925 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
926
927 drop_nlink(VFS_I(ip));
928 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
929
930 if (VFS_I(ip)->i_nlink)
931 return 0;
932
933 return xfs_iunlink(tp, ip);
934}
935
936/*
937 * Increment the link count on an inode & log the change.
938 */
939static void
940xfs_bumplink(
941 xfs_trans_t *tp,
942 xfs_inode_t *ip)
943{
944 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
945
946 inc_nlink(VFS_I(ip));
947 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
948}
949
950int
951xfs_create(
952 struct mnt_idmap *idmap,
953 xfs_inode_t *dp,
954 struct xfs_name *name,
955 umode_t mode,
956 dev_t rdev,
957 bool init_xattrs,
958 xfs_inode_t **ipp)
959{
960 int is_dir = S_ISDIR(mode);
961 struct xfs_mount *mp = dp->i_mount;
962 struct xfs_inode *ip = NULL;
963 struct xfs_trans *tp = NULL;
964 int error;
965 bool unlock_dp_on_error = false;
966 prid_t prid;
967 struct xfs_dquot *udqp = NULL;
968 struct xfs_dquot *gdqp = NULL;
969 struct xfs_dquot *pdqp = NULL;
970 struct xfs_trans_res *tres;
971 uint resblks;
972 xfs_ino_t ino;
973
974 trace_xfs_create(dp, name);
975
976 if (xfs_is_shutdown(mp))
977 return -EIO;
978 if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
979 return -EIO;
980
981 prid = xfs_get_initial_prid(dp);
982
983 /*
984 * Make sure that we have allocated dquot(s) on disk.
985 */
986 error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
987 mapped_fsgid(idmap, &init_user_ns), prid,
988 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
989 &udqp, &gdqp, &pdqp);
990 if (error)
991 return error;
992
993 if (is_dir) {
994 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
995 tres = &M_RES(mp)->tr_mkdir;
996 } else {
997 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
998 tres = &M_RES(mp)->tr_create;
999 }
1000
1001 /*
1002 * Initially assume that the file does not exist and
1003 * reserve the resources for that case. If that is not
1004 * the case we'll drop the one we have and get a more
1005 * appropriate transaction later.
1006 */
1007 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1008 &tp);
1009 if (error == -ENOSPC) {
1010 /* flush outstanding delalloc blocks and retry */
1011 xfs_flush_inodes(mp);
1012 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1013 resblks, &tp);
1014 }
1015 if (error)
1016 goto out_release_dquots;
1017
1018 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1019 unlock_dp_on_error = true;
1020
1021 /*
1022 * A newly created regular or special file just has one directory
1023 * entry pointing to it, but a directory also has the "." entry
1024 * pointing to itself.
1025 */
1026 error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1027 if (!error)
1028 error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1029 is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1030 if (error)
1031 goto out_trans_cancel;
1032
1033 /*
1034 * Now we join the directory inode to the transaction. We do not do it
1035 * earlier because xfs_dialloc might commit the previous transaction
1036 * (and release all the locks). An error from here on will result in
1037 * the transaction cancel unlocking dp so don't do it explicitly in the
1038 * error path.
1039 */
1040 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1041 unlock_dp_on_error = false;
1042
1043 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1044 resblks - XFS_IALLOC_SPACE_RES(mp));
1045 if (error) {
1046 ASSERT(error != -ENOSPC);
1047 goto out_trans_cancel;
1048 }
1049 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1050 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1051
1052 if (is_dir) {
1053 error = xfs_dir_init(tp, ip, dp);
1054 if (error)
1055 goto out_trans_cancel;
1056
1057 xfs_bumplink(tp, dp);
1058 }
1059
1060 /*
1061 * If this is a synchronous mount, make sure that the
1062 * create transaction goes to disk before returning to
1063 * the user.
1064 */
1065 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1066 xfs_trans_set_sync(tp);
1067
1068 /*
1069 * Attach the dquot(s) to the inodes and modify them incore.
1070 * The ids of the inode cannot have changed since the new
1071 * inode has been locked ever since it was created.
1072 */
1073 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1074
1075 error = xfs_trans_commit(tp);
1076 if (error)
1077 goto out_release_inode;
1078
1079 xfs_qm_dqrele(udqp);
1080 xfs_qm_dqrele(gdqp);
1081 xfs_qm_dqrele(pdqp);
1082
1083 *ipp = ip;
1084 return 0;
1085
1086 out_trans_cancel:
1087 xfs_trans_cancel(tp);
1088 out_release_inode:
1089 /*
1090 * Wait until after the current transaction is aborted to finish the
1091 * setup of the inode and release the inode. This prevents recursive
1092 * transactions and deadlocks from xfs_inactive.
1093 */
1094 if (ip) {
1095 xfs_finish_inode_setup(ip);
1096 xfs_irele(ip);
1097 }
1098 out_release_dquots:
1099 xfs_qm_dqrele(udqp);
1100 xfs_qm_dqrele(gdqp);
1101 xfs_qm_dqrele(pdqp);
1102
1103 if (unlock_dp_on_error)
1104 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1105 return error;
1106}
1107
1108int
1109xfs_create_tmpfile(
1110 struct mnt_idmap *idmap,
1111 struct xfs_inode *dp,
1112 umode_t mode,
1113 struct xfs_inode **ipp)
1114{
1115 struct xfs_mount *mp = dp->i_mount;
1116 struct xfs_inode *ip = NULL;
1117 struct xfs_trans *tp = NULL;
1118 int error;
1119 prid_t prid;
1120 struct xfs_dquot *udqp = NULL;
1121 struct xfs_dquot *gdqp = NULL;
1122 struct xfs_dquot *pdqp = NULL;
1123 struct xfs_trans_res *tres;
1124 uint resblks;
1125 xfs_ino_t ino;
1126
1127 if (xfs_is_shutdown(mp))
1128 return -EIO;
1129
1130 prid = xfs_get_initial_prid(dp);
1131
1132 /*
1133 * Make sure that we have allocated dquot(s) on disk.
1134 */
1135 error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1136 mapped_fsgid(idmap, &init_user_ns), prid,
1137 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1138 &udqp, &gdqp, &pdqp);
1139 if (error)
1140 return error;
1141
1142 resblks = XFS_IALLOC_SPACE_RES(mp);
1143 tres = &M_RES(mp)->tr_create_tmpfile;
1144
1145 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1146 &tp);
1147 if (error)
1148 goto out_release_dquots;
1149
1150 error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1151 if (!error)
1152 error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1153 0, 0, prid, false, &ip);
1154 if (error)
1155 goto out_trans_cancel;
1156
1157 if (xfs_has_wsync(mp))
1158 xfs_trans_set_sync(tp);
1159
1160 /*
1161 * Attach the dquot(s) to the inodes and modify them incore.
1162 * The ids of the inode cannot have changed since the new
1163 * inode has been locked ever since it was created.
1164 */
1165 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1166
1167 error = xfs_iunlink(tp, ip);
1168 if (error)
1169 goto out_trans_cancel;
1170
1171 error = xfs_trans_commit(tp);
1172 if (error)
1173 goto out_release_inode;
1174
1175 xfs_qm_dqrele(udqp);
1176 xfs_qm_dqrele(gdqp);
1177 xfs_qm_dqrele(pdqp);
1178
1179 *ipp = ip;
1180 return 0;
1181
1182 out_trans_cancel:
1183 xfs_trans_cancel(tp);
1184 out_release_inode:
1185 /*
1186 * Wait until after the current transaction is aborted to finish the
1187 * setup of the inode and release the inode. This prevents recursive
1188 * transactions and deadlocks from xfs_inactive.
1189 */
1190 if (ip) {
1191 xfs_finish_inode_setup(ip);
1192 xfs_irele(ip);
1193 }
1194 out_release_dquots:
1195 xfs_qm_dqrele(udqp);
1196 xfs_qm_dqrele(gdqp);
1197 xfs_qm_dqrele(pdqp);
1198
1199 return error;
1200}
1201
1202int
1203xfs_link(
1204 xfs_inode_t *tdp,
1205 xfs_inode_t *sip,
1206 struct xfs_name *target_name)
1207{
1208 xfs_mount_t *mp = tdp->i_mount;
1209 xfs_trans_t *tp;
1210 int error, nospace_error = 0;
1211 int resblks;
1212
1213 trace_xfs_link(tdp, target_name);
1214
1215 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1216
1217 if (xfs_is_shutdown(mp))
1218 return -EIO;
1219 if (xfs_ifork_zapped(tdp, XFS_DATA_FORK))
1220 return -EIO;
1221
1222 error = xfs_qm_dqattach(sip);
1223 if (error)
1224 goto std_return;
1225
1226 error = xfs_qm_dqattach(tdp);
1227 if (error)
1228 goto std_return;
1229
1230 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1231 error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1232 &tp, &nospace_error);
1233 if (error)
1234 goto std_return;
1235
1236 /*
1237 * If we are using project inheritance, we only allow hard link
1238 * creation in our tree when the project IDs are the same; else
1239 * the tree quota mechanism could be circumvented.
1240 */
1241 if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1242 tdp->i_projid != sip->i_projid)) {
1243 error = -EXDEV;
1244 goto error_return;
1245 }
1246
1247 if (!resblks) {
1248 error = xfs_dir_canenter(tp, tdp, target_name);
1249 if (error)
1250 goto error_return;
1251 }
1252
1253 /*
1254 * Handle initial link state of O_TMPFILE inode
1255 */
1256 if (VFS_I(sip)->i_nlink == 0) {
1257 struct xfs_perag *pag;
1258
1259 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1260 error = xfs_iunlink_remove(tp, pag, sip);
1261 xfs_perag_put(pag);
1262 if (error)
1263 goto error_return;
1264 }
1265
1266 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1267 resblks);
1268 if (error)
1269 goto error_return;
1270 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1271 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1272
1273 xfs_bumplink(tp, sip);
1274
1275 /*
1276 * If this is a synchronous mount, make sure that the
1277 * link transaction goes to disk before returning to
1278 * the user.
1279 */
1280 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1281 xfs_trans_set_sync(tp);
1282
1283 return xfs_trans_commit(tp);
1284
1285 error_return:
1286 xfs_trans_cancel(tp);
1287 std_return:
1288 if (error == -ENOSPC && nospace_error)
1289 error = nospace_error;
1290 return error;
1291}
1292
1293/* Clear the reflink flag and the cowblocks tag if possible. */
1294static void
1295xfs_itruncate_clear_reflink_flags(
1296 struct xfs_inode *ip)
1297{
1298 struct xfs_ifork *dfork;
1299 struct xfs_ifork *cfork;
1300
1301 if (!xfs_is_reflink_inode(ip))
1302 return;
1303 dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1304 cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1305 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1306 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1307 if (cfork->if_bytes == 0)
1308 xfs_inode_clear_cowblocks_tag(ip);
1309}
1310
1311/*
1312 * Free up the underlying blocks past new_size. The new size must be smaller
1313 * than the current size. This routine can be used both for the attribute and
1314 * data fork, and does not modify the inode size, which is left to the caller.
1315 *
1316 * The transaction passed to this routine must have made a permanent log
1317 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1318 * given transaction and start new ones, so make sure everything involved in
1319 * the transaction is tidy before calling here. Some transaction will be
1320 * returned to the caller to be committed. The incoming transaction must
1321 * already include the inode, and both inode locks must be held exclusively.
1322 * The inode must also be "held" within the transaction. On return the inode
1323 * will be "held" within the returned transaction. This routine does NOT
1324 * require any disk space to be reserved for it within the transaction.
1325 *
1326 * If we get an error, we must return with the inode locked and linked into the
1327 * current transaction. This keeps things simple for the higher level code,
1328 * because it always knows that the inode is locked and held in the transaction
1329 * that returns to it whether errors occur or not. We don't mark the inode
1330 * dirty on error so that transactions can be easily aborted if possible.
1331 */
1332int
1333xfs_itruncate_extents_flags(
1334 struct xfs_trans **tpp,
1335 struct xfs_inode *ip,
1336 int whichfork,
1337 xfs_fsize_t new_size,
1338 int flags)
1339{
1340 struct xfs_mount *mp = ip->i_mount;
1341 struct xfs_trans *tp = *tpp;
1342 xfs_fileoff_t first_unmap_block;
1343 int error = 0;
1344
1345 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1346 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1347 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1348 ASSERT(new_size <= XFS_ISIZE(ip));
1349 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1350 ASSERT(ip->i_itemp != NULL);
1351 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1352 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1353
1354 trace_xfs_itruncate_extents_start(ip, new_size);
1355
1356 flags |= xfs_bmapi_aflag(whichfork);
1357
1358 /*
1359 * Since it is possible for space to become allocated beyond
1360 * the end of the file (in a crash where the space is allocated
1361 * but the inode size is not yet updated), simply remove any
1362 * blocks which show up between the new EOF and the maximum
1363 * possible file size.
1364 *
1365 * We have to free all the blocks to the bmbt maximum offset, even if
1366 * the page cache can't scale that far.
1367 */
1368 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1369 if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1370 WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1371 return 0;
1372 }
1373
1374 error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
1375 XFS_MAX_FILEOFF);
1376 if (error)
1377 goto out;
1378
1379 if (whichfork == XFS_DATA_FORK) {
1380 /* Remove all pending CoW reservations. */
1381 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1382 first_unmap_block, XFS_MAX_FILEOFF, true);
1383 if (error)
1384 goto out;
1385
1386 xfs_itruncate_clear_reflink_flags(ip);
1387 }
1388
1389 /*
1390 * Always re-log the inode so that our permanent transaction can keep
1391 * on rolling it forward in the log.
1392 */
1393 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1394
1395 trace_xfs_itruncate_extents_end(ip, new_size);
1396
1397out:
1398 *tpp = tp;
1399 return error;
1400}
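
/*
 * Illustrative call sequence (sketch, error handling elided) matching the
 * contract above; compare xfs_inactive_truncate() below for a real caller:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK, new_size, 0);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */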
1401
1402int
1403xfs_release(
1404 xfs_inode_t *ip)
1405{
1406 xfs_mount_t *mp = ip->i_mount;
1407 int error = 0;
1408
1409 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1410 return 0;
1411
1412 /* If this is a read-only mount, don't do this (would generate I/O) */
1413 if (xfs_is_readonly(mp))
1414 return 0;
1415
1416 if (!xfs_is_shutdown(mp)) {
1417 int truncated;
1418
1419 /*
1420 * If we previously truncated this file and removed old data
1421 * in the process, we want to initiate "early" writeout on
1422 * the last close. This is an attempt to combat the notorious
1423 * NULL files problem which is particularly noticeable from a
1424 * truncate down, buffered (re-)write (delalloc), followed by
1425 * a crash. What we are effectively doing here is
1426 * significantly reducing the time window where we'd otherwise
1427 * be exposed to that problem.
1428 */
1429 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1430 if (truncated) {
1431 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1432 if (ip->i_delayed_blks > 0) {
1433 error = filemap_flush(VFS_I(ip)->i_mapping);
1434 if (error)
1435 return error;
1436 }
1437 }
1438 }
1439
1440 if (VFS_I(ip)->i_nlink == 0)
1441 return 0;
1442
1443 /*
1444 * If we can't get the iolock just skip truncating the blocks past EOF
1445 * because we could deadlock with the mmap_lock otherwise. We'll get
1446 * another chance to drop them once the last reference to the inode is
1447 * dropped, so we'll never leak blocks permanently.
1448 */
1449 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1450 return 0;
1451
1452 if (xfs_can_free_eofblocks(ip, false)) {
1453 /*
1454 * If the inode is being opened, written and closed
1455 * frequently and we have delayed allocation blocks outstanding
1456 * (e.g. streaming writes from the NFS server), truncating the
1457 * blocks past EOF will cause fragmentation to occur.
1458 *
1459 * In this case don't do the truncation, but we have to be
1460 * careful how we detect this case. Blocks beyond EOF show up as
1461 * i_delayed_blks even when the inode is clean, so we need to
1462 * truncate them away first before checking for a dirty release.
1463 * Hence on the first dirty close we will still remove the
1464 * speculative allocation, but after that we will leave it in
1465 * place.
1466 */
1467 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1468 goto out_unlock;
1469
1470 error = xfs_free_eofblocks(ip);
1471 if (error)
1472 goto out_unlock;
1473
1474 /* delalloc blocks after truncation means it really is dirty */
1475 if (ip->i_delayed_blks)
1476 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1477 }
1478
1479out_unlock:
1480 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1481 return error;
1482}
1483
1484/*
1485 * xfs_inactive_truncate
1486 *
1487 * Called to perform a truncate when an inode becomes unlinked.
1488 */
1489STATIC int
1490xfs_inactive_truncate(
1491 struct xfs_inode *ip)
1492{
1493 struct xfs_mount *mp = ip->i_mount;
1494 struct xfs_trans *tp;
1495 int error;
1496
1497 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1498 if (error) {
1499 ASSERT(xfs_is_shutdown(mp));
1500 return error;
1501 }
1502 xfs_ilock(ip, XFS_ILOCK_EXCL);
1503 xfs_trans_ijoin(tp, ip, 0);
1504
1505 /*
1506 * Log the inode size first to prevent stale data exposure in the event
1507 * of a system crash before the truncate completes. See the related
1508 * comment in xfs_vn_setattr_size() for details.
1509 */
1510 ip->i_disk_size = 0;
1511 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1512
1513 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1514 if (error)
1515 goto error_trans_cancel;
1516
1517 ASSERT(ip->i_df.if_nextents == 0);
1518
1519 error = xfs_trans_commit(tp);
1520 if (error)
1521 goto error_unlock;
1522
1523 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1524 return 0;
1525
1526error_trans_cancel:
1527 xfs_trans_cancel(tp);
1528error_unlock:
1529 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1530 return error;
1531}
1532
1533/*
1534 * xfs_inactive_ifree()
1535 *
1536 * Perform the inode free when an inode is unlinked.
1537 */
1538STATIC int
1539xfs_inactive_ifree(
1540 struct xfs_inode *ip)
1541{
1542 struct xfs_mount *mp = ip->i_mount;
1543 struct xfs_trans *tp;
1544 int error;
1545
1546 /*
1547 * We try to use a per-AG reservation for any block needed by the finobt
1548 * tree, but as the finobt feature predates the per-AG reservation
1549 * support a degraded file system might not have enough space for the
1550 * reservation at mount time. In that case try to dip into the reserved
1551 * pool and pray.
1552 *
1553 * Send a warning if the reservation does happen to fail, as the inode
1554 * now remains allocated and sits on the unlinked list until the fs is
1555 * repaired.
1556 */
1557 if (unlikely(mp->m_finobt_nores)) {
1558 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1559 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1560 &tp);
1561 } else {
1562 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1563 }
1564 if (error) {
1565 if (error == -ENOSPC) {
1566 xfs_warn_ratelimited(mp,
1567 "Failed to remove inode(s) from unlinked list. "
1568 "Please free space, unmount and run xfs_repair.");
1569 } else {
1570 ASSERT(xfs_is_shutdown(mp));
1571 }
1572 return error;
1573 }
1574
1575 /*
1576 * We do not hold the inode locked across the entire rolling transaction
1577 * here. We only need to hold it for the first transaction that
1578 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1579 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1580 * here breaks the relationship between cluster buffer invalidation and
1581 * stale inode invalidation on cluster buffer item journal commit
1582 * completion, and can result in leaving dirty stale inodes hanging
1583 * around in memory.
1584 *
1585 * We have no need for serialising this inode operation against other
1586 * operations - we freed the inode and hence reallocation is required
1587 * and that will serialise on reallocating the space the deferops need
1588 * to free. Hence we can unlock the inode on the first commit of
1589 * the transaction rather than roll it right through the deferops. This
1590 * avoids relogging the XFS_ISTALE inode.
1591 *
1592 * We check that xfs_ifree() hasn't grown an internal transaction roll
1593 * by asserting that the inode is still locked when it returns.
1594 */
1595 xfs_ilock(ip, XFS_ILOCK_EXCL);
1596 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1597
1598 error = xfs_ifree(tp, ip);
1599 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1600 if (error) {
1601 /*
1602 * If we fail to free the inode, shut down. The cancel
1603 * might do that, we need to make sure. Otherwise the
1604 * inode might be lost for a long time or forever.
1605 */
1606 if (!xfs_is_shutdown(mp)) {
1607 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1608 __func__, error);
1609 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1610 }
1611 xfs_trans_cancel(tp);
1612 return error;
1613 }
1614
1615 /*
1616 * Credit the quota account(s). The inode is gone.
1617 */
1618 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1619
1620 return xfs_trans_commit(tp);
1621}
1622
1623/*
1624 * Returns true if we need to update the on-disk metadata before we can free
1625 * the memory used by this inode. Updates include freeing post-eof
1626 * preallocations; freeing COW staging extents; and marking the inode free in
1627 * the inobt if it is on the unlinked list.
1628 */
1629bool
1630xfs_inode_needs_inactive(
1631 struct xfs_inode *ip)
1632{
1633 struct xfs_mount *mp = ip->i_mount;
1634 struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1635
1636 /*
1637 * If the inode is already free, then there can be nothing
1638 * to clean up here.
1639 */
1640 if (VFS_I(ip)->i_mode == 0)
1641 return false;
1642
1643 /*
1644 * If this is a read-only mount, don't do this (would generate I/O)
1645 * unless we're in log recovery and cleaning the iunlinked list.
1646 */
1647 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1648 return false;
1649
1650 /* If the log isn't running, push inodes straight to reclaim. */
1651 if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1652 return false;
1653
1654 /* Metadata inodes require explicit resource cleanup. */
1655 if (xfs_is_metadata_inode(ip))
1656 return false;
1657
1658 /* Want to clean out the cow blocks if there are any. */
1659 if (cow_ifp && cow_ifp->if_bytes > 0)
1660 return true;
1661
1662 /* Unlinked files must be freed. */
1663 if (VFS_I(ip)->i_nlink == 0)
1664 return true;
1665
1666 /*
1667 * This file isn't being freed, so check if there are post-eof blocks
1668 * to free. @force is true because we are evicting an inode from the
1669 * cache. Post-eof blocks must be freed, lest we end up with broken
1670 * free space accounting.
1671 *
1672 * Note: don't bother with iolock here since lockdep complains about
1673 * acquiring it in reclaim context. We have the only reference to the
1674 * inode at this point anyways.
1675 */
1676 return xfs_can_free_eofblocks(ip, true);
1677}
1678
1679/*
1680 * xfs_inactive
1681 *
1682 * This is called when the reference count for the vnode
1683 * goes to zero. If the file has been unlinked, then it must
1684 * now be truncated. Also, we clear all of the read-ahead state
1685 * kept for the inode here since the file is now closed.
1686 */
1687int
1688xfs_inactive(
1689 xfs_inode_t *ip)
1690{
1691 struct xfs_mount *mp;
1692 int error = 0;
1693 int truncate = 0;
1694
1695 /*
1696 * If the inode is already free, then there can be nothing
1697 * to clean up here.
1698 */
1699 if (VFS_I(ip)->i_mode == 0) {
1700 ASSERT(ip->i_df.if_broot_bytes == 0);
1701 goto out;
1702 }
1703
1704 mp = ip->i_mount;
1705 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1706
1707 /*
1708 * If this is a read-only mount, don't do this (would generate I/O)
1709 * unless we're in log recovery and cleaning the iunlinked list.
1710 */
1711 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1712 goto out;
1713
1714 /* Metadata inodes require explicit resource cleanup. */
1715 if (xfs_is_metadata_inode(ip))
1716 goto out;
1717
1718 /* Try to clean out the cow blocks if there are any. */
1719 if (xfs_inode_has_cow_data(ip))
1720 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1721
1722 if (VFS_I(ip)->i_nlink != 0) {
1723 /*
1724 * force is true because we are evicting an inode from the
1725 * cache. Post-eof blocks must be freed, lest we end up with
1726 * broken free space accounting.
1727 *
1728 * Note: don't bother with iolock here since lockdep complains
1729 * about acquiring it in reclaim context. We have the only
1730 * reference to the inode at this point anyways.
1731 */
1732 if (xfs_can_free_eofblocks(ip, true))
1733 error = xfs_free_eofblocks(ip);
1734
1735 goto out;
1736 }
1737
1738 if (S_ISREG(VFS_I(ip)->i_mode) &&
1739 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1740 ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1741 truncate = 1;
1742
1743 if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1744 /*
1745 * If this inode is being inactivated during a quotacheck and
1746 * has not yet been scanned by quotacheck, we /must/ remove
1747 * the dquots from the inode before inactivation changes the
1748 * block and inode counts. Most probably this is a result of
1749 * reloading the incore iunlinked list to purge unrecovered
1750 * unlinked inodes.
1751 */
1752 xfs_qm_dqdetach(ip);
1753 } else {
1754 error = xfs_qm_dqattach(ip);
1755 if (error)
1756 goto out;
1757 }
1758
1759 if (S_ISLNK(VFS_I(ip)->i_mode))
1760 error = xfs_inactive_symlink(ip);
1761 else if (truncate)
1762 error = xfs_inactive_truncate(ip);
1763 if (error)
1764 goto out;
1765
1766 /*
1767 * If there are attributes associated with the file then blow them away
1768 * now. The code calls a routine that recursively deconstructs the
1769 * attribute fork. It also blows away the in-core attribute fork.
1770 */
1771 if (xfs_inode_has_attr_fork(ip)) {
1772 error = xfs_attr_inactive(ip);
1773 if (error)
1774 goto out;
1775 }
1776
1777 ASSERT(ip->i_forkoff == 0);
1778
1779 /*
1780 * Free the inode.
1781 */
1782 error = xfs_inactive_ifree(ip);
1783
1784out:
1785 /*
1786 * We're done making metadata updates for this inode, so we can release
1787 * the attached dquots.
1788 */
1789 xfs_qm_dqdetach(ip);
1790 return error;
1791}
1792
1793/*
1794 * In-Core Unlinked List Lookups
1795 * =============================
1796 *
1797 * Every inode is supposed to be reachable from some other piece of metadata
1798 * with the exception of the root directory. Inodes with a connection to a
1799 * file descriptor but not linked from anywhere in the on-disk directory tree
1800 * are collectively known as unlinked inodes, though the filesystem itself
1801 * maintains links to these inodes so that on-disk metadata are consistent.
1802 *
1803 * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
1804 * header contains a number of buckets that point to an inode, and each inode
1805 * record has a pointer to the next inode in the hash chain. This
1806 * singly-linked list causes scaling problems in the iunlink remove function
1807 * because we must walk that list to find the inode that points to the inode
1808 * being removed from the unlinked hash bucket list.
1809 *
1810 * Hence we keep an in-memory doubly linked list to link each inode on an
1811 * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
1812 * based lists would require having 64 list heads in the perag, one for each
1813 * list. This is expensive in terms of memory (think millions of AGs) and cache
1814 * misses on lookups. Instead, use the fact that inodes on the unlinked list
1815 * must be referenced at the VFS level to keep them on the list and hence we
1816 * have an existence guarantee for inodes on the unlinked list.
1817 *
1818 * Given we have an existence guarantee, we can use lockless inode cache lookups
1819 * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1820 * for the doubly linked unlinked list, and we don't need any extra locking to
1821 * keep the list safe as all manipulations are done under the AGI buffer lock.
1822 * Keeping the list up to date does not require memory allocation, just finding
1823 * the XFS inode and updating the next/prev unlinked list aginos.
1824 */
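
/*
 * Conceptual picture (editorial illustration) of one AGI bucket and the
 * in-core back pointers described above:
 *
 *	AGI bucket[b] -> agino A -> agino B -> NULLAGINO	(on-disk next)
 *	A.i_prev_unlinked = NULLAGINO
 *	B.i_prev_unlinked = A					(in-core prev)
 *
 * Removing B then updates A's next pointer through the log and any
 * successor's i_prev_unlinked purely in memory.
 */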
1825
1826/*
1827 * Find an inode on the unlinked list. This does not take references to the
1828 * inode as we have existence guarantees by holding the AGI buffer lock and because
1829 * only unlinked, referenced inodes can be on the unlinked inode list. If we
1830 * don't find the inode in cache, then let the caller handle the situation.
1831 */
1832static struct xfs_inode *
1833xfs_iunlink_lookup(
1834 struct xfs_perag *pag,
1835 xfs_agino_t agino)
1836{
1837 struct xfs_inode *ip;
1838
1839 rcu_read_lock();
1840 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1841 if (!ip) {
1842 /* Caller can handle inode not being in memory. */
1843 rcu_read_unlock();
1844 return NULL;
1845 }
1846
1847 /*
1848 * Inode in RCU freeing limbo should not happen. Warn about this and
1849 * let the caller handle the failure.
1850 */
1851 if (WARN_ON_ONCE(!ip->i_ino)) {
1852 rcu_read_unlock();
1853 return NULL;
1854 }
1855 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1856 rcu_read_unlock();
1857 return ip;
1858}
1859
1860/*
1861 * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
1862 * is not in cache.
1863 */
1864static int
1865xfs_iunlink_update_backref(
1866 struct xfs_perag *pag,
1867 xfs_agino_t prev_agino,
1868 xfs_agino_t next_agino)
1869{
1870 struct xfs_inode *ip;
1871
1872 /* No update necessary if we are at the end of the list. */
1873 if (next_agino == NULLAGINO)
1874 return 0;
1875
1876 ip = xfs_iunlink_lookup(pag, next_agino);
1877 if (!ip)
1878 return -ENOLINK;
1879
1880 ip->i_prev_unlinked = prev_agino;
1881 return 0;
1882}
1883
1884/*
1885 * Point the AGI unlinked bucket at an inode and log the results. The caller
1886 * is responsible for validating the old value.
1887 */
1888STATIC int
1889xfs_iunlink_update_bucket(
1890 struct xfs_trans *tp,
1891 struct xfs_perag *pag,
1892 struct xfs_buf *agibp,
1893 unsigned int bucket_index,
1894 xfs_agino_t new_agino)
1895{
1896 struct xfs_agi *agi = agibp->b_addr;
1897 xfs_agino_t old_value;
1898 int offset;
1899
1900 ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1901
1902 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1903 trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1904 old_value, new_agino);
1905
1906 /*
1907 * We should never find the head of the list already set to the value
1908 * passed in because either we're adding or removing ourselves from the
1909 * head of the list.
1910 */
1911 if (old_value == new_agino) {
1912 xfs_buf_mark_corrupt(agibp);
1913 return -EFSCORRUPTED;
1914 }
1915
1916 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1917 offset = offsetof(struct xfs_agi, agi_unlinked) +
1918 (sizeof(xfs_agino_t) * bucket_index);
1919 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1920 return 0;
1921}
1922
1923/*
1924 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1925 * to @prev_agino. Caller must hold the AGI to synchronize with other changes
1926 * to the unlinked list.
1927 */
1928STATIC int
1929xfs_iunlink_reload_next(
1930 struct xfs_trans *tp,
1931 struct xfs_buf *agibp,
1932 xfs_agino_t prev_agino,
1933 xfs_agino_t next_agino)
1934{
1935 struct xfs_perag *pag = agibp->b_pag;
1936 struct xfs_mount *mp = pag->pag_mount;
1937 struct xfs_inode *next_ip = NULL;
1938 xfs_ino_t ino;
1939 int error;
1940
1941 ASSERT(next_agino != NULLAGINO);
1942
1943#ifdef DEBUG
1944 rcu_read_lock();
1945 next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1946 ASSERT(next_ip == NULL);
1947 rcu_read_unlock();
1948#endif
1949
1950 xfs_info_ratelimited(mp,
1951 "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
1952 next_agino, pag->pag_agno);
1953
1954 /*
1955 * Use an untrusted lookup just to be cautious in case the AGI has been
1956 * corrupted and now points at a free inode. That shouldn't happen,
1957 * but we'd rather shut down now since we're already running in a weird
1958 * situation.
1959 */
1960 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1961 error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1962 if (error)
1963 return error;
1964
1965 /* If this is not an unlinked inode, something is very wrong. */
1966 if (VFS_I(next_ip)->i_nlink != 0) {
1967 error = -EFSCORRUPTED;
1968 goto rele;
1969 }
1970
1971 next_ip->i_prev_unlinked = prev_agino;
1972 trace_xfs_iunlink_reload_next(next_ip);
1973rele:
1974 ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1975 if (xfs_is_quotacheck_running(mp) && next_ip)
1976 xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
1977 xfs_irele(next_ip);
1978 return error;
1979}
1980
1981static int
1982xfs_iunlink_insert_inode(
1983 struct xfs_trans *tp,
1984 struct xfs_perag *pag,
1985 struct xfs_buf *agibp,
1986 struct xfs_inode *ip)
1987{
1988 struct xfs_mount *mp = tp->t_mountp;
1989 struct xfs_agi *agi = agibp->b_addr;
1990 xfs_agino_t next_agino;
1991 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1992 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1993 int error;
1994
1995 /*
1996 * Get the index into the agi hash table for the list this inode will
1997 * go on. Make sure the pointer isn't garbage and that this inode
1998 * isn't already on the list.
1999 */
2000 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2001 if (next_agino == agino ||
2002 !xfs_verify_agino_or_null(pag, next_agino)) {
2003 xfs_buf_mark_corrupt(agibp);
2004 return -EFSCORRUPTED;
2005 }
2006
2007 /*
2008 * Update the prev pointer in the next inode to point back to this
2009 * inode.
2010 */
2011 error = xfs_iunlink_update_backref(pag, agino, next_agino);
2012 if (error == -ENOLINK)
2013 error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
2014 if (error)
2015 return error;
2016
2017 if (next_agino != NULLAGINO) {
2018 /*
2019 * There is already another inode in the bucket, so point this
2020 * inode to the current head of the list.
2021 */
2022 error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2023 if (error)
2024 return error;
2025 ip->i_next_unlinked = next_agino;
2026 }
2027
2028	/* Point the head of the list at this inode. */
2029 ip->i_prev_unlinked = NULLAGINO;
2030 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2031}
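
/*
 * Worked example for the insert above (illustrative numbers): with
 * XFS_AGI_UNLINKED_BUCKETS == 64, agino 131 hashes to bucket 131 % 64 == 3.
 * If bucket 3 currently heads a chain starting at H, inserting inode I is a
 * plain list push:
 *
 *	before:	agi_unlinked[3]: H -> ... -> NULLAGINO
 *	after:	agi_unlinked[3]: I -> H -> ... -> NULLAGINO
 *
 * leaving I->i_prev_unlinked == NULLAGINO and H->i_prev_unlinked == I.
 */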
2032
2033/*
2034 * This is called when the inode's link count has gone to 0 or we are creating
2035 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
2036 *
2037 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2038 * list when the inode is freed.
2039 */
2040STATIC int
2041xfs_iunlink(
2042 struct xfs_trans *tp,
2043 struct xfs_inode *ip)
2044{
2045 struct xfs_mount *mp = tp->t_mountp;
2046 struct xfs_perag *pag;
2047 struct xfs_buf *agibp;
2048 int error;
2049
2050 ASSERT(VFS_I(ip)->i_nlink == 0);
2051 ASSERT(VFS_I(ip)->i_mode != 0);
2052 trace_xfs_iunlink(ip);
2053
2054 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2055
2056 /* Get the agi buffer first. It ensures lock ordering on the list. */
2057 error = xfs_read_agi(pag, tp, &agibp);
2058 if (error)
2059 goto out;
2060
2061 error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2062out:
2063 xfs_perag_put(pag);
2064 return error;
2065}
2066
2067static int
2068xfs_iunlink_remove_inode(
2069 struct xfs_trans *tp,
2070 struct xfs_perag *pag,
2071 struct xfs_buf *agibp,
2072 struct xfs_inode *ip)
2073{
2074 struct xfs_mount *mp = tp->t_mountp;
2075 struct xfs_agi *agi = agibp->b_addr;
2076 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2077 xfs_agino_t head_agino;
2078 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2079 int error;
2080
2081 trace_xfs_iunlink_remove(ip);
2082
2083 /*
2084	 * Get the index into the agi hash table for the list this inode is
2085	 * on. Make sure the head pointer isn't garbage.
2086 */
2087 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2088 if (!xfs_verify_agino(pag, head_agino)) {
2089 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2090 agi, sizeof(*agi));
2091 return -EFSCORRUPTED;
2092 }
2093
2094 /*
2095	 * Clear our inode's on-disk next_unlinked pointer. The incore
2096	 * i_next_unlinked value is then used to update whatever was previous
2097	 * to us in the list to point to whatever was next in the list.
2098 */
2099 error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2100 if (error)
2101 return error;
2102
2103 /*
2104 * Update the prev pointer in the next inode to point back to previous
2105 * inode in the chain.
2106 */
2107 error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2108 ip->i_next_unlinked);
2109 if (error == -ENOLINK)
2110 error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2111 ip->i_next_unlinked);
2112 if (error)
2113 return error;
2114
2115 if (head_agino != agino) {
2116 struct xfs_inode *prev_ip;
2117
2118 prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2119 if (!prev_ip)
2120 return -EFSCORRUPTED;
2121
2122 error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2123 ip->i_next_unlinked);
2124 prev_ip->i_next_unlinked = ip->i_next_unlinked;
2125 } else {
2126 /* Point the head of the list to the next unlinked inode. */
2127 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2128 ip->i_next_unlinked);
2129 }
2130
2131 ip->i_next_unlinked = NULLAGINO;
2132 ip->i_prev_unlinked = 0;
2133 return error;
2134}
2135
2136/*
2137 * Pull the on-disk inode from the AGI unlinked list.
2138 */
2139STATIC int
2140xfs_iunlink_remove(
2141 struct xfs_trans *tp,
2142 struct xfs_perag *pag,
2143 struct xfs_inode *ip)
2144{
2145 struct xfs_buf *agibp;
2146 int error;
2147
2148 trace_xfs_iunlink_remove(ip);
2149
2150 /* Get the agi buffer first. It ensures lock ordering on the list. */
2151 error = xfs_read_agi(pag, tp, &agibp);
2152 if (error)
2153 return error;
2154
2155 return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2156}
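
/*
 * Removal sketch (illustrative): unhooking inode I from bucket b has the two
 * cases handled in xfs_iunlink_remove_inode() above. I's own next pointer is
 * logged as NULLAGINO first, then either the bucket head or the previous
 * inode's next pointer is logged to skip over I:
 *
 *	head:	agi_unlinked[b]: I -> X -> ...	becomes  agi_unlinked[b]: X -> ...
 *	middle:	... -> P -> I -> X -> ...	becomes  ... -> P -> X -> ...
 */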
2157
2158/*
2159 * Look up the specified inode number and, if it is not already marked
2160 * XFS_ISTALE, mark it stale. We should only find clean inodes in this
2161 * lookup that aren't already stale.
2162 */
2163static void
2164xfs_ifree_mark_inode_stale(
2165 struct xfs_perag *pag,
2166 struct xfs_inode *free_ip,
2167 xfs_ino_t inum)
2168{
2169 struct xfs_mount *mp = pag->pag_mount;
2170 struct xfs_inode_log_item *iip;
2171 struct xfs_inode *ip;
2172
2173retry:
2174 rcu_read_lock();
2175 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2176
2177 /* Inode not in memory, nothing to do */
2178 if (!ip) {
2179 rcu_read_unlock();
2180 return;
2181 }
2182
2183 /*
2184	 * Because this is an RCU protected lookup, we could find a recently
2185 * freed or even reallocated inode during the lookup. We need to check
2186 * under the i_flags_lock for a valid inode here. Skip it if it is not
2187 * valid, the wrong inode or stale.
2188 */
2189 spin_lock(&ip->i_flags_lock);
2190 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2191 goto out_iflags_unlock;
2192
2193 /*
2194 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2195 * other inodes that we did not find in the list attached to the buffer
2196 * and are not already marked stale. If we can't lock it, back off and
2197 * retry.
2198 */
2199 if (ip != free_ip) {
2200 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2201 spin_unlock(&ip->i_flags_lock);
2202 rcu_read_unlock();
2203 delay(1);
2204 goto retry;
2205 }
2206 }
2207 ip->i_flags |= XFS_ISTALE;
2208
2209 /*
2210 * If the inode is flushing, it is already attached to the buffer. All
2211 * we needed to do here is mark the inode stale so buffer IO completion
2212 * will remove it from the AIL.
2213 */
2214 iip = ip->i_itemp;
2215 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2216 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2217 ASSERT(iip->ili_last_fields);
2218 goto out_iunlock;
2219 }
2220
2221 /*
2222 * Inodes not attached to the buffer can be released immediately.
2223 * Everything else has to go through xfs_iflush_abort() on journal
2224 * commit as the flock synchronises removal of the inode from the
2225 * cluster buffer against inode reclaim.
2226 */
2227 if (!iip || list_empty(&iip->ili_item.li_bio_list))
2228 goto out_iunlock;
2229
2230 __xfs_iflags_set(ip, XFS_IFLUSHING);
2231 spin_unlock(&ip->i_flags_lock);
2232 rcu_read_unlock();
2233
2234 /* we have a dirty inode in memory that has not yet been flushed. */
2235 spin_lock(&iip->ili_lock);
2236 iip->ili_last_fields = iip->ili_fields;
2237 iip->ili_fields = 0;
2238 iip->ili_fsync_fields = 0;
2239 spin_unlock(&iip->ili_lock);
2240 ASSERT(iip->ili_last_fields);
2241
2242 if (ip != free_ip)
2243 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2244 return;
2245
2246out_iunlock:
2247 if (ip != free_ip)
2248 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2249out_iflags_unlock:
2250 spin_unlock(&ip->i_flags_lock);
2251 rcu_read_unlock();
2252}
2253
2254/*
2255 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2256 * inodes that are in memory - they all must be marked stale and attached to
2257 * the cluster buffer.
2258 */
2259static int
2260xfs_ifree_cluster(
2261 struct xfs_trans *tp,
2262 struct xfs_perag *pag,
2263 struct xfs_inode *free_ip,
2264 struct xfs_icluster *xic)
2265{
2266 struct xfs_mount *mp = free_ip->i_mount;
2267 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2268 struct xfs_buf *bp;
2269 xfs_daddr_t blkno;
2270 xfs_ino_t inum = xic->first_ino;
2271 int nbufs;
2272 int i, j;
2273 int ioffset;
2274 int error;
2275
2276 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2277
2278 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2279 /*
2280 * The allocation bitmap tells us which inodes of the chunk were
2281 * physically allocated. Skip the cluster if an inode falls into
2282 * a sparse region.
2283 */
2284 ioffset = inum - xic->first_ino;
2285 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2286 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2287 continue;
2288 }
2289
2290 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2291 XFS_INO_TO_AGBNO(mp, inum));
2292
2293 /*
2294 * We obtain and lock the backing buffer first in the process
2295 * here to ensure dirty inodes attached to the buffer remain in
2296 * the flushing state while we mark them stale.
2297 *
2298 * If we scan the in-memory inodes first, then buffer IO can
2299 * complete before we get a lock on it, and hence we may fail
2300 * to mark all the active inodes on the buffer stale.
2301 */
2302 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2303 mp->m_bsize * igeo->blocks_per_cluster,
2304 XBF_UNMAPPED, &bp);
2305 if (error)
2306 return error;
2307
2308 /*
2309 * This buffer may not have been correctly initialised as we
2310 * didn't read it from disk. That's not important because we are
2311		 * only using it to mark the buffer as stale in the log, and to
2312		 * attach stale cached inodes to it. That means it will never be
2313		 * dispatched for IO. If it is, we want to know about it, and we
2314		 * want it to fail. We can achieve this by adding a write
2315 * verifier to the buffer.
2316 */
2317 bp->b_ops = &xfs_inode_buf_ops;
2318
2319 /*
2320 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2321 * too. This requires lookups, and will skip inodes that we've
2322 * already marked XFS_ISTALE.
2323 */
2324 for (i = 0; i < igeo->inodes_per_cluster; i++)
2325 xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2326
2327 xfs_trans_stale_inode_buf(tp, bp);
2328 xfs_trans_binval(tp, bp);
2329 }
2330 return 0;
2331}
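
/*
 * Geometry example for the loop above (illustrative; these numbers describe
 * one common format, not a guarantee): with 4 KiB blocks, 512-byte inodes and
 * 8 KiB inode cluster buffers, a 64-inode chunk spans ialloc_blks = 8 blocks
 * with blocks_per_cluster = 2 and inodes_per_cluster = 16, so the loop stales
 * nbufs = 8 / 2 = 4 cluster buffers per chunk.
 */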
2332
2333/*
2334 * This is called to return an inode to the inode free list. The inode should
2335 * already be truncated to 0 length and have no pages associated with it. This
2336 * routine also assumes that the inode is already a part of the transaction.
2337 *
2338 * The on-disk copy of the inode will have been added to the list of unlinked
2339 * inodes in the AGI. We need to remove the inode from that list atomically with
2340 * respect to freeing it here.
2341 */
2342int
2343xfs_ifree(
2344 struct xfs_trans *tp,
2345 struct xfs_inode *ip)
2346{
2347 struct xfs_mount *mp = ip->i_mount;
2348 struct xfs_perag *pag;
2349 struct xfs_icluster xic = { 0 };
2350 struct xfs_inode_log_item *iip = ip->i_itemp;
2351 int error;
2352
2353 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2354 ASSERT(VFS_I(ip)->i_nlink == 0);
2355 ASSERT(ip->i_df.if_nextents == 0);
2356 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2357 ASSERT(ip->i_nblocks == 0);
2358
2359 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2360
2361 /*
2362 * Free the inode first so that we guarantee that the AGI lock is going
2363 * to be taken before we remove the inode from the unlinked list. This
2364 * makes the AGI lock -> unlinked list modification order the same as
2365 * used in O_TMPFILE creation.
2366 */
2367 error = xfs_difree(tp, pag, ip->i_ino, &xic);
2368 if (error)
2369 goto out;
2370
2371 error = xfs_iunlink_remove(tp, pag, ip);
2372 if (error)
2373 goto out;
2374
2375 /*
2376 * Free any local-format data sitting around before we reset the
2377 * data fork to extents format. Note that the attr fork data has
2378 * already been freed by xfs_attr_inactive.
2379 */
2380 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2381 kmem_free(ip->i_df.if_data);
2382 ip->i_df.if_data = NULL;
2383 ip->i_df.if_bytes = 0;
2384 }
2385
2386 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2387 ip->i_diflags = 0;
2388 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2389 ip->i_forkoff = 0; /* mark the attr fork not in use */
2390 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2391 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2392 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2393
2394 /* Don't attempt to replay owner changes for a deleted inode */
2395 spin_lock(&iip->ili_lock);
2396 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2397 spin_unlock(&iip->ili_lock);
2398
2399 /*
2400 * Bump the generation count so no one will be confused
2401 * by reincarnations of this inode.
2402 */
2403 VFS_I(ip)->i_generation++;
2404 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2405
2406 if (xic.deleted)
2407 error = xfs_ifree_cluster(tp, pag, ip, &xic);
2408out:
2409 xfs_perag_put(pag);
2410 return error;
2411}
2412
2413/*
2414 * This is called to unpin an inode. The caller must have the inode locked
2415 * in at least shared mode so that the buffer cannot be subsequently pinned
2416 * once someone is waiting for it to be unpinned.
2417 */
2418static void
2419xfs_iunpin(
2420 struct xfs_inode *ip)
2421{
2422 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2423
2424 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2425
2426 /* Give the log a push to start the unpinning I/O */
2427 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2428
2429}
2430
2431static void
2432__xfs_iunpin_wait(
2433 struct xfs_inode *ip)
2434{
2435 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2436 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2437
2438 xfs_iunpin(ip);
2439
2440 do {
2441 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2442 if (xfs_ipincount(ip))
2443 io_schedule();
2444 } while (xfs_ipincount(ip));
2445 finish_wait(wq, &wait.wq_entry);
2446}
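
/*
 * Note: the matching wakeup comes from the inode log item unpin path
 * (xfs_inode_item_unpin()), which issues wake_up_bit() on __XFS_IPINNED_BIT
 * once the pin count drops to zero.
 */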
2447
2448void
2449xfs_iunpin_wait(
2450 struct xfs_inode *ip)
2451{
2452 if (xfs_ipincount(ip))
2453 __xfs_iunpin_wait(ip);
2454}
2455
2456/*
2457 * Removing an inode from the namespace involves removing the directory entry
2458 * and dropping the link count on the inode. Removing the directory entry can
2459 * result in locking an AGF (directory blocks were freed) and removing a link
2460 * count can result in placing the inode on an unlinked list which results in
2461 * locking an AGI.
2462 *
2463 * The big problem here is that we have an ordering constraint on AGF and AGI
2464 * locking - inode allocation locks the AGI, then can allocate a new extent for
2465 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2466 * removes the inode from the unlinked list, requiring that we lock the AGI
2467 * first, and then freeing the inode can result in an inode chunk being freed
2468 * and hence freeing disk space requiring that we lock an AGF.
2469 *
2470 * Hence the ordering that is imposed by other parts of the code is AGI before
2471 * AGF. This means we cannot remove the directory entry before we drop the
2472 * inode link count and put it on the unlinked list, as this results in a lock
2473 * order of AGF then AGI, and this can deadlock against inode allocation and
2474 * freeing. Therefore we must drop the link counts before we remove the
2475 * directory entry.
2476 *
2477 * This is still safe from a transactional point of view - it is not until we
2478 * get to xfs_defer_finish() that we have the possibility of multiple
2479 * transactions in this operation. Hence as long as we remove the directory
2480 * entry and drop the link count in the first transaction of the remove
2481 * operation, there are no transactional constraints on the ordering here.
2482 */
2483int
2484xfs_remove(
2485 xfs_inode_t *dp,
2486 struct xfs_name *name,
2487 xfs_inode_t *ip)
2488{
2489 xfs_mount_t *mp = dp->i_mount;
2490 xfs_trans_t *tp = NULL;
2491 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2492 int dontcare;
2493 int error = 0;
2494 uint resblks;
2495
2496 trace_xfs_remove(dp, name);
2497
2498 if (xfs_is_shutdown(mp))
2499 return -EIO;
2500 if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
2501 return -EIO;
2502
2503 error = xfs_qm_dqattach(dp);
2504 if (error)
2505 goto std_return;
2506
2507 error = xfs_qm_dqattach(ip);
2508 if (error)
2509 goto std_return;
2510
2511 /*
2512 * We try to get the real space reservation first, allowing for
2513 * directory btree deletion(s) implying possible bmap insert(s). If we
2514 * can't get the space reservation then we use 0 instead, and avoid the
2515	 * bmap btree insert(s) in the directory code: if a bmap insert would
2516	 * be needed, the LAST block of the directory is trimmed instead.
2517 *
2518 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2519 * the directory code can handle a reservationless update and we don't
2520 * want to prevent a user from trying to free space by deleting things.
2521 */
2522 resblks = XFS_REMOVE_SPACE_RES(mp);
2523 error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2524 &tp, &dontcare);
2525 if (error) {
2526 ASSERT(error != -ENOSPC);
2527 goto std_return;
2528 }
2529
2530 /*
2531 * If we're removing a directory perform some additional validation.
2532 */
2533 if (is_dir) {
2534 ASSERT(VFS_I(ip)->i_nlink >= 2);
2535 if (VFS_I(ip)->i_nlink != 2) {
2536 error = -ENOTEMPTY;
2537 goto out_trans_cancel;
2538 }
2539 if (!xfs_dir_isempty(ip)) {
2540 error = -ENOTEMPTY;
2541 goto out_trans_cancel;
2542 }
2543
2544 /* Drop the link from ip's "..". */
2545 error = xfs_droplink(tp, dp);
2546 if (error)
2547 goto out_trans_cancel;
2548
2549 /* Drop the "." link from ip to self. */
2550 error = xfs_droplink(tp, ip);
2551 if (error)
2552 goto out_trans_cancel;
2553
2554 /*
2555 * Point the unlinked child directory's ".." entry to the root
2556 * directory to eliminate back-references to inodes that may
2557 * get freed before the child directory is closed. If the fs
2558 * gets shrunk, this can lead to dirent inode validation errors.
2559 */
2560 if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2561 error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2562 tp->t_mountp->m_sb.sb_rootino, 0);
2563 if (error)
2564 goto out_trans_cancel;
2565 }
2566 } else {
2567 /*
2568 * When removing a non-directory we need to log the parent
2569 * inode here. For a directory this is done implicitly
2570 * by the xfs_droplink call for the ".." entry.
2571 */
2572 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2573 }
2574 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2575
2576 /* Drop the link from dp to ip. */
2577 error = xfs_droplink(tp, ip);
2578 if (error)
2579 goto out_trans_cancel;
2580
2581 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2582 if (error) {
2583 ASSERT(error != -ENOENT);
2584 goto out_trans_cancel;
2585 }
2586
2587 /*
2588 * If this is a synchronous mount, make sure that the
2589 * remove transaction goes to disk before returning to
2590 * the user.
2591 */
2592 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2593 xfs_trans_set_sync(tp);
2594
2595 error = xfs_trans_commit(tp);
2596 if (error)
2597 goto std_return;
2598
2599 if (is_dir && xfs_inode_is_filestream(ip))
2600 xfs_filestream_deassociate(ip);
2601
2602 return 0;
2603
2604 out_trans_cancel:
2605 xfs_trans_cancel(tp);
2606 std_return:
2607 return error;
2608}
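
/*
 * Lock ordering sketch for the discussion above xfs_remove() (illustrative):
 *
 *	inode allocation:	AGI (chunk init)    -> AGF (extent alloc)
 *	unlink/free:		AGI (unlinked list) -> AGF (chunk/dir blocks freed)
 *
 * Removing the dirent before dropping the link count would take the AGF
 * (directory block free) before the AGI (unlinked list insert), an ABBA
 * inversion against both paths above; hence the ordering enforced here.
 */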
2609
2610/*
2611 * Enter all inodes for a rename transaction into a sorted array.
2612 */
2613#define __XFS_SORT_INODES 5
2614STATIC void
2615xfs_sort_for_rename(
2616 struct xfs_inode *dp1, /* in: old (source) directory inode */
2617 struct xfs_inode *dp2, /* in: new (target) directory inode */
2618 struct xfs_inode *ip1, /* in: inode of old entry */
2619 struct xfs_inode *ip2, /* in: inode of new entry */
2620 struct xfs_inode *wip, /* in: whiteout inode */
2621 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2622 int *num_inodes) /* in/out: inodes in array */
2623{
2624 int i, j;
2625
2626 ASSERT(*num_inodes == __XFS_SORT_INODES);
2627 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2628
2629 /*
2630 * i_tab contains a list of pointers to inodes. We initialize
2631 * the table here & we'll sort it. We will then use it to
2632 * order the acquisition of the inode locks.
2633 *
2634 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2635 */
2636 i = 0;
2637 i_tab[i++] = dp1;
2638 i_tab[i++] = dp2;
2639 i_tab[i++] = ip1;
2640 if (ip2)
2641 i_tab[i++] = ip2;
2642 if (wip)
2643 i_tab[i++] = wip;
2644 *num_inodes = i;
2645
2646 /*
2647 * Sort the elements via bubble sort. (Remember, there are at
2648 * most 5 elements to sort, so this is adequate.)
2649 */
2650 for (i = 0; i < *num_inodes; i++) {
2651 for (j = 1; j < *num_inodes; j++) {
2652 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2653 struct xfs_inode *temp = i_tab[j];
2654 i_tab[j] = i_tab[j-1];
2655 i_tab[j-1] = temp;
2656 }
2657 }
2658 }
2659}
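
/*
 * Example (illustrative inode numbers): for a cross-directory rename with
 * dp1->i_ino == 17, dp2->i_ino == 5 and ip1->i_ino == 9, no target and no
 * whiteout, i_tab is filled as {17, 5, 9} and sorted to {5, 9, 17}, so the
 * ILOCKs are always taken in ascending inode number order regardless of the
 * argument order.
 */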
2660
2661static int
2662xfs_finish_rename(
2663 struct xfs_trans *tp)
2664{
2665 /*
2666 * If this is a synchronous mount, make sure that the rename transaction
2667 * goes to disk before returning to the user.
2668 */
2669 if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2670 xfs_trans_set_sync(tp);
2671
2672 return xfs_trans_commit(tp);
2673}
2674
2675/*
2676 * xfs_cross_rename()
2677 *
2678 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2679 */
2680STATIC int
2681xfs_cross_rename(
2682 struct xfs_trans *tp,
2683 struct xfs_inode *dp1,
2684 struct xfs_name *name1,
2685 struct xfs_inode *ip1,
2686 struct xfs_inode *dp2,
2687 struct xfs_name *name2,
2688 struct xfs_inode *ip2,
2689 int spaceres)
2690{
2691 int error = 0;
2692 int ip1_flags = 0;
2693 int ip2_flags = 0;
2694 int dp2_flags = 0;
2695
2696 /* Swap inode number for dirent in first parent */
2697 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2698 if (error)
2699 goto out_trans_abort;
2700
2701 /* Swap inode number for dirent in second parent */
2702 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2703 if (error)
2704 goto out_trans_abort;
2705
2706 /*
2707 * If we're renaming one or more directories across different parents,
2708 * update the respective ".." entries (and link counts) to match the new
2709 * parents.
2710 */
2711 if (dp1 != dp2) {
2712 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2713
2714 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2715 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2716 dp1->i_ino, spaceres);
2717 if (error)
2718 goto out_trans_abort;
2719
2720 /* transfer ip2 ".." reference to dp1 */
2721 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2722 error = xfs_droplink(tp, dp2);
2723 if (error)
2724 goto out_trans_abort;
2725 xfs_bumplink(tp, dp1);
2726 }
2727
2728 /*
2729 * Although ip1 isn't changed here, userspace needs
2730 * to be warned about the change, so that applications
2731			 * relying on it (like backup tools) will properly
2732			 * notice the change.
2733 */
2734 ip1_flags |= XFS_ICHGTIME_CHG;
2735 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2736 }
2737
2738 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2739 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2740 dp2->i_ino, spaceres);
2741 if (error)
2742 goto out_trans_abort;
2743
2744 /* transfer ip1 ".." reference to dp2 */
2745 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2746 error = xfs_droplink(tp, dp1);
2747 if (error)
2748 goto out_trans_abort;
2749 xfs_bumplink(tp, dp2);
2750 }
2751
2752 /*
2753 * Although ip2 isn't changed here, userspace needs
2754 * to be warned about the change, so that applications
2755			 * relying on it (like backup tools) will properly
2756			 * notice the change.
2757 */
2758 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2759 ip2_flags |= XFS_ICHGTIME_CHG;
2760 }
2761 }
2762
2763 if (ip1_flags) {
2764 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2765 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2766 }
2767 if (ip2_flags) {
2768 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2769 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2770 }
2771 if (dp2_flags) {
2772 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2773 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2774 }
2775 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2776 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2777 return xfs_finish_rename(tp);
2778
2779out_trans_abort:
2780 xfs_trans_cancel(tp);
2781 return error;
2782}
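
/*
 * From userspace this path is reached via renameat2() with RENAME_EXCHANGE,
 * e.g. (illustrative):
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * which atomically swaps the two dirents; both names must already exist.
 */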
2783
2784/*
2785 * xfs_rename_alloc_whiteout()
2786 *
2787 * Return a referenced, unlinked, unlocked inode that can be used as a
2788 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2789 * crash between allocating the inode and linking it into the rename
2790 * transaction, recovery will free the inode and we won't leak it.
2791 */
2792static int
2793xfs_rename_alloc_whiteout(
2794 struct mnt_idmap *idmap,
2795 struct xfs_name *src_name,
2796 struct xfs_inode *dp,
2797 struct xfs_inode **wip)
2798{
2799 struct xfs_inode *tmpfile;
2800 struct qstr name;
2801 int error;
2802
2803 error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2804 &tmpfile);
2805 if (error)
2806 return error;
2807
2808 name.name = src_name->name;
2809 name.len = src_name->len;
2810 error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2811 if (error) {
2812 xfs_finish_inode_setup(tmpfile);
2813 xfs_irele(tmpfile);
2814 return error;
2815 }
2816
2817 /*
2818 * Prepare the tmpfile inode as if it were created through the VFS.
2819 * Complete the inode setup and flag it as linkable. nlink is already
2820 * zero, so we can skip the drop_nlink.
2821 */
2822 xfs_setup_iops(tmpfile);
2823 xfs_finish_inode_setup(tmpfile);
2824 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2825
2826 *wip = tmpfile;
2827 return 0;
2828}
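
/*
 * For reference (illustrative): a whiteout rename is requested from
 * userspace via
 *
 *	renameat2(AT_FDCWD, "src", AT_FDCWD, "dst", RENAME_WHITEOUT);
 *
 * which renames src to dst and leaves a 0:0 character device node at src;
 * overlayfs uses this to record that src no longer exists in the upper layer.
 */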
2829
2830/*
2831 * xfs_rename
2832 */
2833int
2834xfs_rename(
2835 struct mnt_idmap *idmap,
2836 struct xfs_inode *src_dp,
2837 struct xfs_name *src_name,
2838 struct xfs_inode *src_ip,
2839 struct xfs_inode *target_dp,
2840 struct xfs_name *target_name,
2841 struct xfs_inode *target_ip,
2842 unsigned int flags)
2843{
2844 struct xfs_mount *mp = src_dp->i_mount;
2845 struct xfs_trans *tp;
2846 struct xfs_inode *wip = NULL; /* whiteout inode */
2847 struct xfs_inode *inodes[__XFS_SORT_INODES];
2848 int i;
2849 int num_inodes = __XFS_SORT_INODES;
2850 bool new_parent = (src_dp != target_dp);
2851 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2852 int spaceres;
2853 bool retried = false;
2854 int error, nospace_error = 0;
2855
2856 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2857
2858 if ((flags & RENAME_EXCHANGE) && !target_ip)
2859 return -EINVAL;
2860
2861 /*
2862 * If we are doing a whiteout operation, allocate the whiteout inode
2863 * we will be placing at the target and ensure the type is set
2864 * appropriately.
2865 */
2866 if (flags & RENAME_WHITEOUT) {
2867 error = xfs_rename_alloc_whiteout(idmap, src_name,
2868 target_dp, &wip);
2869 if (error)
2870 return error;
2871
2872 /* setup target dirent info as whiteout */
2873 src_name->type = XFS_DIR3_FT_CHRDEV;
2874 }
2875
2876 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2877 inodes, &num_inodes);
2878
2879retry:
2880 nospace_error = 0;
2881 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2882 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2883 if (error == -ENOSPC) {
2884 nospace_error = error;
2885 spaceres = 0;
2886 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2887 &tp);
2888 }
2889 if (error)
2890 goto out_release_wip;
2891
2892 /*
2893 * Attach the dquots to the inodes
2894 */
2895 error = xfs_qm_vop_rename_dqattach(inodes);
2896 if (error)
2897 goto out_trans_cancel;
2898
2899 /*
2900 * Lock all the participating inodes. Depending upon whether
2901 * the target_name exists in the target directory, and
2902 * whether the target directory is the same as the source
2903 * directory, we can lock from 2 to 5 inodes.
2904 */
2905 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2906
2907 /*
2908 * Join all the inodes to the transaction. From this point on,
2909 * we can rely on either trans_commit or trans_cancel to unlock
2910 * them.
2911 */
2912 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2913 if (new_parent)
2914 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2915 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2916 if (target_ip)
2917 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2918 if (wip)
2919 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2920
2921 /*
2922 * If we are using project inheritance, we only allow renames
2923 * into our tree when the project IDs are the same; else the
2924 * tree quota mechanism would be circumvented.
2925 */
2926 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2927 target_dp->i_projid != src_ip->i_projid)) {
2928 error = -EXDEV;
2929 goto out_trans_cancel;
2930 }
2931
2932 /* RENAME_EXCHANGE is unique from here on. */
2933 if (flags & RENAME_EXCHANGE)
2934 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2935 target_dp, target_name, target_ip,
2936 spaceres);
2937
2938 /*
2939 * Try to reserve quota to handle an expansion of the target directory.
2940 * We'll allow the rename to continue in reservationless mode if we hit
2941 * a space usage constraint. If we trigger reservationless mode, save
2942 * the errno if there isn't any free space in the target directory.
2943 */
2944 if (spaceres != 0) {
2945 error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2946 0, false);
2947 if (error == -EDQUOT || error == -ENOSPC) {
2948 if (!retried) {
2949 xfs_trans_cancel(tp);
2950 xfs_blockgc_free_quota(target_dp, 0);
2951 retried = true;
2952 goto retry;
2953 }
2954
2955 nospace_error = error;
2956 spaceres = 0;
2957 error = 0;
2958 }
2959 if (error)
2960 goto out_trans_cancel;
2961 }
2962
2963 /*
2964 * Check for expected errors before we dirty the transaction
2965 * so we can return an error without a transaction abort.
2966 */
2967 if (target_ip == NULL) {
2968 /*
2969 * If there's no space reservation, check the entry will
2970 * fit before actually inserting it.
2971 */
2972 if (!spaceres) {
2973 error = xfs_dir_canenter(tp, target_dp, target_name);
2974 if (error)
2975 goto out_trans_cancel;
2976 }
2977 } else {
2978 /*
2979		 * If the target exists and it's a directory, check whether
2980		 * it can be destroyed.
2981 */
2982 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2983 (!xfs_dir_isempty(target_ip) ||
2984 (VFS_I(target_ip)->i_nlink > 2))) {
2985 error = -EEXIST;
2986 goto out_trans_cancel;
2987 }
2988 }
2989
2990 /*
2991 * Lock the AGI buffers we need to handle bumping the nlink of the
2992 * whiteout inode off the unlinked list and to handle dropping the
2993 * nlink of the target inode. Per locking order rules, do this in
2994 * increasing AG order and before directory block allocation tries to
2995 * grab AGFs because we grab AGIs before AGFs.
2996 *
2997 * The (vfs) caller must ensure that if src is a directory then
2998 * target_ip is either null or an empty directory.
2999 */
3000 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3001 if (inodes[i] == wip ||
3002 (inodes[i] == target_ip &&
3003 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3004 struct xfs_perag *pag;
3005 struct xfs_buf *bp;
3006
3007 pag = xfs_perag_get(mp,
3008 XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
3009 error = xfs_read_agi(pag, tp, &bp);
3010 xfs_perag_put(pag);
3011 if (error)
3012 goto out_trans_cancel;
3013 }
3014 }
3015
3016 /*
3017 * Directory entry creation below may acquire the AGF. Remove
3018 * the whiteout from the unlinked list first to preserve correct
3019 * AGI/AGF locking order. This dirties the transaction so failures
3020 * after this point will abort and log recovery will clean up the
3021 * mess.
3022 *
3023 * For whiteouts, we need to bump the link count on the whiteout
3024 * inode. After this point, we have a real link, clear the tmpfile
3025 * state flag from the inode so it doesn't accidentally get misused
3026 * in future.
3027 */
3028 if (wip) {
3029 struct xfs_perag *pag;
3030
3031 ASSERT(VFS_I(wip)->i_nlink == 0);
3032
3033 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3034 error = xfs_iunlink_remove(tp, pag, wip);
3035 xfs_perag_put(pag);
3036 if (error)
3037 goto out_trans_cancel;
3038
3039 xfs_bumplink(tp, wip);
3040 VFS_I(wip)->i_state &= ~I_LINKABLE;
3041 }
3042
3043 /*
3044 * Set up the target.
3045 */
3046 if (target_ip == NULL) {
3047 /*
3048 * If target does not exist and the rename crosses
3049 * directories, adjust the target directory link count
3050 * to account for the ".." reference from the new entry.
3051 */
3052 error = xfs_dir_createname(tp, target_dp, target_name,
3053 src_ip->i_ino, spaceres);
3054 if (error)
3055 goto out_trans_cancel;
3056
3057 xfs_trans_ichgtime(tp, target_dp,
3058 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3059
3060 if (new_parent && src_is_directory) {
3061 xfs_bumplink(tp, target_dp);
3062 }
3063 } else { /* target_ip != NULL */
3064 /*
3065 * Link the source inode under the target name.
3066 * If the source inode is a directory and we are moving
3067 * it across directories, its ".." entry will be
3068 * inconsistent until we replace that down below.
3069 *
3070 * In case there is already an entry with the same
3071 * name at the destination directory, remove it first.
3072 */
3073 error = xfs_dir_replace(tp, target_dp, target_name,
3074 src_ip->i_ino, spaceres);
3075 if (error)
3076 goto out_trans_cancel;
3077
3078 xfs_trans_ichgtime(tp, target_dp,
3079 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3080
3081 /*
3082 * Decrement the link count on the target since the target
3083 * dir no longer points to it.
3084 */
3085 error = xfs_droplink(tp, target_ip);
3086 if (error)
3087 goto out_trans_cancel;
3088
3089 if (src_is_directory) {
3090 /*
3091 * Drop the link from the old "." entry.
3092 */
3093 error = xfs_droplink(tp, target_ip);
3094 if (error)
3095 goto out_trans_cancel;
3096 }
3097 } /* target_ip != NULL */
3098
3099 /*
3100 * Remove the source.
3101 */
3102 if (new_parent && src_is_directory) {
3103 /*
3104 * Rewrite the ".." entry to point to the new
3105 * directory.
3106 */
3107 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3108 target_dp->i_ino, spaceres);
3109 ASSERT(error != -EEXIST);
3110 if (error)
3111 goto out_trans_cancel;
3112 }
3113
3114 /*
3115 * We always want to hit the ctime on the source inode.
3116 *
3117 * This isn't strictly required by the standards since the source
3118 * inode isn't really being changed, but old unix file systems did
3119 * it and some incremental backup programs won't work without it.
3120 */
3121 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3122 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3123
3124 /*
3125 * Adjust the link count on src_dp. This is necessary when
3126 * renaming a directory, either within one parent when
3127 * the target existed, or across two parent directories.
3128 */
3129 if (src_is_directory && (new_parent || target_ip != NULL)) {
3130
3131 /*
3132 * Decrement link count on src_directory since the
3133 * entry that's moved no longer points to it.
3134 */
3135 error = xfs_droplink(tp, src_dp);
3136 if (error)
3137 goto out_trans_cancel;
3138 }
3139
3140 /*
3141 * For whiteouts, we only need to update the source dirent with the
3142 * inode number of the whiteout inode rather than removing it
3143 * altogether.
3144 */
3145 if (wip)
3146 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3147 spaceres);
3148 else
3149 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3150 spaceres);
3151
3152 if (error)
3153 goto out_trans_cancel;
3154
3155 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3156 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3157 if (new_parent)
3158 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3159
3160 error = xfs_finish_rename(tp);
3161 if (wip)
3162 xfs_irele(wip);
3163 return error;
3164
3165out_trans_cancel:
3166 xfs_trans_cancel(tp);
3167out_release_wip:
3168 if (wip)
3169 xfs_irele(wip);
3170 if (error == -ENOSPC && nospace_error)
3171 error = nospace_error;
3172 return error;
3173}
3174
3175static int
3176xfs_iflush(
3177 struct xfs_inode *ip,
3178 struct xfs_buf *bp)
3179{
3180 struct xfs_inode_log_item *iip = ip->i_itemp;
3181 struct xfs_dinode *dip;
3182 struct xfs_mount *mp = ip->i_mount;
3183 int error;
3184
3185 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3186 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3187 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3188 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3189 ASSERT(iip->ili_item.li_buf == bp);
3190
3191 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3192
3193 /*
3194 * We don't flush the inode if any of the following checks fail, but we
3195 * do still update the log item and attach to the backing buffer as if
3196 * the flush happened. This is a formality to facilitate predictable
3197 * error handling as the caller will shutdown and fail the buffer.
3198 */
3199 error = -EFSCORRUPTED;
3200 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3201 mp, XFS_ERRTAG_IFLUSH_1)) {
3202 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3203 "%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3204 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3205 goto flush_out;
3206 }
3207 if (S_ISREG(VFS_I(ip)->i_mode)) {
3208 if (XFS_TEST_ERROR(
3209 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3210 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3211 mp, XFS_ERRTAG_IFLUSH_3)) {
3212 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3213 "%s: Bad regular inode %llu, ptr "PTR_FMT,
3214 __func__, ip->i_ino, ip);
3215 goto flush_out;
3216 }
3217 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3218 if (XFS_TEST_ERROR(
3219 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3220 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3221 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3222 mp, XFS_ERRTAG_IFLUSH_4)) {
3223 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3224 "%s: Bad directory inode %llu, ptr "PTR_FMT,
3225 __func__, ip->i_ino, ip);
3226 goto flush_out;
3227 }
3228 }
3229 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3230 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3231 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3232 "%s: detected corrupt incore inode %llu, "
3233 "total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3234 __func__, ip->i_ino,
3235 ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3236 ip->i_nblocks, ip);
3237 goto flush_out;
3238 }
3239 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3240 mp, XFS_ERRTAG_IFLUSH_6)) {
3241 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3242 "%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3243 __func__, ip->i_ino, ip->i_forkoff, ip);
3244 goto flush_out;
3245 }
3246
3247 /*
3248	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3249 * count for correct sequencing. We bump the flush iteration count so
3250 * we can detect flushes which postdate a log record during recovery.
3251 * This is redundant as we now log every change and hence this can't
3252 * happen but we need to still do it to ensure backwards compatibility
3253 * with old kernels that predate logging all inode changes.
3254 */
3255 if (!xfs_has_v3inodes(mp))
3256 ip->i_flushiter++;
3257
3258 /*
3259 * If there are inline format data / attr forks attached to this inode,
3260 * make sure they are not corrupt.
3261 */
3262 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3263 xfs_ifork_verify_local_data(ip))
3264 goto flush_out;
3265 if (xfs_inode_has_attr_fork(ip) &&
3266 ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3267 xfs_ifork_verify_local_attr(ip))
3268 goto flush_out;
3269
3270 /*
3271 * Copy the dirty parts of the inode into the on-disk inode. We always
3272 * copy out the core of the inode, because if the inode is dirty at all
3273 * the core must be.
3274 */
3275 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3276
3277 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3278 if (!xfs_has_v3inodes(mp)) {
3279 if (ip->i_flushiter == DI_MAX_FLUSH)
3280 ip->i_flushiter = 0;
3281 }
3282
3283 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3284 if (xfs_inode_has_attr_fork(ip))
3285 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3286
3287 /*
3288 * We've recorded everything logged in the inode, so we'd like to clear
3289 * the ili_fields bits so we don't log and flush things unnecessarily.
3290 * However, we can't stop logging all this information until the data
3291 * we've copied into the disk buffer is written to disk. If we did we
3292 * might overwrite the copy of the inode in the log with all the data
3293 * after re-logging only part of it, and in the face of a crash we
3294 * wouldn't have all the data we need to recover.
3295 *
3296 * What we do is move the bits to the ili_last_fields field. When
3297 * logging the inode, these bits are moved back to the ili_fields field.
3298 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3299 * we know that the information those bits represent is permanently on
3300 * disk. As long as the flush completes before the inode is logged
3301 * again, then both ili_fields and ili_last_fields will be cleared.
3302 */
3303 error = 0;
3304flush_out:
3305 spin_lock(&iip->ili_lock);
3306 iip->ili_last_fields = iip->ili_fields;
3307 iip->ili_fields = 0;
3308 iip->ili_fsync_fields = 0;
3309 spin_unlock(&iip->ili_lock);
3310
3311 /*
3312 * Store the current LSN of the inode so that we can tell whether the
3313 * item has moved in the AIL from xfs_buf_inode_iodone().
3314 */
3315 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3316 &iip->ili_item.li_lsn);
3317
3318 /* generate the checksum. */
3319 xfs_dinode_calc_crc(mp, dip);
3320 return error;
3321}
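
/*
 * Lifecycle sketch of the field tracking described above (illustrative):
 *
 *	log inode:	ili_fields |= XFS_ILOG_*	(dirty bits accumulate)
 *	xfs_iflush():	ili_last_fields = ili_fields; ili_fields = 0;
 *	buffer IO done:	ili_last_fields = 0		(changes now on disk)
 *
 * If the inode is relogged between the flush and IO completion, logging moves
 * ili_last_fields back into ili_fields so nothing is lost across a crash.
 */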
3322
3323/*
3324 * Non-blocking flush of dirty inode metadata into the backing buffer.
3325 *
3326 * The caller must have a reference to the inode and hold the cluster buffer
3327 * locked. The function will walk across all the inodes on the cluster buffer
3328 * that it can find and lock without blocking, and flush them to the buffer.
3329 *
3330 * On successful flushing of at least one inode, the caller must write out the
3331 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3332 * the caller needs to release the buffer. On failure, the filesystem will be
3333 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3334 * will be returned.
3335 */
3336int
3337xfs_iflush_cluster(
3338 struct xfs_buf *bp)
3339{
3340 struct xfs_mount *mp = bp->b_mount;
3341 struct xfs_log_item *lip, *n;
3342 struct xfs_inode *ip;
3343 struct xfs_inode_log_item *iip;
3344 int clcount = 0;
3345 int error = 0;
3346
3347 /*
3348 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3349 * will remove itself from the list.
3350 */
3351 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3352 iip = (struct xfs_inode_log_item *)lip;
3353 ip = iip->ili_inode;
3354
3355 /*
3356 * Quick and dirty check to avoid locks if possible.
3357 */
3358 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3359 continue;
3360 if (xfs_ipincount(ip))
3361 continue;
3362
3363 /*
3364 * The inode is still attached to the buffer, which means it is
3365 * dirty but reclaim might try to grab it. Check carefully for
3366 * that, and grab the ilock while still holding the i_flags_lock
3367 * to guarantee reclaim will not be able to reclaim this inode
3368 * once we drop the i_flags_lock.
3369 */
3370 spin_lock(&ip->i_flags_lock);
3371 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3372 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3373 spin_unlock(&ip->i_flags_lock);
3374 continue;
3375 }
3376
3377 /*
3378 * ILOCK will pin the inode against reclaim and prevent
3379 * concurrent transactions modifying the inode while we are
3380 * flushing the inode. If we get the lock, set the flushing
3381 * state before we drop the i_flags_lock.
3382 */
3383 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3384 spin_unlock(&ip->i_flags_lock);
3385 continue;
3386 }
3387 __xfs_iflags_set(ip, XFS_IFLUSHING);
3388 spin_unlock(&ip->i_flags_lock);
3389
3390 /*
3391 * Abort flushing this inode if we are shut down because the
3392 * inode may not currently be in the AIL. This can occur when
3393 * log I/O failure unpins the inode without inserting into the
3394 * AIL, leaving a dirty/unpinned inode attached to the buffer
3395 * that otherwise looks like it should be flushed.
3396 */
3397 if (xlog_is_shutdown(mp->m_log)) {
3398 xfs_iunpin_wait(ip);
3399 xfs_iflush_abort(ip);
3400 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3401 error = -EIO;
3402 continue;
3403 }
3404
3405 /* don't block waiting on a log force to unpin dirty inodes */
3406 if (xfs_ipincount(ip)) {
3407 xfs_iflags_clear(ip, XFS_IFLUSHING);
3408 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3409 continue;
3410 }
3411
3412 if (!xfs_inode_clean(ip))
3413 error = xfs_iflush(ip, bp);
3414 else
3415 xfs_iflags_clear(ip, XFS_IFLUSHING);
3416 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3417 if (error)
3418 break;
3419 clcount++;
3420 }
3421
3422 if (error) {
3423 /*
3424 * Shutdown first so we kill the log before we release this
3425 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3426 * of the log, failing it before the _log_ is shut down can
3427 * result in the log tail being moved forward in the journal
3428 * on disk because log writes can still be taking place. Hence
3429 * unpinning the tail will allow the ICREATE intent to be
3430		 * removed from the log and recovery will fail with uninitialised
3431 * inode cluster buffers.
3432 */
3433 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3434 bp->b_flags |= XBF_ASYNC;
3435 xfs_buf_ioend_fail(bp);
3436 return error;
3437 }
3438
3439 if (!clcount)
3440 return -EAGAIN;
3441
3442 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3443 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3444 return 0;
3445
3446}
3447
3448/* Release an inode. */
3449void
3450xfs_irele(
3451 struct xfs_inode *ip)
3452{
3453 trace_xfs_irele(ip, _RET_IP_);
3454 iput(VFS_I(ip));
3455}
3456
3457/*
3458 * Ensure all committed transactions touching the inode are written to the log.
3459 */
3460int
3461xfs_log_force_inode(
3462 struct xfs_inode *ip)
3463{
3464 xfs_csn_t seq = 0;
3465
3466 xfs_ilock(ip, XFS_ILOCK_SHARED);
3467 if (xfs_ipincount(ip))
3468 seq = ip->i_itemp->ili_commit_seq;
3469 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3470
3471 if (!seq)
3472 return 0;
3473 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3474}
3475
3476/*
3477 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3478 * abide vfs locking order (lowest pointer value goes first) and breaking the
3479 * layout leases before proceeding. The loop is needed because we cannot call
3480 * the blocking break_layout() with the iolocks held, and therefore have to
3481 * back out both locks.
3482 */
3483static int
3484xfs_iolock_two_inodes_and_break_layout(
3485 struct inode *src,
3486 struct inode *dest)
3487{
3488 int error;
3489
3490 if (src > dest)
3491 swap(src, dest);
3492
3493retry:
3494 /* Wait to break both inodes' layouts before we start locking. */
3495 error = break_layout(src, true);
3496 if (error)
3497 return error;
3498 if (src != dest) {
3499 error = break_layout(dest, true);
3500 if (error)
3501 return error;
3502 }
3503
3504 /* Lock one inode and make sure nobody got in and leased it. */
3505 inode_lock(src);
3506 error = break_layout(src, false);
3507 if (error) {
3508 inode_unlock(src);
3509 if (error == -EWOULDBLOCK)
3510 goto retry;
3511 return error;
3512 }
3513
3514 if (src == dest)
3515 return 0;
3516
3517 /* Lock the other inode and make sure nobody got in and leased it. */
3518 inode_lock_nested(dest, I_MUTEX_NONDIR2);
3519 error = break_layout(dest, false);
3520 if (error) {
3521 inode_unlock(src);
3522 inode_unlock(dest);
3523 if (error == -EWOULDBLOCK)
3524 goto retry;
3525 return error;
3526 }
3527
3528 return 0;
3529}
3530
3531static int
3532xfs_mmaplock_two_inodes_and_break_dax_layout(
3533 struct xfs_inode *ip1,
3534 struct xfs_inode *ip2)
3535{
3536 int error;
3537 bool retry;
3538 struct page *page;
3539
3540 if (ip1->i_ino > ip2->i_ino)
3541 swap(ip1, ip2);
3542
3543again:
3544 retry = false;
3545 /* Lock the first inode */
3546 xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3547 error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3548 if (error || retry) {
3549 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3550 if (error == 0 && retry)
3551 goto again;
3552 return error;
3553 }
3554
3555 if (ip1 == ip2)
3556 return 0;
3557
3558 /* Nested lock the second inode */
3559 xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3560 /*
3561 * We cannot use xfs_break_dax_layouts() directly here because it may
3562 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
3563 * for this nested lock case.
3564 */
3565 page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
3566 if (page && page_ref_count(page) != 1) {
3567 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3568 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3569 goto again;
3570 }
3571
3572 return 0;
3573}
3574
3575/*
3576 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3577 * mmap activity.
3578 */
3579int
3580xfs_ilock2_io_mmap(
3581 struct xfs_inode *ip1,
3582 struct xfs_inode *ip2)
3583{
3584 int ret;
3585
3586 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3587 if (ret)
3588 return ret;
3589
3590 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3591 ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
3592 if (ret) {
3593 inode_unlock(VFS_I(ip2));
3594 if (ip1 != ip2)
3595 inode_unlock(VFS_I(ip1));
3596 return ret;
3597 }
3598 } else
3599 filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3600 VFS_I(ip2)->i_mapping);
3601
3602 return 0;
3603}
3604
3605/* Unlock both inodes to allow IO and mmap activity. */
3606void
3607xfs_iunlock2_io_mmap(
3608 struct xfs_inode *ip1,
3609 struct xfs_inode *ip2)
3610{
3611 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3612 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3613 if (ip1 != ip2)
3614 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3615 } else
3616 filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3617 VFS_I(ip2)->i_mapping);
3618
3619 inode_unlock(VFS_I(ip2));
3620 if (ip1 != ip2)
3621 inode_unlock(VFS_I(ip1));
3622}
3623
3624/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
3625void
3626xfs_iunlock2_remapping(
3627 struct xfs_inode *ip1,
3628 struct xfs_inode *ip2)
3629{
3630 xfs_iflags_clear(ip1, XFS_IREMAPPING);
3631
3632 if (ip1 != ip2)
3633 xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
3634 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3635
3636 if (ip1 != ip2)
3637 inode_unlock_shared(VFS_I(ip1));
3638 inode_unlock(VFS_I(ip2));
3639}
3640
3641/*
3642 * Reload the incore unlinked list for this inode. Caller should ensure that
3643 * the link count cannot change, either by taking ILOCK_SHARED or otherwise
3644 * preventing other threads from executing.
3645 */
3646int
3647xfs_inode_reload_unlinked_bucket(
3648 struct xfs_trans *tp,
3649 struct xfs_inode *ip)
3650{
3651 struct xfs_mount *mp = tp->t_mountp;
3652 struct xfs_buf *agibp;
3653 struct xfs_agi *agi;
3654 struct xfs_perag *pag;
3655 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
3656 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
3657 xfs_agino_t prev_agino, next_agino;
3658 unsigned int bucket;
3659 bool foundit = false;
3660 int error;
3661
3662 /* Grab the first inode in the list */
3663 pag = xfs_perag_get(mp, agno);
3664 error = xfs_ialloc_read_agi(pag, tp, &agibp);
3665 xfs_perag_put(pag);
3666 if (error)
3667 return error;
3668
3669 /*
3670 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
3671 * incore unlinked list pointers for this inode. Check once more to
3672 * see if we raced with anyone else to reload the unlinked list.
3673 */
3674 if (!xfs_inode_unlinked_incomplete(ip)) {
3675 foundit = true;
3676 goto out_agibp;
3677 }
3678
3679 bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
3680 agi = agibp->b_addr;
3681
3682 trace_xfs_inode_reload_unlinked_bucket(ip);
3683
3684 xfs_info_ratelimited(mp,
3685 "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
3686 agino, agno);
3687
3688 prev_agino = NULLAGINO;
3689 next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3690 while (next_agino != NULLAGINO) {
3691 struct xfs_inode *next_ip = NULL;
3692
3693 /* Found this caller's inode, set its backlink. */
3694 if (next_agino == agino) {
3695 next_ip = ip;
3696 next_ip->i_prev_unlinked = prev_agino;
3697 foundit = true;
3698 goto next_inode;
3699 }
3700
3701 /* Try in-memory lookup first. */
3702 next_ip = xfs_iunlink_lookup(pag, next_agino);
3703 if (next_ip)
3704 goto next_inode;
3705
3706 /* Inode not in memory, try reloading it. */
3707 error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
3708 next_agino);
3709 if (error)
3710 break;
3711
3712 /* Grab the reloaded inode. */
3713 next_ip = xfs_iunlink_lookup(pag, next_agino);
3714 if (!next_ip) {
3715 /* No incore inode at all? We reloaded it... */
3716 ASSERT(next_ip != NULL);
3717 error = -EFSCORRUPTED;
3718 break;
3719 }
3720
3721next_inode:
3722 prev_agino = next_agino;
3723 next_agino = next_ip->i_next_unlinked;
3724 }
3725
3726out_agibp:
3727 xfs_trans_brelse(tp, agibp);
3728 /* Should have found this inode somewhere in the iunlinked bucket. */
3729 if (!error && !foundit)
3730 error = -EFSCORRUPTED;
3731 return error;
3732}
3733
3734/* Decide if this inode is missing its unlinked list and reload it. */
3735int
3736xfs_inode_reload_unlinked(
3737 struct xfs_inode *ip)
3738{
3739 struct xfs_trans *tp;
3740 int error;
3741
3742 error = xfs_trans_alloc_empty(ip->i_mount, &tp);
3743 if (error)
3744 return error;
3745
3746 xfs_ilock(ip, XFS_ILOCK_SHARED);
3747 if (xfs_inode_unlinked_incomplete(ip))
3748 error = xfs_inode_reload_unlinked_bucket(tp, ip);
3749 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3750 xfs_trans_cancel(tp);
3751
3752 return error;
3753}
3754
3755/* Has this inode fork been zapped by repair? */
3756bool
3757xfs_ifork_zapped(
3758 const struct xfs_inode *ip,
3759 int whichfork)
3760{
3761 unsigned int datamask = 0;
3762
3763 switch (whichfork) {
3764 case XFS_DATA_FORK:
3765 switch (ip->i_vnode.i_mode & S_IFMT) {
3766 case S_IFDIR:
3767 datamask = XFS_SICK_INO_DIR_ZAPPED;
3768 break;
3769 case S_IFLNK:
3770 datamask = XFS_SICK_INO_SYMLINK_ZAPPED;
3771 break;
3772 }
3773 return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
3774 case XFS_ATTR_FORK:
3775 return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
3776 default:
3777 return false;
3778 }
3779}