// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents(). This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two. If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
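
/*
 * Worked example (illustrative, not part of the original source): on an
 * inode with XFS_DIFLAG2_COWEXTSIZE set and di_cowextsize = 32 filesystem
 * blocks but a regular extent size hint of 64 blocks, the function above
 * returns max(32, 64) = 64. With neither hint set, both a and b are zero
 * and the caller gets XFS_DEFAULT_COWEXTSZ_HINT instead.
 */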

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code. They are used in places that wish to lock the
 * inode solely for reading the extents. The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format. If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in. Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though. What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp &&
	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
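
/*
 * Usage sketch (illustrative, not part of the original source): because
 * the helpers above may escalate to exclusive, the caller must hand the
 * returned mode straight back to xfs_iunlock():
 *
 *	uint	lock_mode = xfs_ilock_data_map_shared(ip);
 *
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */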

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock. This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
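
/*
 * Sketch (illustrative, not part of the original source): an extent
 * manipulation path that must invalidate the page cache race-free takes
 * both outer locks as described above; xfs_ilock() acquires them in the
 * required order when given both flags at once:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *
 *	... write back and invalidate the page cache, modify extents ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */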

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep. It returns 1 if it gets
 * the requested locks and 0 otherwise. If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	to be locked. See the comment for xfs_ilock() for a list
 *	of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
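
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * that must not block, e.g. because sleeping here could invert lock
 * ordering, try the lock and simply skip the optional work on failure,
 * as xfs_release() below does for the post-EOF block trim:
 *
 *	if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
 *		... do the optional work ...
 *		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 *	}
 */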

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *	to be unlocked. See the comment for xfs_ilock() for a list
 *	of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks. The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
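
/*
 * Sketch (illustrative, not part of the original source): a path that
 * needs exclusion only for its setup phase can demote to shared access
 * afterwards instead of dropping and retaking the lock:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... work that requires exclusive access ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... continue under XFS_IOLOCK_SHARED ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */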

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
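
/*
 * Worked example (illustrative, not part of the original source): when
 * xfs_lock_inodes() locks three inodes with XFS_ILOCK_EXCL, it passes
 * subclass 0, 1 and 2 in turn, so each acquisition carries a distinct
 * lockdep subclass (subclass << XFS_ILOCK_SHIFT ORed into the lock mode)
 * and lockdep sees an ordered nesting rather than a self-deadlock on the
 * same lock class.
 */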

/*
 * The following routine will lock n inodes in exclusive mode. We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder. These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL. If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again. xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one. Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
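
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * such as the rename path gathers the inodes into an array already
 * sorted by i_ino, as the comment above requires, and locks them all in
 * one shot:
 *
 *	struct xfs_inode	*ips[4] = { dp1, dp2, ip1, ip2 };
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */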

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders. The iolock must be double-locked separately since
 * we use i_rwsem for that. We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wq_entry);
}

STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
 * appropriately within the inode. The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode. Finally, fill in the inode and
 * log its initial contents. In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context. The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec64 tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	inode->i_mode = mode;
	set_nlink(inode, nlink);
	inode->i_uid = current_fsuid();
	inode->i_rdev = rdev;
	ip->i_d.di_projid = prid;

	if (pip && XFS_INHERIT_GID(pip)) {
		inode->i_gid = VFS_I(pip)->i_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;

			ip->i_d.di_flags |= di_flags;
		}
		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,		/* project id */
	xfs_inode_t	**ipp)		/* pointer to inode; it will be
					   locked. */
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist. Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one. We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
			&ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation. We need to commit the current
	 * transaction and call xfs_ialloc() again. It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit. Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change. If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_create(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				   &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case. If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
					pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction. We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks). An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode cannot have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				   &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error)
		goto out_release_inode;

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
					pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode cannot have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size. The new size must be smaller
 * than the current size. This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here. Some transaction will be
 * returned to the caller to be committed. The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction. On return the inode
 * will be "held" within the returned transaction. This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not. We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (first_unmap_block >= XFS_MAX_FILEOFF) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				      flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;

		error = xfs_trans_roll_inode(&tp, ip);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close. This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash. What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If the inode is being opened, written and closed frequently
		 * and we have delayed allocation blocks outstanding (e.g.
		 * streaming writes from the NFS server), truncating the blocks
		 * past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;
		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_lock
		 * otherwise. We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 */
		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
			error = xfs_free_eofblocks(ip);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			if (error)
				return error;
		}

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time. In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down. The cancel
		 * might do that, we need to make sure. Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point. There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}
1798
1799/*
1800 * xfs_inactive
1801 *
1802 * This is called when the vnode reference count for the vnode
1803 * goes to zero. If the file has been unlinked, then it must
1804 * now be truncated. Also, we clear all of the read-ahead state
1805 * kept for the inode here since the file is now closed.
1806 */
1807void
1808xfs_inactive(
1809 xfs_inode_t *ip)
1810{
1811 struct xfs_mount *mp;
1812 int error;
1813 int truncate = 0;
1814
1815 /*
1816 * If the inode is already free, then there can be nothing
1817 * to clean up here.
1818 */
1819 if (VFS_I(ip)->i_mode == 0) {
1820 ASSERT(ip->i_df.if_broot_bytes == 0);
1821 return;
1822 }
1823
1824 mp = ip->i_mount;
1825 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1826
1827 /* If this is a read-only mount, don't do this (would generate I/O) */
1828 if (mp->m_flags & XFS_MOUNT_RDONLY)
1829 return;
1830
1831 /* Try to clean out the cow blocks if there are any. */
1832 if (xfs_inode_has_cow_data(ip))
1833 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1834
1835 if (VFS_I(ip)->i_nlink != 0) {
1836 /*
1837 * force is true because we are evicting an inode from the
1838 * cache. Post-eof blocks must be freed, lest we end up with
1839 * broken free space accounting.
1840 *
1841 * Note: don't bother with iolock here since lockdep complains
1842 * about acquiring it in reclaim context. We have the only
1843 * reference to the inode at this point anyways.
1844 */
1845 if (xfs_can_free_eofblocks(ip, true))
1846 xfs_free_eofblocks(ip);
1847
1848 return;
1849 }
1850
1851 if (S_ISREG(VFS_I(ip)->i_mode) &&
1852 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1853 ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1854 truncate = 1;
1855
1856 error = xfs_qm_dqattach(ip);
1857 if (error)
1858 return;
1859
1860 if (S_ISLNK(VFS_I(ip)->i_mode))
1861 error = xfs_inactive_symlink(ip);
1862 else if (truncate)
1863 error = xfs_inactive_truncate(ip);
1864 if (error)
1865 return;
1866
1867 /*
1868 * If there are attributes associated with the file then blow them away
1869 * now. The code calls a routine that recursively deconstructs the
1870	 * attribute fork. It also blows away the in-core attribute fork.
1871 */
1872 if (XFS_IFORK_Q(ip)) {
1873 error = xfs_attr_inactive(ip);
1874 if (error)
1875 return;
1876 }
1877
1878 ASSERT(!ip->i_afp);
1879 ASSERT(ip->i_d.di_forkoff == 0);
1880
1881 /*
1882 * Free the inode.
1883 */
1884 error = xfs_inactive_ifree(ip);
1885 if (error)
1886 return;
1887
1888 /*
1889 * Release the dquots held by inode, if any.
1890 */
1891 xfs_qm_dqdetach(ip);
1892}
1893
1894/*
1895 * In-Core Unlinked List Lookups
1896 * =============================
1897 *
1898 * Every inode is supposed to be reachable from some other piece of metadata
1899 * with the exception of the root directory. Inodes with a connection to a
1900 * file descriptor but not linked from anywhere in the on-disk directory tree
1901 * are collectively known as unlinked inodes, though the filesystem itself
1902 * maintains links to these inodes so that on-disk metadata are consistent.
1903 *
1904 * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
1905 * header contains a number of buckets that point to an inode, and each inode
1906 * record has a pointer to the next inode in the hash chain. This
1907 * singly-linked list causes scaling problems in the iunlink remove function
1908 * because we must walk that list to find the inode that points to the inode
1909 * being removed from the unlinked hash bucket list.
1910 *
1911 * What if we modelled the unlinked list as a collection of records capturing
1912 * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd
1913 * have a fast way to look up unlinked list predecessors, which avoids the
1914 * slow list walk. That's exactly what we do here (in-core) with a per-AG
1915 * rhashtable.
1916 *
1917 * Because this is a backref cache, we ignore operational failures since the
1918 * iunlink code can fall back to the slow bucket walk. The only errors that
1919 * should bubble out are for obviously incorrect situations.
1920 *
1921 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1922 * access or have otherwise provided for concurrency control.
1923 */
1924
1925/* Capture a "X.next_unlinked = Y" relationship. */
1926struct xfs_iunlink {
1927 struct rhash_head iu_rhash_head;
1928 xfs_agino_t iu_agino; /* X */
1929 xfs_agino_t iu_next_unlinked; /* Y */
1930};
1931
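/*
 * Worked example (illustrative only, not from the original code): if an
 * AGI bucket holds the unlinked chain A -> B -> C, the cache contains
 * the records
 *
 *	{ .iu_agino = A, .iu_next_unlinked = B }
 *	{ .iu_agino = B, .iu_next_unlinked = C }
 *
 * Because records are keyed on iu_next_unlinked, looking up B returns
 * A, B's predecessor, without walking the chain from the bucket head.
 */
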
1932/* Unlinked list predecessor lookup hashtable construction */
1933static int
1934xfs_iunlink_obj_cmpfn(
1935 struct rhashtable_compare_arg *arg,
1936 const void *obj)
1937{
1938 const xfs_agino_t *key = arg->key;
1939 const struct xfs_iunlink *iu = obj;
1940
1941 if (iu->iu_next_unlinked != *key)
1942 return 1;
1943 return 0;
1944}
1945
1946static const struct rhashtable_params xfs_iunlink_hash_params = {
1947 .min_size = XFS_AGI_UNLINKED_BUCKETS,
1948 .key_len = sizeof(xfs_agino_t),
1949 .key_offset = offsetof(struct xfs_iunlink,
1950 iu_next_unlinked),
1951 .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head),
1952 .automatic_shrinking = true,
1953 .obj_cmpfn = xfs_iunlink_obj_cmpfn,
1954};
1955
1956/*
1957 * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such
1958 * relation is found.
1959 */
1960static xfs_agino_t
1961xfs_iunlink_lookup_backref(
1962 struct xfs_perag *pag,
1963 xfs_agino_t agino)
1964{
1965 struct xfs_iunlink *iu;
1966
1967 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1968 xfs_iunlink_hash_params);
1969 return iu ? iu->iu_agino : NULLAGINO;
1970}
1971
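/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): a caller that already holds the AGI buffer lock could use the
 * lookup above to decide whether the predecessor of @agino is cached
 * before falling back to the slow bucket walk.
 */
static inline bool
xfs_iunlink_backref_cached(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	/* NULLAGINO means no "X.next_unlinked == agino" record is cached. */
	return xfs_iunlink_lookup_backref(pag, agino) != NULLAGINO;
}
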
1972/*
1973 * Take ownership of an iunlink cache entry and insert it into the hash table.
1974 * If successful, the entry will be owned by the cache; if not, it is freed.
1975 * Either way, the caller does not own @iu after this call.
1976 */
1977static int
1978xfs_iunlink_insert_backref(
1979 struct xfs_perag *pag,
1980 struct xfs_iunlink *iu)
1981{
1982 int error;
1983
1984 error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1985 &iu->iu_rhash_head, xfs_iunlink_hash_params);
1986 /*
1987 * Fail loudly if there already was an entry because that's a sign of
1988 * corruption of in-memory data. Also fail loudly if we see an error
1989 * code we didn't anticipate from the rhashtable code. Currently we
1990 * only anticipate ENOMEM.
1991 */
1992 if (error) {
1993 WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1994 kmem_free(iu);
1995 }
1996 /*
1997 * Absorb any runtime errors that aren't a result of corruption because
1998 * this is a cache and we can always fall back to bucket list scanning.
1999 */
2000 if (error != 0 && error != -EEXIST)
2001 error = 0;
2002 return error;
2003}
2004
2005/* Remember that @prev_agino.next_unlinked = @this_agino. */
2006static int
2007xfs_iunlink_add_backref(
2008 struct xfs_perag *pag,
2009 xfs_agino_t prev_agino,
2010 xfs_agino_t this_agino)
2011{
2012 struct xfs_iunlink *iu;
2013
2014 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2015 return 0;
2016
2017 iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2018 iu->iu_agino = prev_agino;
2019 iu->iu_next_unlinked = this_agino;
2020
2021 return xfs_iunlink_insert_backref(pag, iu);
2022}
2023
2024/*
2025 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2026 * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there
2027 * wasn't any such entry then we don't bother.
2028 */
2029static int
2030xfs_iunlink_change_backref(
2031 struct xfs_perag *pag,
2032 xfs_agino_t agino,
2033 xfs_agino_t next_unlinked)
2034{
2035 struct xfs_iunlink *iu;
2036 int error;
2037
2038 /* Look up the old entry; if there wasn't one then exit. */
2039 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2040 xfs_iunlink_hash_params);
2041 if (!iu)
2042 return 0;
2043
2044 /*
2045 * Remove the entry. This shouldn't ever return an error, but if we
2046 * couldn't remove the old entry we don't want to add it again to the
2047 * hash table, and if the entry disappeared on us then someone's
2048 * violated the locking rules and we need to fail loudly. Either way
2049 * we cannot remove the inode because internal state is or would have
2050 * been corrupt.
2051 */
2052 error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2053 &iu->iu_rhash_head, xfs_iunlink_hash_params);
2054 if (error)
2055 return error;
2056
2057 /* If there is no new next entry just free our item and return. */
2058 if (next_unlinked == NULLAGINO) {
2059 kmem_free(iu);
2060 return 0;
2061 }
2062
2063 /* Update the entry and re-add it to the hash table. */
2064 iu->iu_next_unlinked = next_unlinked;
2065 return xfs_iunlink_insert_backref(pag, iu);
2066}
2067
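/*
 * Worked example (illustrative only): for the chain A -> B -> C with
 * cached records "A.next_unlinked = B" and "B.next_unlinked = C",
 * unlinking B from the middle first calls
 * xfs_iunlink_change_backref(pag, C, NULLAGINO) to drop the now-stale
 * "B.next_unlinked = C" record, and then, once A's on-disk inode has
 * been repointed at C, calls xfs_iunlink_change_backref(pag, B, C) to
 * rewrite "A.next_unlinked = B" as "A.next_unlinked = C".
 */
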
2068/* Set up the in-core predecessor structures. */
2069int
2070xfs_iunlink_init(
2071 struct xfs_perag *pag)
2072{
2073 return rhashtable_init(&pag->pagi_unlinked_hash,
2074 &xfs_iunlink_hash_params);
2075}
2076
2077/* Free the in-core predecessor structures. */
2078static void
2079xfs_iunlink_free_item(
2080 void *ptr,
2081 void *arg)
2082{
2083 struct xfs_iunlink *iu = ptr;
2084 bool *freed_anything = arg;
2085
2086 *freed_anything = true;
2087 kmem_free(iu);
2088}
2089
2090void
2091xfs_iunlink_destroy(
2092 struct xfs_perag *pag)
2093{
2094 bool freed_anything = false;
2095
2096 rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2097 xfs_iunlink_free_item, &freed_anything);
2098
2099 ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2100}
2101
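/*
 * Illustrative sketch (hypothetical setup path, not from the original
 * file): the backref cache is created when a perag is initialised and
 * torn down with it, bracketing all unlinked list activity in between.
 */
static inline int
xfs_iunlink_cache_example(
	struct xfs_perag	*pag)
{
	int			error;

	error = xfs_iunlink_init(pag);
	if (error)
		return error;

	/* ... xfs_iunlink()/xfs_iunlink_remove() traffic populates it ... */

	xfs_iunlink_destroy(pag);
	return 0;
}
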
2102/*
2103 * Point the AGI unlinked bucket at an inode and log the results. The caller
2104 * is responsible for validating the old value.
2105 */
2106STATIC int
2107xfs_iunlink_update_bucket(
2108 struct xfs_trans *tp,
2109 xfs_agnumber_t agno,
2110 struct xfs_buf *agibp,
2111 unsigned int bucket_index,
2112 xfs_agino_t new_agino)
2113{
2114 struct xfs_agi *agi = agibp->b_addr;
2115 xfs_agino_t old_value;
2116 int offset;
2117
2118 ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2119
2120 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2121 trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2122 old_value, new_agino);
2123
2124 /*
2125 * We should never find the head of the list already set to the value
2126 * passed in because either we're adding or removing ourselves from the
2127 * head of the list.
2128 */
2129 if (old_value == new_agino) {
2130 xfs_buf_mark_corrupt(agibp);
2131 return -EFSCORRUPTED;
2132 }
2133
2134 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2135 offset = offsetof(struct xfs_agi, agi_unlinked) +
2136 (sizeof(xfs_agino_t) * bucket_index);
2137 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2138 return 0;
2139}
2140
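/*
 * Worked example (illustrative only): for bucket_index == 5, the offset
 * above is offsetof(struct xfs_agi, agi_unlinked) + 5 * sizeof(xfs_agino_t),
 * and the xfs_trans_log_buf() range covers exactly the four bytes of that
 * one bucket slot, so the rest of the AGI is not relogged.
 */
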
2141/* Set an on-disk inode's next_unlinked pointer. */
2142STATIC void
2143xfs_iunlink_update_dinode(
2144 struct xfs_trans *tp,
2145 xfs_agnumber_t agno,
2146 xfs_agino_t agino,
2147 struct xfs_buf *ibp,
2148 struct xfs_dinode *dip,
2149 struct xfs_imap *imap,
2150 xfs_agino_t next_agino)
2151{
2152 struct xfs_mount *mp = tp->t_mountp;
2153 int offset;
2154
2155 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2156
2157 trace_xfs_iunlink_update_dinode(mp, agno, agino,
2158 be32_to_cpu(dip->di_next_unlinked), next_agino);
2159
2160 dip->di_next_unlinked = cpu_to_be32(next_agino);
2161 offset = imap->im_boffset +
2162 offsetof(struct xfs_dinode, di_next_unlinked);
2163
2164 /* need to recalc the inode CRC if appropriate */
2165 xfs_dinode_calc_crc(mp, dip);
2166 xfs_trans_inode_buf(tp, ibp);
2167 xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2168}
2169
2170/* Set an in-core inode's unlinked pointer and return the old value. */
2171STATIC int
2172xfs_iunlink_update_inode(
2173 struct xfs_trans *tp,
2174 struct xfs_inode *ip,
2175 xfs_agnumber_t agno,
2176 xfs_agino_t next_agino,
2177 xfs_agino_t *old_next_agino)
2178{
2179 struct xfs_mount *mp = tp->t_mountp;
2180 struct xfs_dinode *dip;
2181 struct xfs_buf *ibp;
2182 xfs_agino_t old_value;
2183 int error;
2184
2185 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2186
2187 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
2188 if (error)
2189 return error;
2190
2191 /* Make sure the old pointer isn't garbage. */
2192 old_value = be32_to_cpu(dip->di_next_unlinked);
2193 if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2194 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2195 sizeof(*dip), __this_address);
2196 error = -EFSCORRUPTED;
2197 goto out;
2198 }
2199
2200 /*
2201 * Since we're updating a linked list, we should never find that the
2202 * current pointer is the same as the new value, unless we're
2203 * terminating the list.
2204 */
2205 *old_next_agino = old_value;
2206 if (old_value == next_agino) {
2207 if (next_agino != NULLAGINO) {
2208 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2209 dip, sizeof(*dip), __this_address);
2210 error = -EFSCORRUPTED;
2211 }
2212 goto out;
2213 }
2214
2215 /* Ok, update the new pointer. */
2216 xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2217 ibp, dip, &ip->i_imap, next_agino);
2218 return 0;
2219out:
2220 xfs_trans_brelse(tp, ibp);
2221 return error;
2222}
2223
2224/*
2225 * This is called when the inode's link count has gone to 0 or we are creating
2226 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
2227 *
2228 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2229 * list when the inode is freed.
2230 */
2231STATIC int
2232xfs_iunlink(
2233 struct xfs_trans *tp,
2234 struct xfs_inode *ip)
2235{
2236 struct xfs_mount *mp = tp->t_mountp;
2237 struct xfs_agi *agi;
2238 struct xfs_buf *agibp;
2239 xfs_agino_t next_agino;
2240 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2241 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2242 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2243 int error;
2244
2245 ASSERT(VFS_I(ip)->i_nlink == 0);
2246 ASSERT(VFS_I(ip)->i_mode != 0);
2247 trace_xfs_iunlink(ip);
2248
2249 /* Get the agi buffer first. It ensures lock ordering on the list. */
2250 error = xfs_read_agi(mp, tp, agno, &agibp);
2251 if (error)
2252 return error;
2253 agi = agibp->b_addr;
2254
2255 /*
2256 * Get the index into the agi hash table for the list this inode will
2257 * go on. Make sure the pointer isn't garbage and that this inode
2258 * isn't already on the list.
2259 */
2260 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2261 if (next_agino == agino ||
2262 !xfs_verify_agino_or_null(mp, agno, next_agino)) {
2263 xfs_buf_mark_corrupt(agibp);
2264 return -EFSCORRUPTED;
2265 }
2266
2267 if (next_agino != NULLAGINO) {
2268 xfs_agino_t old_agino;
2269
2270 /*
2271 * There is already another inode in the bucket, so point this
2272 * inode to the current head of the list.
2273 */
2274 error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2275 &old_agino);
2276 if (error)
2277 return error;
2278 ASSERT(old_agino == NULLAGINO);
2279
2280 /*
2281 * agino has been unlinked, add a backref from the next inode
2282 * back to agino.
2283 */
2284 error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
2285 if (error)
2286 return error;
2287 }
2288
2289 /* Point the head of the list to point to this inode. */
2290 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2291}
2292
2293/* Return the imap, dinode pointer, and buffer for an inode. */
2294STATIC int
2295xfs_iunlink_map_ino(
2296 struct xfs_trans *tp,
2297 xfs_agnumber_t agno,
2298 xfs_agino_t agino,
2299 struct xfs_imap *imap,
2300 struct xfs_dinode **dipp,
2301 struct xfs_buf **bpp)
2302{
2303 struct xfs_mount *mp = tp->t_mountp;
2304 int error;
2305
2306 imap->im_blkno = 0;
2307 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2308 if (error) {
2309 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2310 __func__, error);
2311 return error;
2312 }
2313
2314 error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
2315 if (error) {
2316 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2317 __func__, error);
2318 return error;
2319 }
2320
2321 return 0;
2322}
2323
2324/*
2325 * Walk the unlinked chain from @head_agino until we find the inode that
2326 * points to @target_agino. Return the inode number, map, dinode pointer,
2327 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2328 *
2329 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2330 * @agino, @imap, @dipp, and @bpp are all output parameters.
2331 *
2332 * Do not call this function if @target_agino is the head of the list.
2333 */
2334STATIC int
2335xfs_iunlink_map_prev(
2336 struct xfs_trans *tp,
2337 xfs_agnumber_t agno,
2338 xfs_agino_t head_agino,
2339 xfs_agino_t target_agino,
2340 xfs_agino_t *agino,
2341 struct xfs_imap *imap,
2342 struct xfs_dinode **dipp,
2343 struct xfs_buf **bpp,
2344 struct xfs_perag *pag)
2345{
2346 struct xfs_mount *mp = tp->t_mountp;
2347 xfs_agino_t next_agino;
2348 int error;
2349
2350 ASSERT(head_agino != target_agino);
2351 *bpp = NULL;
2352
2353 /* See if our backref cache can find it faster. */
2354 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2355 if (*agino != NULLAGINO) {
2356 error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2357 if (error)
2358 return error;
2359
2360 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2361 return 0;
2362
2363 /*
2364 * If we get here the cache contents were corrupt, so drop the
2365 * buffer and fall back to walking the bucket list.
2366 */
2367 xfs_trans_brelse(tp, *bpp);
2368 *bpp = NULL;
2369 WARN_ON_ONCE(1);
2370 }
2371
2372 trace_xfs_iunlink_map_prev_fallback(mp, agno);
2373
2374 /* Otherwise, walk the entire bucket until we find it. */
2375 next_agino = head_agino;
2376 while (next_agino != target_agino) {
2377 xfs_agino_t unlinked_agino;
2378
2379 if (*bpp)
2380 xfs_trans_brelse(tp, *bpp);
2381
2382 *agino = next_agino;
2383 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2384 bpp);
2385 if (error)
2386 return error;
2387
2388 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2389 /*
2390 * Make sure this pointer is valid and isn't an obvious
2391 * infinite loop.
2392 */
2393 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2394 next_agino == unlinked_agino) {
2395 XFS_CORRUPTION_ERROR(__func__,
2396 XFS_ERRLEVEL_LOW, mp,
2397 *dipp, sizeof(**dipp));
2398 error = -EFSCORRUPTED;
2399 return error;
2400 }
2401 next_agino = unlinked_agino;
2402 }
2403
2404 return 0;
2405}
2406
2407/*
2408 * Pull the on-disk inode from the AGI unlinked list.
2409 */
2410STATIC int
2411xfs_iunlink_remove(
2412 struct xfs_trans *tp,
2413 struct xfs_inode *ip)
2414{
2415 struct xfs_mount *mp = tp->t_mountp;
2416 struct xfs_agi *agi;
2417 struct xfs_buf *agibp;
2418 struct xfs_buf *last_ibp;
2419 struct xfs_dinode *last_dip = NULL;
2420 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2421 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2422 xfs_agino_t next_agino;
2423 xfs_agino_t head_agino;
2424 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2425 int error;
2426
2427 trace_xfs_iunlink_remove(ip);
2428
2429 /* Get the agi buffer first. It ensures lock ordering on the list. */
2430 error = xfs_read_agi(mp, tp, agno, &agibp);
2431 if (error)
2432 return error;
2433 agi = agibp->b_addr;
2434
2435 /*
2436	 * Get the index into the agi hash table for the list this inode is
2437	 * on. Make sure the head pointer isn't garbage.
2438 */
2439 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2440 if (!xfs_verify_agino(mp, agno, head_agino)) {
2441 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2442 agi, sizeof(*agi));
2443 return -EFSCORRUPTED;
2444 }
2445
2446 /*
2447	 * Set our inode's next_unlinked pointer to NULLAGINO and then return
2448 * the old pointer value so that we can update whatever was previous
2449 * to us in the list to point to whatever was next in the list.
2450 */
2451 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2452 if (error)
2453 return error;
2454
2455 /*
2456 * If there was a backref pointing from the next inode back to this
2457 * one, remove it because we've removed this inode from the list.
2458 *
2459 * Later, if this inode was in the middle of the list we'll update
2460 * this inode's backref to point from the next inode.
2461 */
2462 if (next_agino != NULLAGINO) {
2463 error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
2464 NULLAGINO);
2465 if (error)
2466 return error;
2467 }
2468
2469 if (head_agino != agino) {
2470 struct xfs_imap imap;
2471 xfs_agino_t prev_agino;
2472
2473 /* We need to search the list for the inode being freed. */
2474 error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2475 &prev_agino, &imap, &last_dip, &last_ibp,
2476 agibp->b_pag);
2477 if (error)
2478 return error;
2479
2480 /* Point the previous inode on the list to the next inode. */
2481 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2482 last_dip, &imap, next_agino);
2483
2484 /*
2485 * Now we deal with the backref for this inode. If this inode
2486 * pointed at a real inode, change the backref that pointed to
2487 * us to point to our old next. If this inode was the end of
2488 * the list, delete the backref that pointed to us. Note that
2489 * change_backref takes care of deleting the backref if
2490 * next_agino is NULLAGINO.
2491 */
2492 return xfs_iunlink_change_backref(agibp->b_pag, agino,
2493 next_agino);
2494 }
2495
2496 /* Point the head of the list to the next unlinked inode. */
2497 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2498 next_agino);
2499}
2500
2501/*
2502 * Look up the specified inode number and, if it is not already marked
2503 * XFS_ISTALE, mark it stale. This lookup should only find clean inodes
2504 * that aren't already stale.
2505 */
2506static void
2507xfs_ifree_mark_inode_stale(
2508 struct xfs_buf *bp,
2509 struct xfs_inode *free_ip,
2510 xfs_ino_t inum)
2511{
2512 struct xfs_mount *mp = bp->b_mount;
2513 struct xfs_perag *pag = bp->b_pag;
2514 struct xfs_inode_log_item *iip;
2515 struct xfs_inode *ip;
2516
2517retry:
2518 rcu_read_lock();
2519 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2520
2521 /* Inode not in memory, nothing to do */
2522 if (!ip) {
2523 rcu_read_unlock();
2524 return;
2525 }
2526
2527 /*
2528	 * Because this is an RCU protected lookup, we could find a recently
2529	 * freed or even reallocated inode during the lookup. We need to check
2530	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2531	 * valid, is the wrong inode, or is stale.
2532 */
2533 spin_lock(&ip->i_flags_lock);
2534 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) {
2535 spin_unlock(&ip->i_flags_lock);
2536 rcu_read_unlock();
2537 return;
2538 }
2539
2540 /*
2541	 * We don't try to lock/unlock the current inode, but we _cannot_ skip
2542	 * any other inode that we did not find in the list attached to the
2543	 * buffer and that is not already marked stale. If we can't lock such an
2544	 * inode, back off and retry.
2545 */
2546 if (ip != free_ip) {
2547 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2548 spin_unlock(&ip->i_flags_lock);
2549 rcu_read_unlock();
2550 delay(1);
2551 goto retry;
2552 }
2553 }
2554 ip->i_flags |= XFS_ISTALE;
2555 spin_unlock(&ip->i_flags_lock);
2556 rcu_read_unlock();
2557
2558 /*
2559 * If we can't get the flush lock, the inode is already attached. All
2560	 * we need to do here is mark the inode stale so buffer IO completion
2561 * will remove it from the AIL.
2562 */
2563 iip = ip->i_itemp;
2564 if (!xfs_iflock_nowait(ip)) {
2565 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2566 ASSERT(iip->ili_last_fields);
2567 goto out_iunlock;
2568 }
2569
2570 /*
2571 * Inodes not attached to the buffer can be released immediately.
2572 * Everything else has to go through xfs_iflush_abort() on journal
2573 * commit as the flock synchronises removal of the inode from the
2574 * cluster buffer against inode reclaim.
2575 */
2576 if (!iip || list_empty(&iip->ili_item.li_bio_list)) {
2577 xfs_ifunlock(ip);
2578 goto out_iunlock;
2579 }
2580
2581	/* We have a dirty inode in memory that has not yet been flushed. */
2582 spin_lock(&iip->ili_lock);
2583 iip->ili_last_fields = iip->ili_fields;
2584 iip->ili_fields = 0;
2585 iip->ili_fsync_fields = 0;
2586 spin_unlock(&iip->ili_lock);
2587 ASSERT(iip->ili_last_fields);
2588
2589out_iunlock:
2590 if (ip != free_ip)
2591 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2592}
2593
2594/*
2595 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2596 * inodes that are in memory - they all must be marked stale and attached to
2597 * the cluster buffer.
2598 */
2599STATIC int
2600xfs_ifree_cluster(
2601 struct xfs_inode *free_ip,
2602 struct xfs_trans *tp,
2603 struct xfs_icluster *xic)
2604{
2605 struct xfs_mount *mp = free_ip->i_mount;
2606 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2607 struct xfs_buf *bp;
2608 xfs_daddr_t blkno;
2609 xfs_ino_t inum = xic->first_ino;
2610 int nbufs;
2611 int i, j;
2612 int ioffset;
2613 int error;
2614
2615 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2616
2617 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2618 /*
2619 * The allocation bitmap tells us which inodes of the chunk were
2620 * physically allocated. Skip the cluster if an inode falls into
2621 * a sparse region.
2622 */
2623 ioffset = inum - xic->first_ino;
2624 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2625 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2626 continue;
2627 }
2628
2629 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2630 XFS_INO_TO_AGBNO(mp, inum));
2631
2632 /*
2633 * We obtain and lock the backing buffer first in the process
2634 * here, as we have to ensure that any dirty inode that we
2635 * can't get the flush lock on is attached to the buffer.
2636 * If we scan the in-memory inodes first, then buffer IO can
2637 * complete before we get a lock on it, and hence we may fail
2638 * to mark all the active inodes on the buffer stale.
2639 */
2640 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2641 mp->m_bsize * igeo->blocks_per_cluster,
2642 XBF_UNMAPPED, &bp);
2643 if (error)
2644 return error;
2645
2646 /*
2647 * This buffer may not have been correctly initialised as we
2648		 * didn't read it from disk. That's not important because we are
2649		 * only using it to mark the buffer as stale in the log, and to
2650		 * attach stale cached inodes to it. That means it will never be
2651		 * dispatched for IO. If it is, we want to know about it, and we
2652		 * want it to fail. We can achieve this by adding a write
2653 * verifier to the buffer.
2654 */
2655 bp->b_ops = &xfs_inode_buf_ops;
2656
2657 /*
2658 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2659 * too. This requires lookups, and will skip inodes that we've
2660 * already marked XFS_ISTALE.
2661 */
2662 for (i = 0; i < igeo->inodes_per_cluster; i++)
2663 xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
2664
2665 xfs_trans_stale_inode_buf(tp, bp);
2666 xfs_trans_binval(tp, bp);
2667 }
2668 return 0;
2669}
2670
2671/*
2672 * This is called to return an inode to the inode free list.
2673 * The inode should already be truncated to 0 length and have
2674 * no pages associated with it. This routine also assumes that
2675 * the inode is already a part of the transaction.
2676 *
2677 * The on-disk copy of the inode will have been added to the list
2678 * of unlinked inodes in the AGI. We need to remove the inode from
2679 * that list atomically with respect to freeing it here.
2680 */
2681int
2682xfs_ifree(
2683 struct xfs_trans *tp,
2684 struct xfs_inode *ip)
2685{
2686 int error;
2687 struct xfs_icluster xic = { 0 };
2688 struct xfs_inode_log_item *iip = ip->i_itemp;
2689
2690 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2691 ASSERT(VFS_I(ip)->i_nlink == 0);
2692 ASSERT(ip->i_df.if_nextents == 0);
2693 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2694 ASSERT(ip->i_d.di_nblocks == 0);
2695
2696 /*
2697 * Pull the on-disk inode from the AGI unlinked list.
2698 */
2699 error = xfs_iunlink_remove(tp, ip);
2700 if (error)
2701 return error;
2702
2703 error = xfs_difree(tp, ip->i_ino, &xic);
2704 if (error)
2705 return error;
2706
2707 /*
2708 * Free any local-format data sitting around before we reset the
2709 * data fork to extents format. Note that the attr fork data has
2710 * already been freed by xfs_attr_inactive.
2711 */
2712 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2713 kmem_free(ip->i_df.if_u1.if_data);
2714 ip->i_df.if_u1.if_data = NULL;
2715 ip->i_df.if_bytes = 0;
2716 }
2717
2718 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2719 ip->i_d.di_flags = 0;
2720 ip->i_d.di_flags2 = 0;
2721 ip->i_d.di_dmevmask = 0;
2722 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2723 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2724
2725 /* Don't attempt to replay owner changes for a deleted inode */
2726 spin_lock(&iip->ili_lock);
2727 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2728 spin_unlock(&iip->ili_lock);
2729
2730 /*
2731 * Bump the generation count so no one will be confused
2732 * by reincarnations of this inode.
2733 */
2734 VFS_I(ip)->i_generation++;
2735 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2736
2737 if (xic.deleted)
2738 error = xfs_ifree_cluster(ip, tp, &xic);
2739
2740 return error;
2741}
2742
2743/*
2744 * This is called to unpin an inode. The caller must have the inode locked
2745 * in at least shared mode so that the buffer cannot be subsequently pinned
2746 * once someone is waiting for it to be unpinned.
2747 */
2748static void
2749xfs_iunpin(
2750 struct xfs_inode *ip)
2751{
2752 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2753
2754 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2755
2756 /* Give the log a push to start the unpinning I/O */
2757 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2759}
2760
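/*
 * Sleep on the inode's pin bit until the pin count drops to zero.
 * xfs_iunpin() has already pushed the log, so the unpin I/O is in
 * flight while we wait on __XFS_IPINNED_BIT.
 */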
2761static void
2762__xfs_iunpin_wait(
2763 struct xfs_inode *ip)
2764{
2765 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2766 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2767
2768 xfs_iunpin(ip);
2769
2770 do {
2771 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2772 if (xfs_ipincount(ip))
2773 io_schedule();
2774 } while (xfs_ipincount(ip));
2775 finish_wait(wq, &wait.wq_entry);
2776}
2777
2778void
2779xfs_iunpin_wait(
2780 struct xfs_inode *ip)
2781{
2782 if (xfs_ipincount(ip))
2783 __xfs_iunpin_wait(ip);
2784}
2785
2786/*
2787 * Removing an inode from the namespace involves removing the directory entry
2788 * and dropping the link count on the inode. Removing the directory entry can
2789 * result in locking an AGF (directory blocks were freed) and dropping the link
2790 * count can result in placing the inode on an unlinked list which results in
2791 * locking an AGI.
2792 *
2793 * The big problem here is that we have an ordering constraint on AGF and AGI
2794 * locking - inode allocation locks the AGI, then can allocate a new extent for
2795 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2796 * removes the inode from the unlinked list, requiring that we lock the AGI
2797 * first, and then freeing the inode can result in an inode chunk being freed
2798 * and hence freeing disk space requiring that we lock an AGF.
2799 *
2800 * Hence the ordering that is imposed by other parts of the code is AGI before
2801 * AGF. This means we cannot remove the directory entry before we drop the inode
2802 * reference count and put it on the unlinked list as this results in a lock
2803 * order of AGF then AGI, and this can deadlock against inode allocation and
2804 * freeing. Therefore we must drop the link counts before we remove the
2805 * directory entry.
2806 *
2807 * This is still safe from a transactional point of view - it is not until we
2808 * get to xfs_defer_finish() that we have the possibility of multiple
2809 * transactions in this operation. Hence as long as we remove the directory
2810 * entry and drop the link count in the first transaction of the remove
2811 * operation, there are no transactional constraints on the ordering here.
2812 */
2813int
2814xfs_remove(
2815 xfs_inode_t *dp,
2816 struct xfs_name *name,
2817 xfs_inode_t *ip)
2818{
2819 xfs_mount_t *mp = dp->i_mount;
2820 xfs_trans_t *tp = NULL;
2821 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2822 int error = 0;
2823 uint resblks;
2824
2825 trace_xfs_remove(dp, name);
2826
2827 if (XFS_FORCED_SHUTDOWN(mp))
2828 return -EIO;
2829
2830 error = xfs_qm_dqattach(dp);
2831 if (error)
2832 goto std_return;
2833
2834 error = xfs_qm_dqattach(ip);
2835 if (error)
2836 goto std_return;
2837
2838 /*
2839	 * We try to get the real space reservation first, allowing
2840	 * for directory btree deletion(s) implying possible bmap
2841	 * insert(s). If we can't get the space reservation then we
2842	 * use 0 instead, and avoid the bmap btree insert(s) in the
2843	 * directory code: if a bmap insert tries to happen, the
2844	 * directory code trims the LAST block from the directory
2845	 * instead.
2846 */
2847 resblks = XFS_REMOVE_SPACE_RES(mp);
2848 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2849 if (error == -ENOSPC) {
2850 resblks = 0;
2851 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2852 &tp);
2853 }
2854 if (error) {
2855 ASSERT(error != -ENOSPC);
2856 goto std_return;
2857 }
2858
2859 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2860
2861 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2862 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2863
2864 /*
2865 * If we're removing a directory perform some additional validation.
2866 */
2867 if (is_dir) {
2868 ASSERT(VFS_I(ip)->i_nlink >= 2);
2869 if (VFS_I(ip)->i_nlink != 2) {
2870 error = -ENOTEMPTY;
2871 goto out_trans_cancel;
2872 }
2873 if (!xfs_dir_isempty(ip)) {
2874 error = -ENOTEMPTY;
2875 goto out_trans_cancel;
2876 }
2877
2878 /* Drop the link from ip's "..". */
2879 error = xfs_droplink(tp, dp);
2880 if (error)
2881 goto out_trans_cancel;
2882
2883 /* Drop the "." link from ip to self. */
2884 error = xfs_droplink(tp, ip);
2885 if (error)
2886 goto out_trans_cancel;
2887 } else {
2888 /*
2889 * When removing a non-directory we need to log the parent
2890 * inode here. For a directory this is done implicitly
2891 * by the xfs_droplink call for the ".." entry.
2892 */
2893 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2894 }
2895 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2896
2897 /* Drop the link from dp to ip. */
2898 error = xfs_droplink(tp, ip);
2899 if (error)
2900 goto out_trans_cancel;
2901
2902 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2903 if (error) {
2904 ASSERT(error != -ENOENT);
2905 goto out_trans_cancel;
2906 }
2907
2908 /*
2909 * If this is a synchronous mount, make sure that the
2910 * remove transaction goes to disk before returning to
2911 * the user.
2912 */
2913 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2914 xfs_trans_set_sync(tp);
2915
2916 error = xfs_trans_commit(tp);
2917 if (error)
2918 goto std_return;
2919
2920 if (is_dir && xfs_inode_is_filestream(ip))
2921 xfs_filestream_deassociate(ip);
2922
2923 return 0;
2924
2925 out_trans_cancel:
2926 xfs_trans_cancel(tp);
2927 std_return:
2928 return error;
2929}
2930
2931/*
2932 * Enter all inodes for a rename transaction into a sorted array.
2933 */
2934#define __XFS_SORT_INODES 5
2935STATIC void
2936xfs_sort_for_rename(
2937 struct xfs_inode *dp1, /* in: old (source) directory inode */
2938 struct xfs_inode *dp2, /* in: new (target) directory inode */
2939 struct xfs_inode *ip1, /* in: inode of old entry */
2940 struct xfs_inode *ip2, /* in: inode of new entry */
2941 struct xfs_inode *wip, /* in: whiteout inode */
2942 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2943 int *num_inodes) /* in/out: inodes in array */
2944{
2945 int i, j;
2946
2947 ASSERT(*num_inodes == __XFS_SORT_INODES);
2948 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2949
2950 /*
2951	 * i_tab contains a list of pointers to inodes. We initialize
2952	 * the table here and sort it below. We then use it to
2953	 * order the acquisition of the inode locks.
2954 *
2955 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2956 */
2957 i = 0;
2958 i_tab[i++] = dp1;
2959 i_tab[i++] = dp2;
2960 i_tab[i++] = ip1;
2961 if (ip2)
2962 i_tab[i++] = ip2;
2963 if (wip)
2964 i_tab[i++] = wip;
2965 *num_inodes = i;
2966
2967 /*
2968 * Sort the elements via bubble sort. (Remember, there are at
2969 * most 5 elements to sort, so this is adequate.)
2970 */
2971 for (i = 0; i < *num_inodes; i++) {
2972 for (j = 1; j < *num_inodes; j++) {
2973 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2974 struct xfs_inode *temp = i_tab[j];
2975 i_tab[j] = i_tab[j-1];
2976 i_tab[j-1] = temp;
2977 }
2978 }
2979 }
2980}
2981
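/*
 * Worked example (illustrative only): for dp1->i_ino == 7,
 * dp2->i_ino == 3 and ip1->i_ino == 12 with no ip2 or wip, the sort
 * yields i_tab == {3, 7, 12} and *num_inodes == 3, so xfs_lock_inodes()
 * always takes the locks in ascending inode number order regardless of
 * the rename's argument order.
 */
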
2982static int
2983xfs_finish_rename(
2984 struct xfs_trans *tp)
2985{
2986 /*
2987 * If this is a synchronous mount, make sure that the rename transaction
2988 * goes to disk before returning to the user.
2989 */
2990 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2991 xfs_trans_set_sync(tp);
2992
2993 return xfs_trans_commit(tp);
2994}
2995
2996/*
2997 * xfs_cross_rename()
2998 *
2999 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
3000 */
3001STATIC int
3002xfs_cross_rename(
3003 struct xfs_trans *tp,
3004 struct xfs_inode *dp1,
3005 struct xfs_name *name1,
3006 struct xfs_inode *ip1,
3007 struct xfs_inode *dp2,
3008 struct xfs_name *name2,
3009 struct xfs_inode *ip2,
3010 int spaceres)
3011{
3012 int error = 0;
3013 int ip1_flags = 0;
3014 int ip2_flags = 0;
3015 int dp2_flags = 0;
3016
3017 /* Swap inode number for dirent in first parent */
3018 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3019 if (error)
3020 goto out_trans_abort;
3021
3022 /* Swap inode number for dirent in second parent */
3023 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3024 if (error)
3025 goto out_trans_abort;
3026
3027 /*
3028 * If we're renaming one or more directories across different parents,
3029 * update the respective ".." entries (and link counts) to match the new
3030 * parents.
3031 */
3032 if (dp1 != dp2) {
3033 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3034
3035 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3036 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3037 dp1->i_ino, spaceres);
3038 if (error)
3039 goto out_trans_abort;
3040
3041 /* transfer ip2 ".." reference to dp1 */
3042 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3043 error = xfs_droplink(tp, dp2);
3044 if (error)
3045 goto out_trans_abort;
3046 xfs_bumplink(tp, dp1);
3047 }
3048
3049 /*
3050			 * Although ip1 isn't changed here, userspace needs
3051			 * to be notified about the change so that applications
3052			 * relying on it (like backup tools) properly pick up
3053			 * the change.
3054 */
3055 ip1_flags |= XFS_ICHGTIME_CHG;
3056 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3057 }
3058
3059 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3060 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3061 dp2->i_ino, spaceres);
3062 if (error)
3063 goto out_trans_abort;
3064
3065 /* transfer ip1 ".." reference to dp2 */
3066 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3067 error = xfs_droplink(tp, dp1);
3068 if (error)
3069 goto out_trans_abort;
3070 xfs_bumplink(tp, dp2);
3071 }
3072
3073 /*
3074			 * Although ip2 isn't changed here, userspace needs
3075			 * to be notified about the change so that applications
3076			 * relying on it (like backup tools) properly pick up
3077			 * the change.
3078 */
3079 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3080 ip2_flags |= XFS_ICHGTIME_CHG;
3081 }
3082 }
3083
3084 if (ip1_flags) {
3085 xfs_trans_ichgtime(tp, ip1, ip1_flags);
3086 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3087 }
3088 if (ip2_flags) {
3089 xfs_trans_ichgtime(tp, ip2, ip2_flags);
3090 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3091 }
3092 if (dp2_flags) {
3093 xfs_trans_ichgtime(tp, dp2, dp2_flags);
3094 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3095 }
3096 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3097 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3098 return xfs_finish_rename(tp);
3099
3100out_trans_abort:
3101 xfs_trans_cancel(tp);
3102 return error;
3103}
3104
3105/*
3106 * xfs_rename_alloc_whiteout()
3107 *
3108 * Return a referenced, unlinked, unlocked inode that can be used as a
3109 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3110 * crash between allocating the inode and linking it into the rename
3111 * transaction, recovery will free the inode and we won't leak it.
3112 */
3113static int
3114xfs_rename_alloc_whiteout(
3115 struct xfs_inode *dp,
3116 struct xfs_inode **wip)
3117{
3118 struct xfs_inode *tmpfile;
3119 int error;
3120
3121 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3122 if (error)
3123 return error;
3124
3125 /*
3126 * Prepare the tmpfile inode as if it were created through the VFS.
3127 * Complete the inode setup and flag it as linkable. nlink is already
3128 * zero, so we can skip the drop_nlink.
3129 */
3130 xfs_setup_iops(tmpfile);
3131 xfs_finish_inode_setup(tmpfile);
3132 VFS_I(tmpfile)->i_state |= I_LINKABLE;
3133
3134 *wip = tmpfile;
3135 return 0;
3136}
3137
3138/*
3139 * xfs_rename
3140 */
3141int
3142xfs_rename(
3143 struct xfs_inode *src_dp,
3144 struct xfs_name *src_name,
3145 struct xfs_inode *src_ip,
3146 struct xfs_inode *target_dp,
3147 struct xfs_name *target_name,
3148 struct xfs_inode *target_ip,
3149 unsigned int flags)
3150{
3151 struct xfs_mount *mp = src_dp->i_mount;
3152 struct xfs_trans *tp;
3153 struct xfs_inode *wip = NULL; /* whiteout inode */
3154 struct xfs_inode *inodes[__XFS_SORT_INODES];
3155 struct xfs_buf *agibp;
3156 int num_inodes = __XFS_SORT_INODES;
3157 bool new_parent = (src_dp != target_dp);
3158 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3159 int spaceres;
3160 int error;
3161
3162 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3163
3164 if ((flags & RENAME_EXCHANGE) && !target_ip)
3165 return -EINVAL;
3166
3167 /*
3168 * If we are doing a whiteout operation, allocate the whiteout inode
3169 * we will be placing at the target and ensure the type is set
3170 * appropriately.
3171 */
3172 if (flags & RENAME_WHITEOUT) {
3173 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3174 error = xfs_rename_alloc_whiteout(target_dp, &wip);
3175 if (error)
3176 return error;
3177
3178 /* setup target dirent info as whiteout */
3179 src_name->type = XFS_DIR3_FT_CHRDEV;
3180 }
3181
3182 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3183 inodes, &num_inodes);
3184
3185 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3186 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3187 if (error == -ENOSPC) {
3188 spaceres = 0;
3189 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3190 &tp);
3191 }
3192 if (error)
3193 goto out_release_wip;
3194
3195 /*
3196 * Attach the dquots to the inodes
3197 */
3198 error = xfs_qm_vop_rename_dqattach(inodes);
3199 if (error)
3200 goto out_trans_cancel;
3201
3202 /*
3203 * Lock all the participating inodes. Depending upon whether
3204 * the target_name exists in the target directory, and
3205 * whether the target directory is the same as the source
3206 * directory, we can lock from 2 to 4 inodes.
3207 */
3208 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3209
3210 /*
3211 * Join all the inodes to the transaction. From this point on,
3212 * we can rely on either trans_commit or trans_cancel to unlock
3213 * them.
3214 */
3215 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3216 if (new_parent)
3217 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3218 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3219 if (target_ip)
3220 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3221 if (wip)
3222 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3223
3224 /*
3225 * If we are using project inheritance, we only allow renames
3226 * into our tree when the project IDs are the same; else the
3227 * tree quota mechanism would be circumvented.
3228 */
3229 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3230 target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3231 error = -EXDEV;
3232 goto out_trans_cancel;
3233 }
3234
3235 /* RENAME_EXCHANGE is unique from here on. */
3236 if (flags & RENAME_EXCHANGE)
3237 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3238 target_dp, target_name, target_ip,
3239 spaceres);
3240
3241 /*
3242 * Check for expected errors before we dirty the transaction
3243 * so we can return an error without a transaction abort.
3244 */
3245 if (target_ip == NULL) {
3246 /*
3247 * If there's no space reservation, check the entry will
3248 * fit before actually inserting it.
3249 */
3250 if (!spaceres) {
3251 error = xfs_dir_canenter(tp, target_dp, target_name);
3252 if (error)
3253 goto out_trans_cancel;
3254 }
3255 } else {
3256 /*
3257		 * If target exists and it's a directory, check whether
3258 * it can be destroyed.
3259 */
3260 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3261 (!xfs_dir_isempty(target_ip) ||
3262 (VFS_I(target_ip)->i_nlink > 2))) {
3263 error = -EEXIST;
3264 goto out_trans_cancel;
3265 }
3266 }
3267
3268 /*
3269 * Directory entry creation below may acquire the AGF. Remove
3270 * the whiteout from the unlinked list first to preserve correct
3271 * AGI/AGF locking order. This dirties the transaction so failures
3272 * after this point will abort and log recovery will clean up the
3273 * mess.
3274 *
3275 * For whiteouts, we need to bump the link count on the whiteout
3276	 * inode. After this point we have a real link, so clear the tmpfile
3277	 * state flag from the inode so that it doesn't accidentally get
3278	 * misused in future.
3279 */
3280 if (wip) {
3281 ASSERT(VFS_I(wip)->i_nlink == 0);
3282 error = xfs_iunlink_remove(tp, wip);
3283 if (error)
3284 goto out_trans_cancel;
3285
3286 xfs_bumplink(tp, wip);
3287 VFS_I(wip)->i_state &= ~I_LINKABLE;
3288 }
3289
3290 /*
3291 * Set up the target.
3292 */
3293 if (target_ip == NULL) {
3294 /*
3295 * If target does not exist and the rename crosses
3296 * directories, adjust the target directory link count
3297 * to account for the ".." reference from the new entry.
3298 */
3299 error = xfs_dir_createname(tp, target_dp, target_name,
3300 src_ip->i_ino, spaceres);
3301 if (error)
3302 goto out_trans_cancel;
3303
3304 xfs_trans_ichgtime(tp, target_dp,
3305 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3306
3307 if (new_parent && src_is_directory) {
3308 xfs_bumplink(tp, target_dp);
3309 }
3310 } else { /* target_ip != NULL */
3311 /*
3312 * Link the source inode under the target name.
3313 * If the source inode is a directory and we are moving
3314 * it across directories, its ".." entry will be
3315 * inconsistent until we replace that down below.
3316 *
3317 * In case there is already an entry with the same
3318 * name at the destination directory, remove it first.
3319 */
3320
3321 /*
3322 * Check whether the replace operation will need to allocate
3323 * blocks. This happens when the shortform directory lacks
3324 * space and we have to convert it to a block format directory.
3325 * When more blocks are necessary, we must lock the AGI first
3326 * to preserve locking order (AGI -> AGF).
3327 */
3328 if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
3329 error = xfs_read_agi(mp, tp,
3330 XFS_INO_TO_AGNO(mp, target_ip->i_ino),
3331 &agibp);
3332 if (error)
3333 goto out_trans_cancel;
3334 }
3335
3336 error = xfs_dir_replace(tp, target_dp, target_name,
3337 src_ip->i_ino, spaceres);
3338 if (error)
3339 goto out_trans_cancel;
3340
3341 xfs_trans_ichgtime(tp, target_dp,
3342 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3343
3344 /*
3345 * Decrement the link count on the target since the target
3346 * dir no longer points to it.
3347 */
3348 error = xfs_droplink(tp, target_ip);
3349 if (error)
3350 goto out_trans_cancel;
3351
3352 if (src_is_directory) {
3353 /*
3354 * Drop the link from the old "." entry.
3355 */
3356 error = xfs_droplink(tp, target_ip);
3357 if (error)
3358 goto out_trans_cancel;
3359 }
3360 } /* target_ip != NULL */
3361
3362 /*
3363 * Remove the source.
3364 */
3365 if (new_parent && src_is_directory) {
3366 /*
3367 * Rewrite the ".." entry to point to the new
3368 * directory.
3369 */
3370 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3371 target_dp->i_ino, spaceres);
3372 ASSERT(error != -EEXIST);
3373 if (error)
3374 goto out_trans_cancel;
3375 }
3376
3377 /*
3378 * We always want to hit the ctime on the source inode.
3379 *
3380 * This isn't strictly required by the standards since the source
3381 * inode isn't really being changed, but old unix file systems did
3382 * it and some incremental backup programs won't work without it.
3383 */
3384 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3385 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3386
3387 /*
3388 * Adjust the link count on src_dp. This is necessary when
3389 * renaming a directory, either within one parent when
3390 * the target existed, or across two parent directories.
3391 */
3392 if (src_is_directory && (new_parent || target_ip != NULL)) {
3393
3394 /*
3395 * Decrement link count on src_directory since the
3396 * entry that's moved no longer points to it.
3397 */
3398 error = xfs_droplink(tp, src_dp);
3399 if (error)
3400 goto out_trans_cancel;
3401 }
3402
3403 /*
3404 * For whiteouts, we only need to update the source dirent with the
3405 * inode number of the whiteout inode rather than removing it
3406 * altogether.
3407 */
3408 if (wip) {
3409 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3410 spaceres);
3411 } else
3412 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3413 spaceres);
3414 if (error)
3415 goto out_trans_cancel;
3416
3417 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3418 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3419 if (new_parent)
3420 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3421
3422 error = xfs_finish_rename(tp);
3423 if (wip)
3424 xfs_irele(wip);
3425 return error;
3426
3427out_trans_cancel:
3428 xfs_trans_cancel(tp);
3429out_release_wip:
3430 if (wip)
3431 xfs_irele(wip);
3432 return error;
3433}
3434
3435static int
3436xfs_iflush(
3437 struct xfs_inode *ip,
3438 struct xfs_buf *bp)
3439{
3440 struct xfs_inode_log_item *iip = ip->i_itemp;
3441 struct xfs_dinode *dip;
3442 struct xfs_mount *mp = ip->i_mount;
3443 int error;
3444
3445 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3446 ASSERT(xfs_isiflocked(ip));
3447 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3448 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3449 ASSERT(iip->ili_item.li_buf == bp);
3450
3451 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3452
3453 /*
3454 * We don't flush the inode if any of the following checks fail, but we
3455 * do still update the log item and attach to the backing buffer as if
3456 * the flush happened. This is a formality to facilitate predictable
3457 * error handling as the caller will shutdown and fail the buffer.
3458 */
3459 error = -EFSCORRUPTED;
3460 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3461 mp, XFS_ERRTAG_IFLUSH_1)) {
3462 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3463 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3464 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3465 goto flush_out;
3466 }
3467 if (S_ISREG(VFS_I(ip)->i_mode)) {
3468 if (XFS_TEST_ERROR(
3469 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3470 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3471 mp, XFS_ERRTAG_IFLUSH_3)) {
3472 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3473 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3474 __func__, ip->i_ino, ip);
3475 goto flush_out;
3476 }
3477 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3478 if (XFS_TEST_ERROR(
3479 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3480 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3481 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3482 mp, XFS_ERRTAG_IFLUSH_4)) {
3483 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3484 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3485 __func__, ip->i_ino, ip);
3486 goto flush_out;
3487 }
3488 }
3489 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3490 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3491 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3492 "%s: detected corrupt incore inode %Lu, "
3493 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3494 __func__, ip->i_ino,
3495 ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3496 ip->i_d.di_nblocks, ip);
3497 goto flush_out;
3498 }
3499 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3500 mp, XFS_ERRTAG_IFLUSH_6)) {
3501 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3502 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3503 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3504 goto flush_out;
3505 }
3506
3507 /*
3508	 * Inode item log recovery for v2 inodes is dependent on the
3509 * di_flushiter count for correct sequencing. We bump the flush
3510 * iteration count so we can detect flushes which postdate a log record
3511 * during recovery. This is redundant as we now log every change and
3512 * hence this can't happen but we need to still do it to ensure
3513 * backwards compatibility with old kernels that predate logging all
3514 * inode changes.
3515 */
3516 if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3517 ip->i_d.di_flushiter++;
3518
3519 /*
3520 * If there are inline format data / attr forks attached to this inode,
3521 * make sure they are not corrupt.
3522 */
3523 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3524 xfs_ifork_verify_local_data(ip))
3525 goto flush_out;
3526 if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3527 xfs_ifork_verify_local_attr(ip))
3528 goto flush_out;
3529
3530 /*
3531 * Copy the dirty parts of the inode into the on-disk inode. We always
3532 * copy out the core of the inode, because if the inode is dirty at all
3533 * the core must be.
3534 */
3535 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3536
3537 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3538 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3539 ip->i_d.di_flushiter = 0;
3540
3541 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3542 if (XFS_IFORK_Q(ip))
3543 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3544
3545 /*
3546 * We've recorded everything logged in the inode, so we'd like to clear
3547 * the ili_fields bits so we don't log and flush things unnecessarily.
3548 * However, we can't stop logging all this information until the data
3549 * we've copied into the disk buffer is written to disk. If we did we
3550 * might overwrite the copy of the inode in the log with all the data
3551 * after re-logging only part of it, and in the face of a crash we
3552 * wouldn't have all the data we need to recover.
3553 *
3554 * What we do is move the bits to the ili_last_fields field. When
3555 * logging the inode, these bits are moved back to the ili_fields field.
3556 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3557 * know that the information those bits represent is permanently on
3558 * disk. As long as the flush completes before the inode is logged
3559 * again, then both ili_fields and ili_last_fields will be cleared.
3560 */
3561 error = 0;
3562flush_out:
3563 spin_lock(&iip->ili_lock);
3564 iip->ili_last_fields = iip->ili_fields;
3565 iip->ili_fields = 0;
3566 iip->ili_fsync_fields = 0;
3567 spin_unlock(&iip->ili_lock);
3568
3569 /*
3570 * Store the current LSN of the inode so that we can tell whether the
3571 * item has moved in the AIL from xfs_iflush_done().
3572 */
3573 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3574 &iip->ili_item.li_lsn);
3575
3576 /* generate the checksum. */
3577 xfs_dinode_calc_crc(mp, dip);
3578 return error;
3579}
3580
3581/*
3582 * Non-blocking flush of dirty inode metadata into the backing buffer.
3583 *
3584 * The caller must have a reference to the inode and hold the cluster buffer
3585 * locked. The function will walk across all the inodes on the cluster buffer
3586 * that it can find and lock without blocking, and flush them to the cluster buffer.
3587 *
3588 * On successful flushing of at least one inode, the caller must write out the
3589 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3590 * the caller needs to release the buffer. On failure, the filesystem will be
3591 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3592 * will be returned.
3593 */
int
xfs_iflush_cluster(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_log_item	*lip, *n;
	struct xfs_inode	*ip;
	struct xfs_inode_log_item *iip;
	int			clcount = 0;
	int			error = 0;

	/*
	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
	 * can remove itself from the list.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		iip = (struct xfs_inode_log_item *)lip;
		ip = iip->ili_inode;

		/*
		 * Quick and dirty check to avoid locks if possible.
		 */
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK))
			continue;
		if (xfs_ipincount(ip))
			continue;

		/*
		 * The inode is still attached to the buffer, which means it is
		 * dirty but reclaim might try to grab it. Check carefully for
		 * that, and grab the ilock while still holding the i_flags_lock
		 * to guarantee reclaim will not be able to reclaim this inode
		 * once we drop the i_flags_lock.
		 */
		spin_lock(&ip->i_flags_lock);
		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}

		/*
		 * ILOCK will pin the inode against reclaim and prevent
		 * concurrent transactions modifying the inode while we are
		 * flushing the inode.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Skip inodes that are already flush locked as they have
		 * already been written to the buffer.
		 */
		if (!xfs_iflock_nowait(ip)) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Abort flushing this inode if we are shut down because the
		 * inode may not currently be in the AIL. This can occur when
		 * log I/O failure unpins the inode without inserting into the
		 * AIL, leaving a dirty/unpinned inode attached to the buffer
		 * that otherwise looks like it should be flushed.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			xfs_iunpin_wait(ip);
			/* xfs_iflush_abort() drops the flush lock */
			xfs_iflush_abort(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = -EIO;
			continue;
		}

		/* don't block waiting on a log force to unpin dirty inodes */
		if (xfs_ipincount(ip)) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			continue;
		}

		if (!xfs_inode_clean(ip))
			error = xfs_iflush(ip, bp);
		else
			xfs_ifunlock(ip);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error)
			break;
		clcount++;
	}

	if (error) {
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	if (!clcount)
		return -EAGAIN;

	XFS_STATS_INC(mp, xs_icluster_flushcnt);
	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	return 0;
}

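/*
 * Editorial sketch (not part of the original file): how a caller might
 * honour the xfs_iflush_cluster() contract described above. The helper name
 * example_flush_one_buffer() is hypothetical; the real caller is the
 * delayed-write buffer submission path, which submits the buffer
 * asynchronously rather than via the synchronous xfs_bwrite().
 */
static int __maybe_unused
example_flush_one_buffer(
	struct xfs_buf		*bp)
{
	int			error;

	error = xfs_iflush_cluster(bp);
	if (error == -EAGAIN) {
		/* Nothing was flushed; we still hold the buffer locked. */
		xfs_buf_relse(bp);
		return 0;
	}
	if (error)
		/* Shutdown path: the buffer has already been released. */
		return error;

	/* At least one inode was flushed; write out and release the buffer. */
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}
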
/* Release an inode. */
void
xfs_irele(
	struct xfs_inode	*ip)
{
	trace_xfs_irele(ip, _RET_IP_);
	iput(VFS_I(ip));
}

/*
 * Ensure all committed transactions touching the inode are written to the log.
 */
int
xfs_log_force_inode(
	struct xfs_inode	*ip)
{
	xfs_lsn_t		lsn = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
}

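/*
 * Editorial sketch (not part of the original file): an fsync()-style caller
 * typically writes back the data first, then uses xfs_log_force_inode() to
 * make the committed metadata changes stable. example_fsync_metadata() is a
 * hypothetical name.
 */
static int __maybe_unused
example_fsync_metadata(
	struct xfs_inode	*ip)
{
	int			error;

	/* Flush and wait for the inode's dirty pagecache data. */
	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;

	/* Push all committed transactions for this inode to the log. */
	return xfs_log_force_inode(ip);
}
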
/*
 * Grab the exclusive iolock for a data copy from src to dest, making sure to
 * abide by the vfs locking order (lowest pointer value goes first) and to
 * break the layout leases before proceeding. The loop is needed because we
 * cannot call the blocking break_layout() with the iolocks held, and
 * therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

	if (src > dest)
		swap(src, dest);

retry:
	/* Wait to break both inodes' layouts before we start locking. */
	error = break_layout(src, true);
	if (error)
		return error;
	if (src != dest) {
		error = break_layout(dest, true);
		if (error)
			return error;
	}

	/* Lock one inode and make sure nobody got in and leased it. */
	inode_lock(src);
	error = break_layout(src, false);
	if (error) {
		inode_unlock(src);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	if (src == dest)
		return 0;

	/* Lock the other inode and make sure nobody got in and leased it. */
	inode_lock_nested(dest, I_MUTEX_NONDIR2);
	error = break_layout(dest, false);
	if (error) {
		inode_unlock(src);
		inode_unlock(dest);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	return 0;
}

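/*
 * Editorial note (not from the original source): sorting the two inodes by
 * pointer value gives every racing pair of tasks the same global lock order.
 * If task A locks (X, Y) while task B locks (Y, X), both sort to "X first,
 * then Y", so the classic ABBA deadlock cannot occur; only the second lock
 * ever has to wait.
 */
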
/*
 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
 * mmap activity.
 */
int
xfs_ilock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			ret;

	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
	if (ret)
		return ret;
	if (ip1 == ip2)
		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
				    ip2, XFS_MMAPLOCK_EXCL);
	return 0;
}

/* Unlock both inodes to allow IO and mmap activity. */
void
xfs_iunlock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	bool			same_inode = (ip1 == ip2);

	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
	inode_unlock(VFS_I(ip2));
	if (!same_inode)
		inode_unlock(VFS_I(ip1));
}
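
/*
 * Editorial sketch (not part of the original file): the intended pairing of
 * xfs_ilock2_io_mmap()/xfs_iunlock2_io_mmap() around an operation that must
 * quiesce both syscall and mmap I/O against two files, such as reflink range
 * remapping. example_locked_two_file_op() and the commented-out work call
 * are hypothetical.
 */
static int __maybe_unused
example_locked_two_file_op(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	/*
	 * Both i_rwsems and both MMAPLOCKs are now held exclusively and any
	 * layout leases have been broken, so no new user I/O can start
	 * against either inode.
	 */
	error = 0;	/* error = example_remap_work(ip1, ip2); */

	xfs_iunlock2_io_mmap(ip1, ip2);
	return error;
}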
146
147/*
148 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
149 * multi-reader locks: i_mmap_lock and the i_lock. This routine allows
150 * various combinations of the locks to be obtained.
151 *
152 * The 3 locks should always be ordered so that the IO lock is obtained first,
153 * the mmap lock second and the ilock last in order to prevent deadlock.
154 *
155 * Basic locking order:
156 *
157 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
158 *
159 * mmap_sem locking order:
160 *
161 * i_rwsem -> page lock -> mmap_sem
162 * mmap_sem -> i_mmap_lock -> page_lock
163 *
164 * The difference in mmap_sem locking order mean that we cannot hold the
165 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
166 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
167 * in get_user_pages() to map the user pages into the kernel address space for
168 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
169 * page faults already hold the mmap_sem.
170 *
171 * Hence to serialise fully against both syscall and mmap based IO, we need to
172 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
173 * taken in places where we need to invalidate the page cache in a race
174 * free manner (e.g. truncate, hole punch and other extent manipulation
175 * functions).
176 */
177void
178xfs_ilock(
179 xfs_inode_t *ip,
180 uint lock_flags)
181{
182 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
183
184 /*
185 * You can't set both SHARED and EXCL for the same lock,
186 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
187 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
188 */
189 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
190 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
191 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
192 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
193 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
194 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
195 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
196
197 if (lock_flags & XFS_IOLOCK_EXCL) {
198 down_write_nested(&VFS_I(ip)->i_rwsem,
199 XFS_IOLOCK_DEP(lock_flags));
200 } else if (lock_flags & XFS_IOLOCK_SHARED) {
201 down_read_nested(&VFS_I(ip)->i_rwsem,
202 XFS_IOLOCK_DEP(lock_flags));
203 }
204
205 if (lock_flags & XFS_MMAPLOCK_EXCL)
206 mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
207 else if (lock_flags & XFS_MMAPLOCK_SHARED)
208 mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
209
210 if (lock_flags & XFS_ILOCK_EXCL)
211 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
212 else if (lock_flags & XFS_ILOCK_SHARED)
213 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
214}
215
216/*
217 * This is just like xfs_ilock(), except that the caller
218 * is guaranteed not to sleep. It returns 1 if it gets
219 * the requested locks and 0 otherwise. If the IO lock is
220 * obtained but the inode lock cannot be, then the IO lock
221 * is dropped before returning.
222 *
223 * ip -- the inode being locked
224 * lock_flags -- this parameter indicates the inode's locks to be
225 * to be locked. See the comment for xfs_ilock() for a list
226 * of valid values.
227 */
228int
229xfs_ilock_nowait(
230 xfs_inode_t *ip,
231 uint lock_flags)
232{
233 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
234
235 /*
236 * You can't set both SHARED and EXCL for the same lock,
237 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
238 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
239 */
240 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
241 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
242 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
243 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
244 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
245 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
246 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
247
248 if (lock_flags & XFS_IOLOCK_EXCL) {
249 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
250 goto out;
251 } else if (lock_flags & XFS_IOLOCK_SHARED) {
252 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
253 goto out;
254 }
255
256 if (lock_flags & XFS_MMAPLOCK_EXCL) {
257 if (!mrtryupdate(&ip->i_mmaplock))
258 goto out_undo_iolock;
259 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
260 if (!mrtryaccess(&ip->i_mmaplock))
261 goto out_undo_iolock;
262 }
263
264 if (lock_flags & XFS_ILOCK_EXCL) {
265 if (!mrtryupdate(&ip->i_lock))
266 goto out_undo_mmaplock;
267 } else if (lock_flags & XFS_ILOCK_SHARED) {
268 if (!mrtryaccess(&ip->i_lock))
269 goto out_undo_mmaplock;
270 }
271 return 1;
272
273out_undo_mmaplock:
274 if (lock_flags & XFS_MMAPLOCK_EXCL)
275 mrunlock_excl(&ip->i_mmaplock);
276 else if (lock_flags & XFS_MMAPLOCK_SHARED)
277 mrunlock_shared(&ip->i_mmaplock);
278out_undo_iolock:
279 if (lock_flags & XFS_IOLOCK_EXCL)
280 up_write(&VFS_I(ip)->i_rwsem);
281 else if (lock_flags & XFS_IOLOCK_SHARED)
282 up_read(&VFS_I(ip)->i_rwsem);
283out:
284 return 0;
285}
286
287/*
288 * xfs_iunlock() is used to drop the inode locks acquired with
289 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
290 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
291 * that we know which locks to drop.
292 *
293 * ip -- the inode being unlocked
294 * lock_flags -- this parameter indicates the inode's locks to be
295 * to be unlocked. See the comment for xfs_ilock() for a list
296 * of valid values for this parameter.
297 *
298 */
299void
300xfs_iunlock(
301 xfs_inode_t *ip,
302 uint lock_flags)
303{
304 /*
305 * You can't set both SHARED and EXCL for the same lock,
306 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
307 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
308 */
309 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
310 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
311 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
312 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
313 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
314 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
315 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
316 ASSERT(lock_flags != 0);
317
318 if (lock_flags & XFS_IOLOCK_EXCL)
319 up_write(&VFS_I(ip)->i_rwsem);
320 else if (lock_flags & XFS_IOLOCK_SHARED)
321 up_read(&VFS_I(ip)->i_rwsem);
322
323 if (lock_flags & XFS_MMAPLOCK_EXCL)
324 mrunlock_excl(&ip->i_mmaplock);
325 else if (lock_flags & XFS_MMAPLOCK_SHARED)
326 mrunlock_shared(&ip->i_mmaplock);
327
328 if (lock_flags & XFS_ILOCK_EXCL)
329 mrunlock_excl(&ip->i_lock);
330 else if (lock_flags & XFS_ILOCK_SHARED)
331 mrunlock_shared(&ip->i_lock);
332
333 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
334}
335
336/*
337 * give up write locks. the i/o lock cannot be held nested
338 * if it is being demoted.
339 */
340void
341xfs_ilock_demote(
342 xfs_inode_t *ip,
343 uint lock_flags)
344{
345 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
346 ASSERT((lock_flags &
347 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
348
349 if (lock_flags & XFS_ILOCK_EXCL)
350 mrdemote(&ip->i_lock);
351 if (lock_flags & XFS_MMAPLOCK_EXCL)
352 mrdemote(&ip->i_mmaplock);
353 if (lock_flags & XFS_IOLOCK_EXCL)
354 downgrade_write(&VFS_I(ip)->i_rwsem);
355
356 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
357}
358
359#if defined(DEBUG) || defined(XFS_WARN)
360int
361xfs_isilocked(
362 xfs_inode_t *ip,
363 uint lock_flags)
364{
365 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
366 if (!(lock_flags & XFS_ILOCK_SHARED))
367 return !!ip->i_lock.mr_writer;
368 return rwsem_is_locked(&ip->i_lock.mr_lock);
369 }
370
371 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
372 if (!(lock_flags & XFS_MMAPLOCK_SHARED))
373 return !!ip->i_mmaplock.mr_writer;
374 return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
375 }
376
377 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
378 if (!(lock_flags & XFS_IOLOCK_SHARED))
379 return !debug_locks ||
380 lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
381 return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
382 }
383
384 ASSERT(0);
385 return 0;
386}
387#endif
388
389/*
390 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
391 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
392 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
393 * errors and warnings.
394 */
395#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
396static bool
397xfs_lockdep_subclass_ok(
398 int subclass)
399{
400 return subclass < MAX_LOCKDEP_SUBCLASSES;
401}
402#else
403#define xfs_lockdep_subclass_ok(subclass) (true)
404#endif
405
406/*
407 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
408 * value. This can be called for any type of inode lock combination, including
409 * parent locking. Care must be taken to ensure we don't overrun the subclass
410 * storage fields in the class mask we build.
411 */
412static inline int
413xfs_lock_inumorder(int lock_mode, int subclass)
414{
415 int class = 0;
416
417 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
418 XFS_ILOCK_RTSUM)));
419 ASSERT(xfs_lockdep_subclass_ok(subclass));
420
421 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
422 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
423 class += subclass << XFS_IOLOCK_SHIFT;
424 }
425
426 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
427 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
428 class += subclass << XFS_MMAPLOCK_SHIFT;
429 }
430
431 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
432 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
433 class += subclass << XFS_ILOCK_SHIFT;
434 }
435
436 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
437}
438
439/*
440 * The following routine will lock n inodes in exclusive mode. We assume the
441 * caller calls us with the inodes in i_ino order.
442 *
443 * We need to detect deadlock where an inode that we lock is in the AIL and we
444 * start waiting for another inode that is locked by a thread in a long running
445 * transaction (such as truncate). This can result in deadlock since the long
446 * running trans might need to wait for the inode we just locked in order to
447 * push the tail and free space in the log.
448 *
449 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
450 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
451 * lock more than one at a time, lockdep will report false positives saying we
452 * have violated locking orders.
453 */
454static void
455xfs_lock_inodes(
456 xfs_inode_t **ips,
457 int inodes,
458 uint lock_mode)
459{
460 int attempts = 0, i, j, try_lock;
461 xfs_log_item_t *lp;
462
463 /*
464 * Currently supports between 2 and 5 inodes with exclusive locking. We
465 * support an arbitrary depth of locking here, but absolute limits on
466 * inodes depend on the the type of locking and the limits placed by
467 * lockdep annotations in xfs_lock_inumorder. These are all checked by
468 * the asserts.
469 */
470 ASSERT(ips && inodes >= 2 && inodes <= 5);
471 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
472 XFS_ILOCK_EXCL));
473 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
474 XFS_ILOCK_SHARED)));
475 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
476 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
477 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
478 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
479
480 if (lock_mode & XFS_IOLOCK_EXCL) {
481 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
482 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
483 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
484
485 try_lock = 0;
486 i = 0;
487again:
488 for (; i < inodes; i++) {
489 ASSERT(ips[i]);
490
491 if (i && (ips[i] == ips[i - 1])) /* Already locked */
492 continue;
493
494 /*
495 * If try_lock is not set yet, make sure all locked inodes are
496 * not in the AIL. If any are, set try_lock to be used later.
497 */
498 if (!try_lock) {
499 for (j = (i - 1); j >= 0 && !try_lock; j--) {
500 lp = (xfs_log_item_t *)ips[j]->i_itemp;
501 if (lp && (lp->li_flags & XFS_LI_IN_AIL))
502 try_lock++;
503 }
504 }
505
506 /*
507 * If any of the previous locks we have locked is in the AIL,
508 * we must TRY to get the second and subsequent locks. If
509 * we can't get any, we must release all we have
510 * and try again.
511 */
512 if (!try_lock) {
513 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
514 continue;
515 }
516
517 /* try_lock means we have an inode locked that is in the AIL. */
518 ASSERT(i != 0);
519 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
520 continue;
521
522 /*
523 * Unlock all previous guys and try again. xfs_iunlock will try
524 * to push the tail if the inode is in the AIL.
525 */
526 attempts++;
527 for (j = i - 1; j >= 0; j--) {
528 /*
529 * Check to see if we've already unlocked this one. Not
530 * the first one going back, and the inode ptr is the
531 * same.
532 */
533 if (j != (i - 1) && ips[j] == ips[j + 1])
534 continue;
535
536 xfs_iunlock(ips[j], lock_mode);
537 }
538
539 if ((attempts % 5) == 0) {
540 delay(1); /* Don't just spin the CPU */
541 }
542 i = 0;
543 try_lock = 0;
544 goto again;
545 }
546}
547
548/*
549 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
550 * the mmaplock or the ilock, but not more than one type at a time. If we lock
551 * more than one at a time, lockdep will report false positives saying we have
552 * violated locking orders. The iolock must be double-locked separately since
553 * we use i_rwsem for that. We now support taking one lock EXCL and the other
554 * SHARED.
555 */
556void
557xfs_lock_two_inodes(
558 struct xfs_inode *ip0,
559 uint ip0_mode,
560 struct xfs_inode *ip1,
561 uint ip1_mode)
562{
563 struct xfs_inode *temp;
564 uint mode_temp;
565 int attempts = 0;
566 xfs_log_item_t *lp;
567
568 ASSERT(hweight32(ip0_mode) == 1);
569 ASSERT(hweight32(ip1_mode) == 1);
570 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
571 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
572 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
573 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
574 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
575 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
576 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
577 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
578 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
579 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
580
581 ASSERT(ip0->i_ino != ip1->i_ino);
582
583 if (ip0->i_ino > ip1->i_ino) {
584 temp = ip0;
585 ip0 = ip1;
586 ip1 = temp;
587 mode_temp = ip0_mode;
588 ip0_mode = ip1_mode;
589 ip1_mode = mode_temp;
590 }
591
592 again:
593 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
594
595 /*
596 * If the first lock we have locked is in the AIL, we must TRY to get
597 * the second lock. If we can't get it, we must release the first one
598 * and try again.
599 */
600 lp = (xfs_log_item_t *)ip0->i_itemp;
601 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
602 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
603 xfs_iunlock(ip0, ip0_mode);
604 if ((++attempts % 5) == 0)
605 delay(1); /* Don't just spin the CPU */
606 goto again;
607 }
608 } else {
609 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
610 }
611}
612
613void
614__xfs_iflock(
615 struct xfs_inode *ip)
616{
617 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
618 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
619
620 do {
621 prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
622 if (xfs_isiflocked(ip))
623 io_schedule();
624 } while (!xfs_iflock_nowait(ip));
625
626 finish_wait(wq, &wait.wq_entry);
627}
628
629STATIC uint
630_xfs_dic2xflags(
631 uint16_t di_flags,
632 uint64_t di_flags2,
633 bool has_attr)
634{
635 uint flags = 0;
636
637 if (di_flags & XFS_DIFLAG_ANY) {
638 if (di_flags & XFS_DIFLAG_REALTIME)
639 flags |= FS_XFLAG_REALTIME;
640 if (di_flags & XFS_DIFLAG_PREALLOC)
641 flags |= FS_XFLAG_PREALLOC;
642 if (di_flags & XFS_DIFLAG_IMMUTABLE)
643 flags |= FS_XFLAG_IMMUTABLE;
644 if (di_flags & XFS_DIFLAG_APPEND)
645 flags |= FS_XFLAG_APPEND;
646 if (di_flags & XFS_DIFLAG_SYNC)
647 flags |= FS_XFLAG_SYNC;
648 if (di_flags & XFS_DIFLAG_NOATIME)
649 flags |= FS_XFLAG_NOATIME;
650 if (di_flags & XFS_DIFLAG_NODUMP)
651 flags |= FS_XFLAG_NODUMP;
652 if (di_flags & XFS_DIFLAG_RTINHERIT)
653 flags |= FS_XFLAG_RTINHERIT;
654 if (di_flags & XFS_DIFLAG_PROJINHERIT)
655 flags |= FS_XFLAG_PROJINHERIT;
656 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
657 flags |= FS_XFLAG_NOSYMLINKS;
658 if (di_flags & XFS_DIFLAG_EXTSIZE)
659 flags |= FS_XFLAG_EXTSIZE;
660 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
661 flags |= FS_XFLAG_EXTSZINHERIT;
662 if (di_flags & XFS_DIFLAG_NODEFRAG)
663 flags |= FS_XFLAG_NODEFRAG;
664 if (di_flags & XFS_DIFLAG_FILESTREAM)
665 flags |= FS_XFLAG_FILESTREAM;
666 }
667
668 if (di_flags2 & XFS_DIFLAG2_ANY) {
669 if (di_flags2 & XFS_DIFLAG2_DAX)
670 flags |= FS_XFLAG_DAX;
671 if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
672 flags |= FS_XFLAG_COWEXTSIZE;
673 }
674
675 if (has_attr)
676 flags |= FS_XFLAG_HASATTR;
677
678 return flags;
679}
680
681uint
682xfs_ip2xflags(
683 struct xfs_inode *ip)
684{
685 struct xfs_icdinode *dic = &ip->i_d;
686
687 return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
688}
689
690/*
691 * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
692 * is allowed, otherwise it has to be an exact match. If a CI match is found,
693 * ci_name->name will point to a the actual name (caller must free) or
694 * will be set to NULL if an exact match is found.
695 */
696int
697xfs_lookup(
698 xfs_inode_t *dp,
699 struct xfs_name *name,
700 xfs_inode_t **ipp,
701 struct xfs_name *ci_name)
702{
703 xfs_ino_t inum;
704 int error;
705
706 trace_xfs_lookup(dp, name);
707
708 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
709 return -EIO;
710
711 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
712 if (error)
713 goto out_unlock;
714
715 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
716 if (error)
717 goto out_free_name;
718
719 return 0;
720
721out_free_name:
722 if (ci_name)
723 kmem_free(ci_name->name);
724out_unlock:
725 *ipp = NULL;
726 return error;
727}
728
729/*
730 * Allocate an inode on disk and return a copy of its in-core version.
731 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
732 * appropriately within the inode. The uid and gid for the inode are
733 * set according to the contents of the given cred structure.
734 *
735 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
736 * has a free inode available, call xfs_iget() to obtain the in-core
737 * version of the allocated inode. Finally, fill in the inode and
738 * log its initial contents. In this case, ialloc_context would be
739 * set to NULL.
740 *
741 * If xfs_dialloc() does not have an available inode, it will replenish
742 * its supply by doing an allocation. Since we can only do one
743 * allocation within a transaction without deadlocks, we must commit
744 * the current transaction before returning the inode itself.
745 * In this case, therefore, we will set ialloc_context and return.
746 * The caller should then commit the current transaction, start a new
747 * transaction, and call xfs_ialloc() again to actually get the inode.
748 *
749 * To ensure that some other process does not grab the inode that
750 * was allocated during the first call to xfs_ialloc(), this routine
751 * also returns the [locked] bp pointing to the head of the freelist
752 * as ialloc_context. The caller should hold this buffer across
753 * the commit and pass it back into this routine on the second call.
754 *
755 * If we are allocating quota inodes, we do not have a parent inode
756 * to attach to or associate with (i.e. pip == NULL) because they
757 * are not linked into the directory structure - they are attached
758 * directly to the superblock - and so have no parent.
759 */
760static int
761xfs_ialloc(
762 xfs_trans_t *tp,
763 xfs_inode_t *pip,
764 umode_t mode,
765 xfs_nlink_t nlink,
766 dev_t rdev,
767 prid_t prid,
768 xfs_buf_t **ialloc_context,
769 xfs_inode_t **ipp)
770{
771 struct xfs_mount *mp = tp->t_mountp;
772 xfs_ino_t ino;
773 xfs_inode_t *ip;
774 uint flags;
775 int error;
776 struct timespec tv;
777 struct inode *inode;
778
779 /*
780 * Call the space management code to pick
781 * the on-disk inode to be allocated.
782 */
783 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
784 ialloc_context, &ino);
785 if (error)
786 return error;
787 if (*ialloc_context || ino == NULLFSINO) {
788 *ipp = NULL;
789 return 0;
790 }
791 ASSERT(*ialloc_context == NULL);
792
793 /*
794 * Get the in-core inode with the lock held exclusively.
795 * This is because we're setting fields here we need
796 * to prevent others from looking at until we're done.
797 */
798 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
799 XFS_ILOCK_EXCL, &ip);
800 if (error)
801 return error;
802 ASSERT(ip != NULL);
803 inode = VFS_I(ip);
804
805 /*
806 * We always convert v1 inodes to v2 now - we only support filesystems
807 * with >= v2 inode capability, so there is no reason for ever leaving
808 * an inode in v1 format.
809 */
810 if (ip->i_d.di_version == 1)
811 ip->i_d.di_version = 2;
812
813 inode->i_mode = mode;
814 set_nlink(inode, nlink);
815 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
816 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
817 inode->i_rdev = rdev;
818 xfs_set_projid(ip, prid);
819
820 if (pip && XFS_INHERIT_GID(pip)) {
821 ip->i_d.di_gid = pip->i_d.di_gid;
822 if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
823 inode->i_mode |= S_ISGID;
824 }
825
826 /*
827 * If the group ID of the new file does not match the effective group
828 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
829 * (and only if the irix_sgid_inherit compatibility variable is set).
830 */
831 if ((irix_sgid_inherit) &&
832 (inode->i_mode & S_ISGID) &&
833 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
834 inode->i_mode &= ~S_ISGID;
835
836 ip->i_d.di_size = 0;
837 ip->i_d.di_nextents = 0;
838 ASSERT(ip->i_d.di_nblocks == 0);
839
840 tv = current_time(inode);
841 inode->i_mtime = tv;
842 inode->i_atime = tv;
843 inode->i_ctime = tv;
844
845 ip->i_d.di_extsize = 0;
846 ip->i_d.di_dmevmask = 0;
847 ip->i_d.di_dmstate = 0;
848 ip->i_d.di_flags = 0;
849
850 if (ip->i_d.di_version == 3) {
851 inode_set_iversion(inode, 1);
852 ip->i_d.di_flags2 = 0;
853 ip->i_d.di_cowextsize = 0;
854 ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
855 ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
856 }
857
858
859 flags = XFS_ILOG_CORE;
860 switch (mode & S_IFMT) {
861 case S_IFIFO:
862 case S_IFCHR:
863 case S_IFBLK:
864 case S_IFSOCK:
865 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
866 ip->i_df.if_flags = 0;
867 flags |= XFS_ILOG_DEV;
868 break;
869 case S_IFREG:
870 case S_IFDIR:
871 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
872 uint di_flags = 0;
873
874 if (S_ISDIR(mode)) {
875 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
876 di_flags |= XFS_DIFLAG_RTINHERIT;
877 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
878 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
879 ip->i_d.di_extsize = pip->i_d.di_extsize;
880 }
881 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
882 di_flags |= XFS_DIFLAG_PROJINHERIT;
883 } else if (S_ISREG(mode)) {
884 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
885 di_flags |= XFS_DIFLAG_REALTIME;
886 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
887 di_flags |= XFS_DIFLAG_EXTSIZE;
888 ip->i_d.di_extsize = pip->i_d.di_extsize;
889 }
890 }
891 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
892 xfs_inherit_noatime)
893 di_flags |= XFS_DIFLAG_NOATIME;
894 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
895 xfs_inherit_nodump)
896 di_flags |= XFS_DIFLAG_NODUMP;
897 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
898 xfs_inherit_sync)
899 di_flags |= XFS_DIFLAG_SYNC;
900 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
901 xfs_inherit_nosymlinks)
902 di_flags |= XFS_DIFLAG_NOSYMLINKS;
903 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
904 xfs_inherit_nodefrag)
905 di_flags |= XFS_DIFLAG_NODEFRAG;
906 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
907 di_flags |= XFS_DIFLAG_FILESTREAM;
908
909 ip->i_d.di_flags |= di_flags;
910 }
911 if (pip &&
912 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
913 pip->i_d.di_version == 3 &&
914 ip->i_d.di_version == 3) {
915 uint64_t di_flags2 = 0;
916
917 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
918 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
919 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
920 }
921 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
922 di_flags2 |= XFS_DIFLAG2_DAX;
923
924 ip->i_d.di_flags2 |= di_flags2;
925 }
926 /* FALLTHROUGH */
927 case S_IFLNK:
928 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
929 ip->i_df.if_flags = XFS_IFEXTENTS;
930 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
931 ip->i_df.if_u1.if_root = NULL;
932 break;
933 default:
934 ASSERT(0);
935 }
936 /*
937 * Attribute fork settings for new inode.
938 */
939 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
940 ip->i_d.di_anextents = 0;
941
942 /*
943 * Log the new values stuffed into the inode.
944 */
945 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
946 xfs_trans_log_inode(tp, ip, flags);
947
948 /* now that we have an i_mode we can setup the inode structure */
949 xfs_setup_inode(ip);
950
951 *ipp = ip;
952 return 0;
953}
954
955/*
956 * Allocates a new inode from disk and return a pointer to the
957 * incore copy. This routine will internally commit the current
958 * transaction and allocate a new one if the Space Manager needed
959 * to do an allocation to replenish the inode free-list.
960 *
961 * This routine is designed to be called from xfs_create and
962 * xfs_create_dir.
963 *
964 */
965int
966xfs_dir_ialloc(
967 xfs_trans_t **tpp, /* input: current transaction;
968 output: may be a new transaction. */
969 xfs_inode_t *dp, /* directory within whose allocate
970 the inode. */
971 umode_t mode,
972 xfs_nlink_t nlink,
973 dev_t rdev,
974 prid_t prid, /* project id */
975 xfs_inode_t **ipp) /* pointer to inode; it will be
976 locked. */
977{
978 xfs_trans_t *tp;
979 xfs_inode_t *ip;
980 xfs_buf_t *ialloc_context = NULL;
981 int code;
982 void *dqinfo;
983 uint tflags;
984
985 tp = *tpp;
986 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
987
988 /*
989 * xfs_ialloc will return a pointer to an incore inode if
990 * the Space Manager has an available inode on the free
991 * list. Otherwise, it will do an allocation and replenish
992 * the freelist. Since we can only do one allocation per
993 * transaction without deadlocks, we will need to commit the
994 * current transaction and start a new one. We will then
995 * need to call xfs_ialloc again to get the inode.
996 *
997 * If xfs_ialloc did an allocation to replenish the freelist,
998 * it returns the bp containing the head of the freelist as
999 * ialloc_context. We will hold a lock on it across the
1000 * transaction commit so that no other process can steal
1001 * the inode(s) that we've just allocated.
1002 */
1003 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
1004 &ip);
1005
1006 /*
1007 * Return an error if we were unable to allocate a new inode.
1008 * This should only happen if we run out of space on disk or
1009 * encounter a disk error.
1010 */
1011 if (code) {
1012 *ipp = NULL;
1013 return code;
1014 }
1015 if (!ialloc_context && !ip) {
1016 *ipp = NULL;
1017 return -ENOSPC;
1018 }
1019
1020 /*
1021 * If the AGI buffer is non-NULL, then we were unable to get an
1022 * inode in one operation. We need to commit the current
1023 * transaction and call xfs_ialloc() again. It is guaranteed
1024 * to succeed the second time.
1025 */
1026 if (ialloc_context) {
1027 /*
1028 * Normally, xfs_trans_commit releases all the locks.
1029 * We call bhold to hang on to the ialloc_context across
1030 * the commit. Holding this buffer prevents any other
1031 * processes from doing any allocations in this
1032 * allocation group.
1033 */
1034 xfs_trans_bhold(tp, ialloc_context);
1035
1036 /*
1037 * We want the quota changes to be associated with the next
1038 * transaction, NOT this one. So, detach the dqinfo from this
1039 * and attach it to the next transaction.
1040 */
1041 dqinfo = NULL;
1042 tflags = 0;
1043 if (tp->t_dqinfo) {
1044 dqinfo = (void *)tp->t_dqinfo;
1045 tp->t_dqinfo = NULL;
1046 tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1047 tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1048 }
1049
1050 code = xfs_trans_roll(&tp);
1051
1052 /*
1053 * Re-attach the quota info that we detached from prev trx.
1054 */
1055 if (dqinfo) {
1056 tp->t_dqinfo = dqinfo;
1057 tp->t_flags |= tflags;
1058 }
1059
1060 if (code) {
1061 xfs_buf_relse(ialloc_context);
1062 *tpp = tp;
1063 *ipp = NULL;
1064 return code;
1065 }
1066 xfs_trans_bjoin(tp, ialloc_context);
1067
1068 /*
1069 * Call ialloc again. Since we've locked out all
1070 * other allocations in this allocation group,
1071 * this call should always succeed.
1072 */
1073 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1074 &ialloc_context, &ip);
1075
1076 /*
1077 * If we get an error at this point, return to the caller
1078 * so that the current transaction can be aborted.
1079 */
1080 if (code) {
1081 *tpp = tp;
1082 *ipp = NULL;
1083 return code;
1084 }
1085 ASSERT(!ialloc_context && ip);
1086
1087 }
1088
1089 *ipp = ip;
1090 *tpp = tp;
1091
1092 return 0;
1093}
1094
1095/*
1096 * Decrement the link count on an inode & log the change. If this causes the
1097 * link count to go to zero, move the inode to AGI unlinked list so that it can
1098 * be freed when the last active reference goes away via xfs_inactive().
1099 */
1100static int /* error */
1101xfs_droplink(
1102 xfs_trans_t *tp,
1103 xfs_inode_t *ip)
1104{
1105 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1106
1107 drop_nlink(VFS_I(ip));
1108 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1109
1110 if (VFS_I(ip)->i_nlink)
1111 return 0;
1112
1113 return xfs_iunlink(tp, ip);
1114}
1115
1116/*
1117 * Increment the link count on an inode & log the change.
1118 */
1119static int
1120xfs_bumplink(
1121 xfs_trans_t *tp,
1122 xfs_inode_t *ip)
1123{
1124 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1125
1126 ASSERT(ip->i_d.di_version > 1);
1127 inc_nlink(VFS_I(ip));
1128 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1129 return 0;
1130}
1131
1132int
1133xfs_create(
1134 xfs_inode_t *dp,
1135 struct xfs_name *name,
1136 umode_t mode,
1137 dev_t rdev,
1138 xfs_inode_t **ipp)
1139{
1140 int is_dir = S_ISDIR(mode);
1141 struct xfs_mount *mp = dp->i_mount;
1142 struct xfs_inode *ip = NULL;
1143 struct xfs_trans *tp = NULL;
1144 int error;
1145 struct xfs_defer_ops dfops;
1146 xfs_fsblock_t first_block;
1147 bool unlock_dp_on_error = false;
1148 prid_t prid;
1149 struct xfs_dquot *udqp = NULL;
1150 struct xfs_dquot *gdqp = NULL;
1151 struct xfs_dquot *pdqp = NULL;
1152 struct xfs_trans_res *tres;
1153 uint resblks;
1154
1155 trace_xfs_create(dp, name);
1156
1157 if (XFS_FORCED_SHUTDOWN(mp))
1158 return -EIO;
1159
1160 prid = xfs_get_initial_prid(dp);
1161
1162 /*
1163 * Make sure that we have allocated dquot(s) on disk.
1164 */
1165 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1166 xfs_kgid_to_gid(current_fsgid()), prid,
1167 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1168 &udqp, &gdqp, &pdqp);
1169 if (error)
1170 return error;
1171
1172 if (is_dir) {
1173 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1174 tres = &M_RES(mp)->tr_mkdir;
1175 } else {
1176 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1177 tres = &M_RES(mp)->tr_create;
1178 }
1179
1180 /*
1181 * Initially assume that the file does not exist and
1182 * reserve the resources for that case. If that is not
1183 * the case we'll drop the one we have and get a more
1184 * appropriate transaction later.
1185 */
1186 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1187 if (error == -ENOSPC) {
1188 /* flush outstanding delalloc blocks and retry */
1189 xfs_flush_inodes(mp);
1190 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1191 }
1192 if (error)
1193 goto out_release_inode;
1194
1195 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1196 unlock_dp_on_error = true;
1197
1198 xfs_defer_init(&dfops, &first_block);
1199
1200 /*
1201 * Reserve disk quota and the inode.
1202 */
1203 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1204 pdqp, resblks, 1, 0);
1205 if (error)
1206 goto out_trans_cancel;
1207
1208 /*
1209 * A newly created regular or special file just has one directory
1210 * entry pointing to them, but a directory also the "." entry
1211 * pointing to itself.
1212 */
1213 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1214 if (error)
1215 goto out_trans_cancel;
1216
1217 /*
1218 * Now we join the directory inode to the transaction. We do not do it
1219 * earlier because xfs_dir_ialloc might commit the previous transaction
1220 * (and release all the locks). An error from here on will result in
1221 * the transaction cancel unlocking dp so don't do it explicitly in the
1222 * error path.
1223 */
1224 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1225 unlock_dp_on_error = false;
1226
1227 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1228 &first_block, &dfops, resblks ?
1229 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1230 if (error) {
1231 ASSERT(error != -ENOSPC);
1232 goto out_trans_cancel;
1233 }
1234 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1235 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1236
1237 if (is_dir) {
1238 error = xfs_dir_init(tp, ip, dp);
1239 if (error)
1240 goto out_bmap_cancel;
1241
1242 error = xfs_bumplink(tp, dp);
1243 if (error)
1244 goto out_bmap_cancel;
1245 }
1246
1247 /*
1248 * If this is a synchronous mount, make sure that the
1249 * create transaction goes to disk before returning to
1250 * the user.
1251 */
1252 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1253 xfs_trans_set_sync(tp);
1254
1255 /*
1256 * Attach the dquot(s) to the inodes and modify them incore.
1257 * These ids of the inode couldn't have changed since the new
1258 * inode has been locked ever since it was created.
1259 */
1260 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1261
1262 error = xfs_defer_finish(&tp, &dfops);
1263 if (error)
1264 goto out_bmap_cancel;
1265
1266 error = xfs_trans_commit(tp);
1267 if (error)
1268 goto out_release_inode;
1269
1270 xfs_qm_dqrele(udqp);
1271 xfs_qm_dqrele(gdqp);
1272 xfs_qm_dqrele(pdqp);
1273
1274 *ipp = ip;
1275 return 0;
1276
1277 out_bmap_cancel:
1278 xfs_defer_cancel(&dfops);
1279 out_trans_cancel:
1280 xfs_trans_cancel(tp);
1281 out_release_inode:
1282 /*
1283 * Wait until after the current transaction is aborted to finish the
1284 * setup of the inode and release the inode. This prevents recursive
1285 * transactions and deadlocks from xfs_inactive.
1286 */
1287 if (ip) {
1288 xfs_finish_inode_setup(ip);
1289 IRELE(ip);
1290 }
1291
1292 xfs_qm_dqrele(udqp);
1293 xfs_qm_dqrele(gdqp);
1294 xfs_qm_dqrele(pdqp);
1295
1296 if (unlock_dp_on_error)
1297 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1298 return error;
1299}
1300
1301int
1302xfs_create_tmpfile(
1303 struct xfs_inode *dp,
1304 umode_t mode,
1305 struct xfs_inode **ipp)
1306{
1307 struct xfs_mount *mp = dp->i_mount;
1308 struct xfs_inode *ip = NULL;
1309 struct xfs_trans *tp = NULL;
1310 int error;
1311 prid_t prid;
1312 struct xfs_dquot *udqp = NULL;
1313 struct xfs_dquot *gdqp = NULL;
1314 struct xfs_dquot *pdqp = NULL;
1315 struct xfs_trans_res *tres;
1316 uint resblks;
1317
1318 if (XFS_FORCED_SHUTDOWN(mp))
1319 return -EIO;
1320
1321 prid = xfs_get_initial_prid(dp);
1322
1323 /*
1324 * Make sure that we have allocated dquot(s) on disk.
1325 */
1326 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1327 xfs_kgid_to_gid(current_fsgid()), prid,
1328 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1329 &udqp, &gdqp, &pdqp);
1330 if (error)
1331 return error;
1332
1333 resblks = XFS_IALLOC_SPACE_RES(mp);
1334 tres = &M_RES(mp)->tr_create_tmpfile;
1335
1336 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1337 if (error)
1338 goto out_release_inode;
1339
1340 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1341 pdqp, resblks, 1, 0);
1342 if (error)
1343 goto out_trans_cancel;
1344
1345 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
1346 if (error)
1347 goto out_trans_cancel;
1348
1349 if (mp->m_flags & XFS_MOUNT_WSYNC)
1350 xfs_trans_set_sync(tp);
1351
1352 /*
1353 * Attach the dquot(s) to the inodes and modify them incore.
1354 * These ids of the inode couldn't have changed since the new
1355 * inode has been locked ever since it was created.
1356 */
1357 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1358
1359 error = xfs_iunlink(tp, ip);
1360 if (error)
1361 goto out_trans_cancel;
1362
1363 error = xfs_trans_commit(tp);
1364 if (error)
1365 goto out_release_inode;
1366
1367 xfs_qm_dqrele(udqp);
1368 xfs_qm_dqrele(gdqp);
1369 xfs_qm_dqrele(pdqp);
1370
1371 *ipp = ip;
1372 return 0;
1373
1374 out_trans_cancel:
1375 xfs_trans_cancel(tp);
1376 out_release_inode:
1377 /*
1378 * Wait until after the current transaction is aborted to finish the
1379 * setup of the inode and release the inode. This prevents recursive
1380 * transactions and deadlocks from xfs_inactive.
1381 */
1382 if (ip) {
1383 xfs_finish_inode_setup(ip);
1384 IRELE(ip);
1385 }
1386
1387 xfs_qm_dqrele(udqp);
1388 xfs_qm_dqrele(gdqp);
1389 xfs_qm_dqrele(pdqp);
1390
1391 return error;
1392}
1393
1394int
1395xfs_link(
1396 xfs_inode_t *tdp,
1397 xfs_inode_t *sip,
1398 struct xfs_name *target_name)
1399{
1400 xfs_mount_t *mp = tdp->i_mount;
1401 xfs_trans_t *tp;
1402 int error;
1403 struct xfs_defer_ops dfops;
1404 xfs_fsblock_t first_block;
1405 int resblks;
1406
1407 trace_xfs_link(tdp, target_name);
1408
1409 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1410
1411 if (XFS_FORCED_SHUTDOWN(mp))
1412 return -EIO;
1413
1414 error = xfs_qm_dqattach(sip, 0);
1415 if (error)
1416 goto std_return;
1417
1418 error = xfs_qm_dqattach(tdp, 0);
1419 if (error)
1420 goto std_return;
1421
1422 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1423 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1424 if (error == -ENOSPC) {
1425 resblks = 0;
1426 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1427 }
1428 if (error)
1429 goto std_return;
1430
1431 xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1432
1433 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1434 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1435
1436 /*
1437 * If we are using project inheritance, we only allow hard link
1438 * creation in our tree when the project IDs are the same; else
1439 * the tree quota mechanism could be circumvented.
1440 */
1441 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1442 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1443 error = -EXDEV;
1444 goto error_return;
1445 }
1446
1447 if (!resblks) {
1448 error = xfs_dir_canenter(tp, tdp, target_name);
1449 if (error)
1450 goto error_return;
1451 }
1452
1453 xfs_defer_init(&dfops, &first_block);
1454
1455 /*
1456 * Handle initial link state of O_TMPFILE inode
1457 */
1458 if (VFS_I(sip)->i_nlink == 0) {
1459 error = xfs_iunlink_remove(tp, sip);
1460 if (error)
1461 goto error_return;
1462 }
1463
1464 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1465 &first_block, &dfops, resblks);
1466 if (error)
1467 goto error_return;
1468 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1469 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1470
1471 error = xfs_bumplink(tp, sip);
1472 if (error)
1473 goto error_return;
1474
1475 /*
1476 * If this is a synchronous mount, make sure that the
1477 * link transaction goes to disk before returning to
1478 * the user.
1479 */
1480 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1481 xfs_trans_set_sync(tp);
1482
1483 error = xfs_defer_finish(&tp, &dfops);
1484 if (error) {
1485 xfs_defer_cancel(&dfops);
1486 goto error_return;
1487 }
1488
1489 return xfs_trans_commit(tp);
1490
1491 error_return:
1492 xfs_trans_cancel(tp);
1493 std_return:
1494 return error;
1495}
1496
1497/* Clear the reflink flag and the cowblocks tag if possible. */
1498static void
1499xfs_itruncate_clear_reflink_flags(
1500 struct xfs_inode *ip)
1501{
1502 struct xfs_ifork *dfork;
1503 struct xfs_ifork *cfork;
1504
1505 if (!xfs_is_reflink_inode(ip))
1506 return;
1507 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1508 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1509 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1510 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1511 if (cfork->if_bytes == 0)
1512 xfs_inode_clear_cowblocks_tag(ip);
1513}
1514
1515/*
1516 * Free up the underlying blocks past new_size. The new size must be smaller
1517 * than the current size. This routine can be used both for the attribute and
1518 * data fork, and does not modify the inode size, which is left to the caller.
1519 *
1520 * The transaction passed to this routine must have made a permanent log
1521 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1522 * given transaction and start new ones, so make sure everything involved in
1523 * the transaction is tidy before calling here. Some transaction will be
1524 * returned to the caller to be committed. The incoming transaction must
1525 * already include the inode, and both inode locks must be held exclusively.
1526 * The inode must also be "held" within the transaction. On return the inode
1527 * will be "held" within the returned transaction. This routine does NOT
1528 * require any disk space to be reserved for it within the transaction.
1529 *
1530 * If we get an error, we must return with the inode locked and linked into the
1531 * current transaction. This keeps things simple for the higher level code,
1532 * because it always knows that the inode is locked and held in the transaction
1533 * that returns to it whether errors occur or not. We don't mark the inode
1534 * dirty on error so that transactions can be easily aborted if possible.
1535 */
1536int
1537xfs_itruncate_extents(
1538 struct xfs_trans **tpp,
1539 struct xfs_inode *ip,
1540 int whichfork,
1541 xfs_fsize_t new_size)
1542{
1543 struct xfs_mount *mp = ip->i_mount;
1544 struct xfs_trans *tp = *tpp;
1545 struct xfs_defer_ops dfops;
1546 xfs_fsblock_t first_block;
1547 xfs_fileoff_t first_unmap_block;
1548 xfs_fileoff_t last_block;
1549 xfs_filblks_t unmap_len;
1550 int error = 0;
1551 int done = 0;
1552
1553 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1554 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1555 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1556 ASSERT(new_size <= XFS_ISIZE(ip));
1557 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1558 ASSERT(ip->i_itemp != NULL);
1559 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1560 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1561
1562 trace_xfs_itruncate_extents_start(ip, new_size);
1563
1564 /*
1565 * Since it is possible for space to become allocated beyond
1566 * the end of the file (in a crash where the space is allocated
1567 * but the inode size is not yet updated), simply remove any
1568 * blocks which show up between the new EOF and the maximum
1569 * possible file size. If the first block to be removed is
1570 * beyond the maximum file size (ie it is the same as last_block),
1571 * then there is nothing to do.
1572 */
1573 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1574 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1575 if (first_unmap_block == last_block)
1576 return 0;
1577
1578 ASSERT(first_unmap_block < last_block);
1579 unmap_len = last_block - first_unmap_block + 1;
1580 while (!done) {
1581 xfs_defer_init(&dfops, &first_block);
1582 error = xfs_bunmapi(tp, ip,
1583 first_unmap_block, unmap_len,
1584 xfs_bmapi_aflag(whichfork),
1585 XFS_ITRUNC_MAX_EXTENTS,
1586 &first_block, &dfops,
1587 &done);
1588 if (error)
1589 goto out_bmap_cancel;
1590
1591 /*
1592 * Duplicate the transaction that has the permanent
1593 * reservation and commit the old transaction.
1594 */
1595 xfs_defer_ijoin(&dfops, ip);
1596 error = xfs_defer_finish(&tp, &dfops);
1597 if (error)
1598 goto out_bmap_cancel;
1599
1600 error = xfs_trans_roll_inode(&tp, ip);
1601 if (error)
1602 goto out;
1603 }
1604
1605 if (whichfork == XFS_DATA_FORK) {
1606 /* Remove all pending CoW reservations. */
1607 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1608 first_unmap_block, last_block, true);
1609 if (error)
1610 goto out;
1611
1612 xfs_itruncate_clear_reflink_flags(ip);
1613 }
1614
1615 /*
1616 * Always re-log the inode so that our permanent transaction can keep
1617 * on rolling it forward in the log.
1618 */
1619 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1620
1621 trace_xfs_itruncate_extents_end(ip, new_size);
1622
1623out:
1624 *tpp = tp;
1625 return error;
1626out_bmap_cancel:
1627 /*
1628 * If the bunmapi call encounters an error, return to the caller where
1629 * the transaction can be properly aborted. We just need to make sure
1630 * we're not holding any resources that we were not when we came in.
1631 */
1632 xfs_defer_cancel(&dfops);
1633 goto out;
1634}
1635
1636int
1637xfs_release(
1638 xfs_inode_t *ip)
1639{
1640 xfs_mount_t *mp = ip->i_mount;
1641 int error;
1642
1643 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1644 return 0;
1645
1646 /* If this is a read-only mount, don't do this (would generate I/O) */
1647 if (mp->m_flags & XFS_MOUNT_RDONLY)
1648 return 0;
1649
1650 if (!XFS_FORCED_SHUTDOWN(mp)) {
1651 int truncated;
1652
1653 /*
1654 * If we previously truncated this file and removed old data
1655 * in the process, we want to initiate "early" writeout on
1656 * the last close. This is an attempt to combat the notorious
1657 * NULL files problem which is particularly noticeable from a
1658 * truncate down, buffered (re-)write (delalloc), followed by
1659 * a crash. What we are effectively doing here is
1660 * significantly reducing the time window where we'd otherwise
1661 * be exposed to that problem.
1662 */
1663 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1664 if (truncated) {
1665 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1666 if (ip->i_delayed_blks > 0) {
1667 error = filemap_flush(VFS_I(ip)->i_mapping);
1668 if (error)
1669 return error;
1670 }
1671 }
1672 }
1673
1674 if (VFS_I(ip)->i_nlink == 0)
1675 return 0;
1676
1677 if (xfs_can_free_eofblocks(ip, false)) {
1678
1679 /*
1680 * Check if the inode is being opened, written and closed
1681 * frequently and we have delayed allocation blocks outstanding
1682 * (e.g. streaming writes from the NFS server), truncating the
1683 * blocks past EOF will cause fragmentation to occur.
1684 *
1685 * In this case don't do the truncation, but we have to be
1686 * careful how we detect this case. Blocks beyond EOF show up as
1687 * i_delayed_blks even when the inode is clean, so we need to
1688 * truncate them away first before checking for a dirty release.
1689 * Hence on the first dirty close we will still remove the
1690 * speculative allocation, but after that we will leave it in
1691 * place.
1692 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;
		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise. We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 */
		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
			error = xfs_free_eofblocks(ip);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			if (error)
				return error;
		}

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the
	 * finobt tree, but as the finobt feature predates the per-AG
	 * reservation support a degraded file system might not have enough
	 * space for the reservation at mount time. In that case try to dip
	 * into the reserved pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_inotbt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &first_block);
	error = xfs_ifree(tp, ip, &dfops);
	if (error) {
		/*
		 * If we fail to free the inode, shut down. The cancel
		 * might do that, but we need to make sure. Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point. There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_defer_finish(&tp, &dfops);
	if (error) {
		xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
			__func__, error);
		xfs_defer_cancel(&dfops);
	}
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the vnode's reference count goes to zero. If the
 * file has been unlinked, then it must now be truncated. Also, we clear
 * all of the read-ahead state kept for the inode here since the file is
 * now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	struct xfs_ifork	*cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_is_reflink_inode(ip) && cow_ifp->if_bytes > 0)
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		return;
	}

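	/*
	 * A regular file needs an inactivation truncate if it has any
	 * on-disk or in-core size, allocated extents, or outstanding
	 * delalloc blocks left to clean out before the inode can be freed.
	 */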
	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now. The code calls a routine that recursively deconstructs the
	 * attribute fork. It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	ASSERT(!ip->i_afp);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}

/*
 * This is called when the inode's link count goes to 0 or we are creating a
 * tmpfile via O_TMPFILE. In the tmpfile case the link count is dropped to
 * zero by the VFS only after we've created the file successfully, so we have
 * to add the inode to the unlinked list while its link count is still
 * non-zero.
 *
 * We place the on-disk inode on a list in the AGI. It will be pulled from this
 * list when the inode is freed.
 */
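/*
 * Illustrative example: with XFS_AGI_UNLINKED_BUCKETS of 64, an inode with
 * agino 131 hashes to bucket 131 % 64 = 3. Insertion is at the head of the
 * bucket list: the new inode's di_next_unlinked takes over the old bucket
 * head, and the bucket then points at the new inode.
 */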
STATIC int
xfs_iunlink(
	struct xfs_trans *tp,
	struct xfs_inode *ip)
{
	xfs_mount_t	*mp = tp->t_mountp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(VFS_I(ip)->i_mode != 0);

	/*
	 * Get the agi buffer first. It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to. Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
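	/*
	 * Log only the byte range of this one bucket pointer within the
	 * AGI buffer rather than dirtying the whole buffer.
	 */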
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
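/*
 * Two cases below: if the inode is at the head of its bucket we simply
 * repoint the bucket at our di_next_unlinked; otherwise we walk the singly
 * linked list until we find our predecessor and splice ourselves out.
 * A sketch of the second case, removing C:
 *
 *	bucket -> A -> B -> C -> D    becomes    bucket -> A -> B -> D
 */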
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first. It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list. Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
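		/*
		 * Walk the list, keeping last_ibp/last_dip pointing at the
		 * previous element so we can splice our inode out of the
		 * chain below.
		 */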
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					__func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list. Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t		*free_ip,
	xfs_trans_t		*tp,
	struct xfs_icluster	*xic)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			inodes_per_cluster;
	int			nbufs;
	int			i, j;
	int			ioffset;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	struct xfs_log_item	*lip;
	struct xfs_perag	*pag;
	xfs_ino_t		inum;

	inum = xic->first_ino;
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
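	/*
	 * Work out the cluster geometry: how many filesystem blocks and
	 * inodes make up one cluster buffer, and how many such buffers
	 * cover a whole inode allocation chunk.
	 */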
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	nbufs = mp->m_ialloc_blks / blks_per_cluster;

	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
		/*
		 * The allocation bitmap tells us which inodes of the chunk were
		 * physically allocated. Skip the cluster if an inode falls into
		 * a sparse region.
		 */
		ioffset = inum - xic->first_ino;
		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
			ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
			continue;
		}

		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_UNMAPPED);

		if (!bp)
			return -ENOMEM;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk. That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes to it. That means it will
		 * never be dispatched for IO. If it is, we want to know about
		 * it, and we want it to fail. We can achieve this by adding a
		 * write verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale. These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them. By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
		}

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion. This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < inodes_per_cluster; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup. We need to check under the
			 * i_flags_lock for a valid inode here. Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale. If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip) {
				if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
					rcu_read_unlock();
					delay(1);
					goto retry;
				}

				/*
				 * Check the inode number again in case we're
				 * racing with freeing in xfs_reclaim_inode().
				 * See the comments in that function for more
				 * information as to why the initial check is
				 * not sufficient.
				 */
				if (ip->i_ino != inum + i) {
					xfs_iunlock(ip, XFS_ILOCK_EXCL);
					rcu_read_unlock();
					continue;
				}
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_fsync_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
	return 0;
}

/*
 * Free any local-format buffers sitting around before we reset to
 * extents format.
 */
static inline void
xfs_ifree_local_data(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
		return;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
}

/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it. This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	struct xfs_defer_ops	*dfops)
{
	int			error;
	struct xfs_icluster	xic = { 0 };

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, dfops, &xic);
	if (error)
		return error;

	xfs_ifree_local_data(ip, XFS_DATA_FORK);
	xfs_ifree_local_data(ip, XFS_ATTR_FORK);

	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_flags2 = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	/* Don't attempt to replay owner changes for a deleted inode */
	ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);

	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	VFS_I(ip)->i_generation++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (xic.deleted)
		error = xfs_ifree_cluster(ip, tp, &xic);

	return error;
}

/*
 * This is called to unpin an inode. The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

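	/*
	 * Sleep on __XFS_IPINNED_BIT until the pin count drops to zero;
	 * the wakeup comes from the inode log item's unpin path once the
	 * last pin is removed.
	 */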
	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wq_entry);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * Removing an inode from the namespace involves removing the directory entry
 * and dropping the link count on the inode. Removing the directory entry can
 * result in locking an AGF (directory blocks were freed) and removing a link
 * count can result in placing the inode on an unlinked list which results in
 * locking an AGI.
 *
 * The big problem here is that we have an ordering constraint on AGF and AGI
 * locking - inode allocation locks the AGI, then can allocate a new extent
 * for new inodes, locking the AGF after the AGI. Similarly, freeing the inode
 * removes the inode from the unlinked list, requiring that we lock the AGI
 * first, and then freeing the inode can result in an inode chunk being freed
 * and hence freeing disk space requiring that we lock an AGF.
 *
 * Hence the ordering that is imposed by other parts of the code is AGI before
 * AGF. This means we cannot remove the directory entry before we drop the
 * inode reference count and put it on the unlinked list as this results in a
 * lock order of AGF then AGI, and this can deadlock against inode allocation
 * and freeing. Therefore we must drop the link counts before we remove the
 * directory entry.
 *
 * This is still safe from a transactional point of view - it is not until we
 * get to xfs_defer_finish() that we have the possibility of multiple
 * transactions in this operation. Hence as long as we remove the directory
 * entry and drop the link count in the first transaction of the remove
 * operation, there are no transactional constraints on the ordering here.
 */
int
xfs_remove(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t		*tp = NULL;
	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
	int			error = 0;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	uint			resblks;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		goto std_return;

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s). If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
					&tp);
	}
	if (error) {
		ASSERT(error != -ENOSPC);
		goto std_return;
	}

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	if (is_dir) {
		ASSERT(VFS_I(ip)->i_nlink >= 2);
		if (VFS_I(ip)->i_nlink != 2) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}

		/* Drop the link from ip's "..". */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_trans_cancel;

		/* Drop the "." link from ip to self. */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_trans_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here. For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/* Drop the link from dp to ip. */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_trans_cancel;

	xfs_defer_init(&dfops, &first_block);
	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
					&first_block, &dfops, resblks);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto std_return;

	if (is_dir && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

 out_bmap_cancel:
	xfs_defer_cancel(&dfops);
 out_trans_cancel:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
#define __XFS_SORT_INODES	5
STATIC void
xfs_sort_for_rename(
	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
	struct xfs_inode	*ip1,	/* in: inode of old entry */
	struct xfs_inode	*ip2,	/* in: inode of new entry */
	struct xfs_inode	*wip,	/* in: whiteout inode */
	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
	int			*num_inodes)  /* in/out: inodes in array */
{
	int			i, j;

	ASSERT(*num_inodes == __XFS_SORT_INODES);
	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));

	/*
	 * i_tab contains a list of pointers to inodes. We initialize
	 * the table here & we'll sort it. We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates. e.g., dp1 == dp2.
	 */
	i = 0;
	i_tab[i++] = dp1;
	i_tab[i++] = dp2;
	i_tab[i++] = ip1;
	if (ip2)
		i_tab[i++] = ip2;
	if (wip)
		i_tab[i++] = wip;
	*num_inodes = i;

	/*
	 * Sort the elements via bubble sort. (Remember, there are at
	 * most 5 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < *num_inodes; i++) {
		for (j = 1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				struct xfs_inode *temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}
}
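/*
 * Worked example (illustrative): for a rename with dp1->i_ino = 30,
 * dp2->i_ino = 12, ip1->i_ino = 77 and no target or whiteout inode, the
 * sorted table comes out as { 12, 30, 77 } with *num_inodes = 3, and the
 * inode locks are then taken in that ascending order.
 */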

static int
xfs_finish_rename(
	struct xfs_trans	*tp,
	struct xfs_defer_ops	*dfops)
{
	int			error;

	/*
	 * If this is a synchronous mount, make sure that the rename
	 * transaction goes to disk before returning to the user.
	 */
	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_defer_finish(&tp, dfops);
	if (error) {
		xfs_defer_cancel(dfops);
		xfs_trans_cancel(tp);
		return error;
	}

	return xfs_trans_commit(tp);
}
/*
 * xfs_cross_rename()
 *
 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2()
 * syscall.
 */
STATIC int
xfs_cross_rename(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp1,
	struct xfs_name		*name1,
	struct xfs_inode	*ip1,
	struct xfs_inode	*dp2,
	struct xfs_name		*name2,
	struct xfs_inode	*ip2,
	struct xfs_defer_ops	*dfops,
	xfs_fsblock_t		*first_block,
	int			spaceres)
{
	int		error = 0;
	int		ip1_flags = 0;
	int		ip2_flags = 0;
	int		dp2_flags = 0;

	/* Swap inode number for dirent in first parent */
	error = xfs_dir_replace(tp, dp1, name1,
				ip2->i_ino,
				first_block, dfops, spaceres);
	if (error)
		goto out_trans_abort;

	/* Swap inode number for dirent in second parent */
	error = xfs_dir_replace(tp, dp2, name2,
				ip1->i_ino,
				first_block, dfops, spaceres);
	if (error)
		goto out_trans_abort;

	/*
	 * If we're renaming one or more directories across different parents,
	 * update the respective ".." entries (and link counts) to match the
	 * new parents.
	 */
	if (dp1 != dp2) {
		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;

		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
						dp1->i_ino, first_block,
						dfops, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip2 ".." reference to dp1 */
			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
				error = xfs_droplink(tp, dp2);
				if (error)
					goto out_trans_abort;
				error = xfs_bumplink(tp, dp1);
				if (error)
					goto out_trans_abort;
			}

			/*
			 * Although ip1 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup ones) will properly
			 * notice the change.
			 */
			ip1_flags |= XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
		}

		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
						dp2->i_ino, first_block,
						dfops, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip1 ".." reference to dp2 */
			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
				error = xfs_droplink(tp, dp1);
				if (error)
					goto out_trans_abort;
				error = xfs_bumplink(tp, dp2);
				if (error)
					goto out_trans_abort;
			}

			/*
			 * Although ip2 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup ones) will properly
			 * notice the change.
			 */
			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_CHG;
		}
	}

	if (ip1_flags) {
		xfs_trans_ichgtime(tp, ip1, ip1_flags);
		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
	}
	if (ip2_flags) {
		xfs_trans_ichgtime(tp, ip2, ip2_flags);
		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
	}
	if (dp2_flags) {
		xfs_trans_ichgtime(tp, dp2, dp2_flags);
		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
	return xfs_finish_rename(tp, dfops);

out_trans_abort:
	xfs_defer_cancel(dfops);
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 * crash between allocating the inode and linking it into the rename
 * transaction, recovery will free the inode and we won't leak it.
 */
static int
xfs_rename_alloc_whiteout(
	struct xfs_inode	*dp,
	struct xfs_inode	**wip)
{
	struct xfs_inode	*tmpfile;
	int			error;

	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
	if (error)
		return error;

	/*
	 * Prepare the tmpfile inode as if it were created through the VFS.
	 * Otherwise, the link increment paths will complain about nlink 0->1.
	 * Drop the link count as done by d_tmpfile(), complete the inode
	 * setup and flag it as linkable.
	 */
	drop_nlink(VFS_I(tmpfile));
	xfs_setup_iops(tmpfile);
	xfs_finish_inode_setup(tmpfile);
	VFS_I(tmpfile)->i_state |= I_LINKABLE;

	*wip = tmpfile;
	return 0;
}

/*
 * xfs_rename
 */
int
xfs_rename(
	struct xfs_inode	*src_dp,
	struct xfs_name		*src_name,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*target_dp,
	struct xfs_name		*target_name,
	struct xfs_inode	*target_ip,
	unsigned int		flags)
{
	struct xfs_mount	*mp = src_dp->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	struct xfs_inode	*wip = NULL;		/* whiteout inode */
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			num_inodes = __XFS_SORT_INODES;
	bool			new_parent = (src_dp != target_dp);
	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
	int			spaceres;
	int			error;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	if ((flags & RENAME_EXCHANGE) && !target_ip)
		return -EINVAL;

	/*
	 * If we are doing a whiteout operation, allocate the whiteout inode
	 * we will be placing at the target and ensure the type is set
	 * appropriately.
	 */
	if (flags & RENAME_WHITEOUT) {
		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
		error = xfs_rename_alloc_whiteout(target_dp, &wip);
		if (error)
			return error;

		/* setup target dirent info as whiteout */
		src_name->type = XFS_DIR3_FT_CHRDEV;
	}

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
				inodes, &num_inodes);

	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
	if (error == -ENOSPC) {
		spaceres = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_wip;

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error)
		goto out_trans_cancel;

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
	if (wip)
		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
		error = -EXDEV;
		goto out_trans_cancel;
	}

	xfs_defer_init(&dfops, &first_block);

	/* RENAME_EXCHANGE is unique from here on. */
	if (flags & RENAME_EXCHANGE)
		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
					target_dp, target_name, target_ip,
					&dfops, &first_block, spaceres);

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		if (!spaceres) {
			error = xfs_dir_canenter(tp, target_dp, target_name);
			if (error)
				goto out_trans_cancel;
		}
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
					   src_ip->i_ino, &first_block,
					   &dfops, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto out_bmap_cancel;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (VFS_I(target_ip)->i_nlink > 2)) {
				error = -EEXIST;
				goto out_trans_cancel;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino,
					&first_block, &dfops, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto out_bmap_cancel;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto out_bmap_cancel;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino,
					&first_block, &dfops, spaceres);
		ASSERT(error != -EEXIST);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp. This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {

		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
	if (wip) {
		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
					&first_block, &dfops, spaceres);
	} else
		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					   &first_block, &dfops, spaceres);
	if (error)
		goto out_bmap_cancel;

	/*
	 * For whiteouts, we need to bump the link count on the whiteout
	 * inode. This means that failures all the way up to this point leave
	 * the inode on the unlinked list and so cleanup is a simple matter of
	 * dropping the remaining reference to it. If we fail here after
	 * bumping the link count, we're shutting down the filesystem so we'll
	 * never see the intermediate state on disk.
	 */
	if (wip) {
		ASSERT(VFS_I(wip)->i_nlink == 0);
		error = xfs_bumplink(tp, wip);
		if (error)
			goto out_bmap_cancel;
		error = xfs_iunlink_remove(tp, wip);
		if (error)
			goto out_bmap_cancel;
		xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);

		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused
		 * in future.
		 */
		VFS_I(wip)->i_state &= ~I_LINKABLE;
	}

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	error = xfs_finish_rename(tp, &dfops);
	if (wip)
		IRELE(wip);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_wip:
	if (wip)
		IRELE(wip);
	return error;
}

STATIC int
xfs_iflush_cluster(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			cilist_size;
	struct xfs_inode	**cilist;
	struct xfs_inode	*cip;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

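	/*
	 * Build a list of every in-core inode that could share this cluster
	 * buffer. The allocation may fail (KM_MAYFAIL); if it does we simply
	 * skip the opportunistic clustering.
	 */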
	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
	cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
	if (!cilist)
		goto out_put;

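	/*
	 * Round the inode number down to the first inode of its cluster;
	 * the gang lookup below scans forward from that index for up to
	 * inodes_per_cluster entries.
	 */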
	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		cip = cilist[i];
		if (cip == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&cip->i_flags_lock);
		if (!cip->i_ino ||
		    __xfs_iflags_test(cip, XFS_ISTALE)) {
			spin_unlock(&cip->i_flags_lock);
			continue;
		}

		/*
		 * Once we fall off the end of the cluster, no point checking
		 * any more inodes in the list because they will also all be
		 * outside the cluster.
		 */
		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
			spin_unlock(&cip->i_flags_lock);
			break;
		}
		spin_unlock(&cip->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing. These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
			continue;

		/*
		 * Try to get locks. If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(cip)) {
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(cip)) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Check the inode number again, just to be certain we are not
		 * racing with freeing in xfs_reclaim_inode(). See the comments
		 * in that function for more information as to why the initial
		 * check is not sufficient.
		 */
		if (!cip->i_ino) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed. First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(cip)) {
			int	error;
			error = xfs_iflush_int(cip, bp);
			if (error) {
				xfs_iunlock(cip, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(cip);
		}
		xfs_iunlock(cip, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(mp, xs_icluster_flushcnt);
		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(cilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop. Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer. If it was delwri, just release it --
	 * brelse can handle it with no problems. If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them. Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			bp->b_flags &= ~XBF_DONE;
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, -EIO);
			xfs_buf_ioend(bp);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(cip, false);
	kmem_free(cilist);
	xfs_perag_put(pag);
	return -EFSCORRUPTED;
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held. The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp = NULL;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(mp, xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode. We are doing a try-lock
	 * operation here, so we may get an EAGAIN error. In that case, we
	 * simply want to return with the inode still dirty.
	 *
	 * If we get any other error, we effectively have a corruption situation
	 * and we cannot flush the inode, so we treat it the same as failing
	 * xfs_iflush_int().
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error == -EAGAIN) {
		xfs_ifunlock(ip);
		return error;
	}
	if (error)
		goto corrupt_out;

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering: see if other inodes can be gathered into this
	 * write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	if (bp)
		xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = -EFSCORRUPTED;
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}

/*
 * If there are inline format data / attr forks attached to this inode,
 * make sure they're not corrupt.
 */
bool
xfs_inode_verify_forks(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*ifp;
	xfs_failaddr_t		fa;

	fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
	if (fa) {
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
				ifp->if_u1.if_data, ifp->if_bytes, fa);
		return false;
	}

	fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
	if (fa) {
		ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
				ifp ? ifp->if_u1.if_data : NULL,
				ifp ? ifp->if_bytes : 0, fa);
		return false;
	}
	return true;
}

STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);
	ASSERT(ip->i_d.di_version > 1);

	/* set *dip = inode's place in the buffer */
	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing. We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery. This is redundant as we now log every change and
	 * hence this can't happen but we need to still do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/* Check the inline fork data before we write out. */
	if (!xfs_inode_verify_forks(ip))
		goto corrupt_out;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode. We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk. If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field. When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk. As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits. Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL. Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer. This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(!list_empty(&bp->b_li_list));
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return -EFSCORRUPTED;
}