1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * Copyright (c) 2012 Red Hat, Inc.
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include "xfs.h"
20#include "xfs_fs.h"
21#include "xfs_shared.h"
22#include "xfs_format.h"
23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h"
25#include "xfs_bit.h"
26#include "xfs_mount.h"
27#include "xfs_da_format.h"
28#include "xfs_defer.h"
29#include "xfs_inode.h"
30#include "xfs_btree.h"
31#include "xfs_trans.h"
32#include "xfs_extfree_item.h"
33#include "xfs_alloc.h"
34#include "xfs_bmap.h"
35#include "xfs_bmap_util.h"
36#include "xfs_bmap_btree.h"
37#include "xfs_rtalloc.h"
38#include "xfs_error.h"
39#include "xfs_quota.h"
40#include "xfs_trans_space.h"
41#include "xfs_trace.h"
42#include "xfs_icache.h"
43#include "xfs_log.h"
44#include "xfs_rmap_btree.h"
45#include "xfs_iomap.h"
46#include "xfs_reflink.h"
47#include "xfs_refcount.h"
48
49/* Kernel only BMAP related definitions and functions */
50
51/*
52 * Convert the given file system block to a disk block. We have to treat it
53 * differently based on whether the file is a real time file or not, because the
54 * bmap code does.
55 */
56xfs_daddr_t
57xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
58{
59	return (XFS_IS_REALTIME_INODE(ip) ?
60		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
61 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
62}
63
64/*
65 * Routine to zero an extent on disk allocated to the specific inode.
66 *
67 * The VFS functions take a linearised filesystem block offset, so we have to
68 * convert the sparse xfs fsb to the right format first.
69 * VFS types are real funky, too.
70 */
71int
72xfs_zero_extent(
73 struct xfs_inode *ip,
74 xfs_fsblock_t start_fsb,
75 xfs_off_t count_fsb)
76{
77 struct xfs_mount *mp = ip->i_mount;
78 xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
79 sector_t block = XFS_BB_TO_FSBT(mp, sector);
80
81 return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
82 block << (mp->m_super->s_blocksize_bits - 9),
83 count_fsb << (mp->m_super->s_blocksize_bits - 9),
84 GFP_NOFS, 0);
85}
86
87#ifdef CONFIG_XFS_RT
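/*
 * Allocate blocks from the realtime device for a bmap allocation request:
 * align the request to the realtime extent size, pick a target block, call
 * the realtime allocator and, on success, update the inode block count and
 * quota accounting.
 */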
88int
89xfs_bmap_rtalloc(
90 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
91{
92 int error; /* error return value */
93 xfs_mount_t *mp; /* mount point structure */
94 xfs_extlen_t prod = 0; /* product factor for allocators */
95 xfs_extlen_t ralen = 0; /* realtime allocation length */
96 xfs_extlen_t align; /* minimum allocation alignment */
97 xfs_rtblock_t rtb;
98
99 mp = ap->ip->i_mount;
100 align = xfs_get_extsz_hint(ap->ip);
101 prod = align / mp->m_sb.sb_rextsize;
102 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
103 align, 1, ap->eof, 0,
104 ap->conv, &ap->offset, &ap->length);
105 if (error)
106 return error;
107 ASSERT(ap->length);
108 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
109
110 /*
111 * If the offset & length are not perfectly aligned
112 * then kill prod, it will just get us in trouble.
113 */
114 if (do_mod(ap->offset, align) || ap->length % align)
115 prod = 1;
116 /*
117 * Set ralen to be the actual requested length in rtextents.
118 */
119 ralen = ap->length / mp->m_sb.sb_rextsize;
120 /*
121 * If the old value was close enough to MAXEXTLEN that
122 * we rounded up to it, cut it back so it's valid again.
123 * Note that if it's a really large request (bigger than
124 * MAXEXTLEN), we don't hear about that number, and can't
125 * adjust the starting point to match it.
126 */
127 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
128 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
129
130 /*
131 * Lock out modifications to both the RT bitmap and summary inodes
132 */
133 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
134 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
135 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
136 xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
137
138 /*
139 * If it's an allocation to an empty file at offset 0,
140 * pick an extent that will space things out in the rt area.
141 */
142 if (ap->eof && ap->offset == 0) {
143 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
144
145 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
146 if (error)
147 return error;
148 ap->blkno = rtx * mp->m_sb.sb_rextsize;
149 } else {
150 ap->blkno = 0;
151 }
152
153 xfs_bmap_adjacent(ap);
154
155 /*
156 * Realtime allocation, done through xfs_rtallocate_extent.
157 */
158 do_div(ap->blkno, mp->m_sb.sb_rextsize);
159 rtb = ap->blkno;
160 ap->length = ralen;
161 error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
162 &ralen, ap->wasdel, prod, &rtb);
163 if (error)
164 return error;
165
166 ap->blkno = rtb;
167 if (ap->blkno != NULLFSBLOCK) {
168 ap->blkno *= mp->m_sb.sb_rextsize;
169 ralen *= mp->m_sb.sb_rextsize;
170 ap->length = ralen;
171 ap->ip->i_d.di_nblocks += ralen;
172 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
173 if (ap->wasdel)
174 ap->ip->i_delayed_blks -= ralen;
175 /*
176 * Adjust the disk quota also. This was reserved
177 * earlier.
178 */
179 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
180 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
181 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
182
183 /* Zero the extent if we were asked to do so */
184 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
185 error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
186 if (error)
187 return error;
188 }
189 } else {
190 ap->length = 0;
191 }
192 return 0;
193}
194#endif /* CONFIG_XFS_RT */
195
196/*
197 * Check if the endoff is outside the last extent. If so the caller will grow
198 * the allocation to a stripe unit boundary. All offsets are considered outside
199 * the end of file for an empty fork, so 1 is returned in *eof in that case.
200 */
201int
202xfs_bmap_eof(
203 struct xfs_inode *ip,
204 xfs_fileoff_t endoff,
205 int whichfork,
206 int *eof)
207{
208 struct xfs_bmbt_irec rec;
209 int error;
210
211 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
212 if (error || *eof)
213 return error;
214
215 *eof = endoff >= rec.br_startoff + rec.br_blockcount;
216 return 0;
217}
218
219/*
220 * Extent tree block counting routines.
221 */
222
223/*
224 * Count leaf blocks given a range of extent records. Delayed allocation
225 * extents are not counted towards the totals.
226 */
227xfs_extnum_t
228xfs_bmap_count_leaves(
229 struct xfs_ifork *ifp,
230 xfs_filblks_t *count)
231{
232 struct xfs_iext_cursor icur;
233 struct xfs_bmbt_irec got;
234 xfs_extnum_t numrecs = 0;
235
236 for_each_xfs_iext(ifp, &icur, &got) {
237 if (!isnullstartblock(got.br_startblock)) {
238 *count += got.br_blockcount;
239 numrecs++;
240 }
241 }
242
243 return numrecs;
244}
245
246/*
247 * Count leaf blocks given a range of extent records originally
248 * in btree format.
249 */
250STATIC void
251xfs_bmap_disk_count_leaves(
252 struct xfs_mount *mp,
253 struct xfs_btree_block *block,
254 int numrecs,
255 xfs_filblks_t *count)
256{
257 int b;
258 xfs_bmbt_rec_t *frp;
259
260 for (b = 1; b <= numrecs; b++) {
261 frp = XFS_BMBT_REC_ADDR(mp, block, b);
262 *count += xfs_bmbt_disk_get_blockcount(frp);
263 }
264}
265
266/*
267 * Recursively walks each level of a btree
268 * to count total fsblocks in use.
269 */
270STATIC int
271xfs_bmap_count_tree(
272 struct xfs_mount *mp,
273 struct xfs_trans *tp,
274 struct xfs_ifork *ifp,
275 xfs_fsblock_t blockno,
276 int levelin,
277 xfs_extnum_t *nextents,
278 xfs_filblks_t *count)
279{
280 int error;
281 struct xfs_buf *bp, *nbp;
282 int level = levelin;
283 __be64 *pp;
284 xfs_fsblock_t bno = blockno;
285 xfs_fsblock_t nextbno;
286 struct xfs_btree_block *block, *nextblock;
287 int numrecs;
288
289 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
290 &xfs_bmbt_buf_ops);
291 if (error)
292 return error;
293 *count += 1;
294 block = XFS_BUF_TO_BLOCK(bp);
295
296 if (--level) {
297 /* Not at node above leaves, count this level of nodes */
298 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
299 while (nextbno != NULLFSBLOCK) {
300 error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
301 XFS_BMAP_BTREE_REF,
302 &xfs_bmbt_buf_ops);
303 if (error)
304 return error;
305 *count += 1;
306 nextblock = XFS_BUF_TO_BLOCK(nbp);
307 nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
308 xfs_trans_brelse(tp, nbp);
309 }
310
311 /* Dive to the next level */
312 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
313 bno = be64_to_cpu(*pp);
314 error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
315 count);
316 if (error) {
317 xfs_trans_brelse(tp, bp);
318 XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
319 XFS_ERRLEVEL_LOW, mp);
320 return -EFSCORRUPTED;
321 }
322 xfs_trans_brelse(tp, bp);
323 } else {
324 /* count all level 1 nodes and their leaves */
325 for (;;) {
326 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
327 numrecs = be16_to_cpu(block->bb_numrecs);
328 (*nextents) += numrecs;
329 xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
330 xfs_trans_brelse(tp, bp);
331 if (nextbno == NULLFSBLOCK)
332 break;
333 bno = nextbno;
334 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
335 XFS_BMAP_BTREE_REF,
336 &xfs_bmbt_buf_ops);
337 if (error)
338 return error;
339 *count += 1;
340 block = XFS_BUF_TO_BLOCK(bp);
341 }
342 }
343 return 0;
344}
345
346/*
347 * Count fsblocks of the given fork. Delayed allocation extents are
348 * not counted towards the totals.
349 */
350int
351xfs_bmap_count_blocks(
352 struct xfs_trans *tp,
353 struct xfs_inode *ip,
354 int whichfork,
355 xfs_extnum_t *nextents,
356 xfs_filblks_t *count)
357{
358 struct xfs_mount *mp; /* file system mount structure */
359 __be64 *pp; /* pointer to block address */
360 struct xfs_btree_block *block; /* current btree block */
361 struct xfs_ifork *ifp; /* fork structure */
362 xfs_fsblock_t bno; /* block # of "block" */
363 int level; /* btree level, for checking */
364 int error;
365
366 bno = NULLFSBLOCK;
367 mp = ip->i_mount;
368 *nextents = 0;
369 *count = 0;
370 ifp = XFS_IFORK_PTR(ip, whichfork);
371 if (!ifp)
372 return 0;
373
374 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
375 case XFS_DINODE_FMT_EXTENTS:
376 *nextents = xfs_bmap_count_leaves(ifp, count);
377 return 0;
378 case XFS_DINODE_FMT_BTREE:
379 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
380 error = xfs_iread_extents(tp, ip, whichfork);
381 if (error)
382 return error;
383 }
384
385 /*
386 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
387 */
388 block = ifp->if_broot;
389 level = be16_to_cpu(block->bb_level);
390 ASSERT(level > 0);
391 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
392 bno = be64_to_cpu(*pp);
393 ASSERT(bno != NULLFSBLOCK);
394 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
395 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
396
397 error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
398 nextents, count);
399 if (error) {
400 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
401 XFS_ERRLEVEL_LOW, mp);
402 return -EFSCORRUPTED;
403 }
404 return 0;
405 }
406
407 return 0;
408}
409
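/*
 * Format one extent record into the next free kgetbmap slot, flagging
 * delalloc, unwritten (preallocated) and shared extents, and advance the
 * caller's bmv offset/length window past it.
 */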
410static int
411xfs_getbmap_report_one(
412 struct xfs_inode *ip,
413 struct getbmapx *bmv,
414 struct kgetbmap *out,
415 int64_t bmv_end,
416 struct xfs_bmbt_irec *got)
417{
418 struct kgetbmap *p = out + bmv->bmv_entries;
419 bool shared = false, trimmed = false;
420 int error;
421
422 error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
423 if (error)
424 return error;
425
426 if (isnullstartblock(got->br_startblock) ||
427 got->br_startblock == DELAYSTARTBLOCK) {
428 /*
429 * Delalloc extents that start beyond EOF can occur due to
430 * speculative EOF allocation when the delalloc extent is larger
431 * than the largest freespace extent at conversion time. These
432 * extents cannot be converted by data writeback, so can exist
433 * here even if we are not supposed to be finding delalloc
434 * extents.
435 */
436 if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
437 ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
438
439 p->bmv_oflags |= BMV_OF_DELALLOC;
440 p->bmv_block = -2;
441 } else {
442 p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
443 }
444
445 if (got->br_state == XFS_EXT_UNWRITTEN &&
446 (bmv->bmv_iflags & BMV_IF_PREALLOC))
447 p->bmv_oflags |= BMV_OF_PREALLOC;
448
449 if (shared)
450 p->bmv_oflags |= BMV_OF_SHARED;
451
452 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
453 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
454
455 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
456 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
457 bmv->bmv_entries++;
458 return 0;
459}
460
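/*
 * Emit an output record for the hole covering [bno, end), unless the caller
 * asked us to skip holes with BMV_IF_NO_HOLES.
 */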
461static void
462xfs_getbmap_report_hole(
463 struct xfs_inode *ip,
464 struct getbmapx *bmv,
465 struct kgetbmap *out,
466 int64_t bmv_end,
467 xfs_fileoff_t bno,
468 xfs_fileoff_t end)
469{
470 struct kgetbmap *p = out + bmv->bmv_entries;
471
472 if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
473 return;
474
475 p->bmv_block = -1;
476 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
477 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
478
479 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
480 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
481 bmv->bmv_entries++;
482}
483
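/*
 * The output is full once the requested length has been consumed or all but
 * one of the caller's bmv_count slots have been used.
 */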
484static inline bool
485xfs_getbmap_full(
486 struct getbmapx *bmv)
487{
488 return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
489}
490
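/*
 * Advance @rec past the part that was just reported so the remainder of the
 * extent (up to @total_end) can be reported as a separate entry.  Returns
 * false once the whole extent has been consumed.
 */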
491static bool
492xfs_getbmap_next_rec(
493 struct xfs_bmbt_irec *rec,
494 xfs_fileoff_t total_end)
495{
496 xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
497
498 if (end == total_end)
499 return false;
500
501 rec->br_startoff += rec->br_blockcount;
502 if (!isnullstartblock(rec->br_startblock) &&
503 rec->br_startblock != DELAYSTARTBLOCK)
504 rec->br_startblock += rec->br_blockcount;
505 rec->br_blockcount = total_end - end;
506 return true;
507}
508
509/*
510 * Get inode's extents as described in bmv, and format for output.
511 * Calls formatter to fill the user's buffer until all extents
512 * are mapped, until the passed-in bmv->bmv_count slots have
513 * been filled, or until the formatter short-circuits the loop,
514 * if it is tracking filled-in extents on its own.
515 */
516int /* error code */
517xfs_getbmap(
518 struct xfs_inode *ip,
519 struct getbmapx *bmv, /* user bmap structure */
520 struct kgetbmap *out)
521{
522 struct xfs_mount *mp = ip->i_mount;
523 int iflags = bmv->bmv_iflags;
524 int whichfork, lock, error = 0;
525 int64_t bmv_end, max_len;
526 xfs_fileoff_t bno, first_bno;
527 struct xfs_ifork *ifp;
528 struct xfs_bmbt_irec got, rec;
529 xfs_filblks_t len;
530 struct xfs_iext_cursor icur;
531
532 if (bmv->bmv_iflags & ~BMV_IF_VALID)
533 return -EINVAL;
534#ifndef DEBUG
535 /* Only allow CoW fork queries if we're debugging. */
536 if (iflags & BMV_IF_COWFORK)
537 return -EINVAL;
538#endif
539 if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
540 return -EINVAL;
541
542 if (bmv->bmv_length < -1)
543 return -EINVAL;
544 bmv->bmv_entries = 0;
545 if (bmv->bmv_length == 0)
546 return 0;
547
548 if (iflags & BMV_IF_ATTRFORK)
549 whichfork = XFS_ATTR_FORK;
550 else if (iflags & BMV_IF_COWFORK)
551 whichfork = XFS_COW_FORK;
552 else
553 whichfork = XFS_DATA_FORK;
554 ifp = XFS_IFORK_PTR(ip, whichfork);
555
556 xfs_ilock(ip, XFS_IOLOCK_SHARED);
557 switch (whichfork) {
558 case XFS_ATTR_FORK:
559 if (!XFS_IFORK_Q(ip))
560 goto out_unlock_iolock;
561
562 max_len = 1LL << 32;
563 lock = xfs_ilock_attr_map_shared(ip);
564 break;
565 case XFS_COW_FORK:
566 /* No CoW fork? Just return */
567 if (!ifp)
568 goto out_unlock_iolock;
569
570 if (xfs_get_cowextsz_hint(ip))
571 max_len = mp->m_super->s_maxbytes;
572 else
573 max_len = XFS_ISIZE(ip);
574
575 lock = XFS_ILOCK_SHARED;
576 xfs_ilock(ip, lock);
577 break;
578 case XFS_DATA_FORK:
579 if (!(iflags & BMV_IF_DELALLOC) &&
580 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
581 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
582 if (error)
583 goto out_unlock_iolock;
584
585 /*
586 * Even after flushing the inode, there can still be
587 * delalloc blocks on the inode beyond EOF due to
588 * speculative preallocation. These are not removed
589 * until the release function is called or the inode
590 * is inactivated. Hence we cannot assert here that
591 * ip->i_delayed_blks == 0.
592 */
593 }
594
595 if (xfs_get_extsz_hint(ip) ||
596 (ip->i_d.di_flags &
597 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
598 max_len = mp->m_super->s_maxbytes;
599 else
600 max_len = XFS_ISIZE(ip);
601
602 lock = xfs_ilock_data_map_shared(ip);
603 break;
604 }
605
606 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
607 case XFS_DINODE_FMT_EXTENTS:
608 case XFS_DINODE_FMT_BTREE:
609 break;
610 case XFS_DINODE_FMT_LOCAL:
611 /* Local format inode forks report no extents. */
612 goto out_unlock_ilock;
613 default:
614 error = -EINVAL;
615 goto out_unlock_ilock;
616 }
617
618 if (bmv->bmv_length == -1) {
619 max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
620 bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
621 }
622
623 bmv_end = bmv->bmv_offset + bmv->bmv_length;
624
625 first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
626 len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
627
628 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
629 error = xfs_iread_extents(NULL, ip, whichfork);
630 if (error)
631 goto out_unlock_ilock;
632 }
633
634 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
635 /*
636 * Report a whole-file hole if the delalloc flag is set to
637 * stay compatible with the old implementation.
638 */
639 if (iflags & BMV_IF_DELALLOC)
640 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
641 XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
642 goto out_unlock_ilock;
643 }
644
645 while (!xfs_getbmap_full(bmv)) {
646 xfs_trim_extent(&got, first_bno, len);
647
648 /*
649 * Report an entry for a hole if this extent doesn't directly
650 * follow the previous one.
651 */
652 if (got.br_startoff > bno) {
653 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
654 got.br_startoff);
655 if (xfs_getbmap_full(bmv))
656 break;
657 }
658
659 /*
660 * In order to report shared extents accurately, we report each
661 * distinct shared / unshared part of a single bmbt record with
662 * an individual getbmapx record.
663 */
664 bno = got.br_startoff + got.br_blockcount;
665 rec = got;
666 do {
667 error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
668 &rec);
669 if (error || xfs_getbmap_full(bmv))
670 goto out_unlock_ilock;
671 } while (xfs_getbmap_next_rec(&rec, bno));
672
673 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
674 xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
675
676 out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
677
678 if (whichfork != XFS_ATTR_FORK && bno < end &&
679 !xfs_getbmap_full(bmv)) {
680 xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
681 bno, end);
682 }
683 break;
684 }
685
686 if (bno >= first_bno + len)
687 break;
688 }
689
690out_unlock_ilock:
691 xfs_iunlock(ip, lock);
692out_unlock_iolock:
693 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
694 return error;
695}
696
697/*
698 * Dead simple method of punching delayed allocation blocks from a range in
699 * the inode. Walks a block at a time so will be slow, but is only executed in
700 * rare error cases so the overhead is not critical. This will always punch out
701 * both the start and end blocks, even if the ranges only partially overlap
702 * them, so it is up to the caller to ensure that partial blocks are not
703 * passed in.
704 */
705int
706xfs_bmap_punch_delalloc_range(
707 struct xfs_inode *ip,
708 xfs_fileoff_t start_fsb,
709 xfs_fileoff_t length)
710{
711 xfs_fileoff_t remaining = length;
712 int error = 0;
713
714 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
715
716 do {
717 int done;
718 xfs_bmbt_irec_t imap;
719 int nimaps = 1;
720 xfs_fsblock_t firstblock;
721 struct xfs_defer_ops dfops;
722
723 /*
724 * Map the range first and check that it is a delalloc extent
725 * before trying to unmap the range. Otherwise we will be
726 * trying to remove a real extent (which requires a
727 * transaction) or a hole, which is probably a bad idea...
728 */
729 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
730 XFS_BMAPI_ENTIRE);
731
732 if (error) {
733 /* something screwed, just bail */
734 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
735 xfs_alert(ip->i_mount,
736 "Failed delalloc mapping lookup ino %lld fsb %lld.",
737 ip->i_ino, start_fsb);
738 }
739 break;
740 }
741 if (!nimaps) {
742 /* nothing there */
743 goto next_block;
744 }
745 if (imap.br_startblock != DELAYSTARTBLOCK) {
746 /* been converted, ignore */
747 goto next_block;
748 }
749 WARN_ON(imap.br_blockcount == 0);
750
751 /*
752 * Note: while we initialise the firstblock/dfops pair, they
753 * should never be used because blocks should never be
754 * allocated or freed for a delalloc extent and hence we don't
755 * need to cancel or finish them after the xfs_bunmapi() call.
756 */
757 xfs_defer_init(&dfops, &firstblock);
758 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
759 &dfops, &done);
760 if (error)
761 break;
762
763 ASSERT(!xfs_defer_has_unfinished_work(&dfops));
764next_block:
765 start_fsb++;
766 remaining--;
767 } while (remaining > 0);
768
769 return error;
770}
771
772/*
773 * Test whether it is appropriate to check an inode for and free post EOF
774 * blocks. The 'force' parameter determines whether we should also consider
775 * regular files that are marked preallocated or append-only.
776 */
777bool
778xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
779{
780 /* prealloc/delalloc exists only on regular files */
781 if (!S_ISREG(VFS_I(ip)->i_mode))
782 return false;
783
784 /*
785 * Zero sized files with no cached pages and no delalloc blocks will not
786 * have speculative prealloc/delalloc blocks to remove.
787 */
788 if (VFS_I(ip)->i_size == 0 &&
789 VFS_I(ip)->i_mapping->nrpages == 0 &&
790 ip->i_delayed_blks == 0)
791 return false;
792
793 /* If we haven't read in the extent list, then don't do it now. */
794 if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
795 return false;
796
797 /*
798 * Do not free real preallocated or append-only files unless the file
799 * has delalloc blocks and we are forced to remove them.
800 */
801 if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
802 if (!force || ip->i_delayed_blks == 0)
803 return false;
804
805 return true;
806}
807
808/*
809 * This is called to free any blocks beyond eof. The caller must hold
810 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
811 * reference to the inode.
812 */
813int
814xfs_free_eofblocks(
815 struct xfs_inode *ip)
816{
817 struct xfs_trans *tp;
818 int error;
819 xfs_fileoff_t end_fsb;
820 xfs_fileoff_t last_fsb;
821 xfs_filblks_t map_len;
822 int nimaps;
823 struct xfs_bmbt_irec imap;
824 struct xfs_mount *mp = ip->i_mount;
825
826 /*
827 * Figure out if there are any blocks beyond the end
828 * of the file. If not, then there is nothing to do.
829 */
830 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
831 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
832 if (last_fsb <= end_fsb)
833 return 0;
834 map_len = last_fsb - end_fsb;
835
836 nimaps = 1;
837 xfs_ilock(ip, XFS_ILOCK_SHARED);
838 error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
839 xfs_iunlock(ip, XFS_ILOCK_SHARED);
840
841 /*
842 * If there are blocks after the end of file, truncate the file to its
843 * current size to free them up.
844 */
845 if (!error && (nimaps != 0) &&
846 (imap.br_startblock != HOLESTARTBLOCK ||
847 ip->i_delayed_blks)) {
848 /*
849 * Attach the dquots to the inode up front.
850 */
851 error = xfs_qm_dqattach(ip, 0);
852 if (error)
853 return error;
854
855 /* wait on dio to ensure i_size has settled */
856 inode_dio_wait(VFS_I(ip));
857
858 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
859 &tp);
860 if (error) {
861 ASSERT(XFS_FORCED_SHUTDOWN(mp));
862 return error;
863 }
864
865 xfs_ilock(ip, XFS_ILOCK_EXCL);
866 xfs_trans_ijoin(tp, ip, 0);
867
868 /*
869 * Do not update the on-disk file size. If we update the
870 * on-disk file size and then the system crashes before the
871 * contents of the file are flushed to disk then the files
872 * may be full of holes (i.e. the NULL files bug).
873 */
874 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
875 XFS_ISIZE(ip));
876 if (error) {
877 /*
878 * If we get an error at this point we simply don't
879 * bother truncating the file.
880 */
881 xfs_trans_cancel(tp);
882 } else {
883 error = xfs_trans_commit(tp);
884 if (!error)
885 xfs_inode_clear_eofblocks_tag(ip);
886 }
887
888 xfs_iunlock(ip, XFS_ILOCK_EXCL);
889 }
890 return error;
891}
892
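/*
 * Preallocate blocks for the byte range [offset, offset + len) of the file,
 * one transaction at a time, until the whole range is mapped or an error
 * (e.g. ENOSPC) occurs.
 */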
893int
894xfs_alloc_file_space(
895 struct xfs_inode *ip,
896 xfs_off_t offset,
897 xfs_off_t len,
898 int alloc_type)
899{
900 xfs_mount_t *mp = ip->i_mount;
901 xfs_off_t count;
902 xfs_filblks_t allocated_fsb;
903 xfs_filblks_t allocatesize_fsb;
904 xfs_extlen_t extsz, temp;
905 xfs_fileoff_t startoffset_fsb;
906 xfs_fsblock_t firstfsb;
907 int nimaps;
908 int quota_flag;
909 int rt;
910 xfs_trans_t *tp;
911 xfs_bmbt_irec_t imaps[1], *imapp;
912 struct xfs_defer_ops dfops;
913 uint qblocks, resblks, resrtextents;
914 int error;
915
916 trace_xfs_alloc_file_space(ip);
917
918 if (XFS_FORCED_SHUTDOWN(mp))
919 return -EIO;
920
921 error = xfs_qm_dqattach(ip, 0);
922 if (error)
923 return error;
924
925 if (len <= 0)
926 return -EINVAL;
927
928 rt = XFS_IS_REALTIME_INODE(ip);
929 extsz = xfs_get_extsz_hint(ip);
930
931 count = len;
932 imapp = &imaps[0];
933 nimaps = 1;
934 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
935 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
936
937 /*
938 * Allocate file space until done or until there is an error
939 */
940 while (allocatesize_fsb && !error) {
941 xfs_fileoff_t s, e;
942
943 /*
944 * Determine space reservations for data/realtime.
945 */
946 if (unlikely(extsz)) {
947 s = startoffset_fsb;
948 do_div(s, extsz);
949 s *= extsz;
950 e = startoffset_fsb + allocatesize_fsb;
951 if ((temp = do_mod(startoffset_fsb, extsz)))
952 e += temp;
953 if ((temp = do_mod(e, extsz)))
954 e += extsz - temp;
955 } else {
956 s = 0;
957 e = allocatesize_fsb;
958 }
959
960 /*
961 * The transaction reservation is limited to a 32-bit block
962 * count, hence we need to limit the number of blocks we are
963 * trying to reserve to avoid an overflow. We can't allocate
964 * more than @nimaps extents, and an extent is limited on disk
965 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
966 */
967 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
968 if (unlikely(rt)) {
969 resrtextents = qblocks = resblks;
970 resrtextents /= mp->m_sb.sb_rextsize;
971 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
972 quota_flag = XFS_QMOPT_RES_RTBLKS;
973 } else {
974 resrtextents = 0;
975 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
976 quota_flag = XFS_QMOPT_RES_REGBLKS;
977 }
978
979 /*
980 * Allocate and setup the transaction.
981 */
982 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
983 resrtextents, 0, &tp);
984
985 /*
986 * Check for running out of space
987 */
988 if (error) {
989 /*
990 * Free the transaction structure.
991 */
992 ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
993 break;
994 }
995 xfs_ilock(ip, XFS_ILOCK_EXCL);
996 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
997 0, quota_flag);
998 if (error)
999 goto error1;
1000
1001 xfs_trans_ijoin(tp, ip, 0);
1002
1003 xfs_defer_init(&dfops, &firstfsb);
1004 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1005 allocatesize_fsb, alloc_type, &firstfsb,
1006 resblks, imapp, &nimaps, &dfops);
1007 if (error)
1008 goto error0;
1009
1010 /*
1011 * Complete the transaction
1012 */
1013 error = xfs_defer_finish(&tp, &dfops);
1014 if (error)
1015 goto error0;
1016
1017 error = xfs_trans_commit(tp);
1018 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1019 if (error)
1020 break;
1021
1022 allocated_fsb = imapp->br_blockcount;
1023
1024 if (nimaps == 0) {
1025 error = -ENOSPC;
1026 break;
1027 }
1028
1029 startoffset_fsb += allocated_fsb;
1030 allocatesize_fsb -= allocated_fsb;
1031 }
1032
1033 return error;
1034
1035error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1036 xfs_defer_cancel(&dfops);
1037 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1038
1039error1: /* Just cancel transaction */
1040 xfs_trans_cancel(tp);
1041 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1042 return error;
1043}
1044
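/*
 * Unmap up to len_fsb blocks starting at startoffset_fsb in a single
 * transaction with quota reservation; *done is set once the whole range has
 * been unmapped.
 */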
1045static int
1046xfs_unmap_extent(
1047 struct xfs_inode *ip,
1048 xfs_fileoff_t startoffset_fsb,
1049 xfs_filblks_t len_fsb,
1050 int *done)
1051{
1052 struct xfs_mount *mp = ip->i_mount;
1053 struct xfs_trans *tp;
1054 struct xfs_defer_ops dfops;
1055 xfs_fsblock_t firstfsb;
1056 uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1057 int error;
1058
1059 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1060 if (error) {
1061 ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1062 return error;
1063 }
1064
1065 xfs_ilock(ip, XFS_ILOCK_EXCL);
1066 error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1067 ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1068 if (error)
1069 goto out_trans_cancel;
1070
1071 xfs_trans_ijoin(tp, ip, 0);
1072
1073 xfs_defer_init(&dfops, &firstfsb);
1074 error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1075 &dfops, done);
1076 if (error)
1077 goto out_bmap_cancel;
1078
1079 xfs_defer_ijoin(&dfops, ip);
1080 error = xfs_defer_finish(&tp, &dfops);
1081 if (error)
1082 goto out_bmap_cancel;
1083
1084 error = xfs_trans_commit(tp);
1085out_unlock:
1086 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1087 return error;
1088
1089out_bmap_cancel:
1090 xfs_defer_cancel(&dfops);
1091out_trans_cancel:
1092 xfs_trans_cancel(tp);
1093 goto out_unlock;
1094}
1095
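/*
 * For realtime files that cannot use unwritten extents, pull the unmap range
 * in to realtime extent boundaries so that we never free part of an rt
 * extent.
 */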
1096static int
1097xfs_adjust_extent_unmap_boundaries(
1098 struct xfs_inode *ip,
1099 xfs_fileoff_t *startoffset_fsb,
1100 xfs_fileoff_t *endoffset_fsb)
1101{
1102 struct xfs_mount *mp = ip->i_mount;
1103 struct xfs_bmbt_irec imap;
1104 int nimap, error;
1105 xfs_extlen_t mod = 0;
1106
1107 nimap = 1;
1108 error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1109 if (error)
1110 return error;
1111
1112 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1113 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1114 mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
1115 if (mod)
1116 *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1117 }
1118
1119 nimap = 1;
1120 error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1121 if (error)
1122 return error;
1123
1124 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1125 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1126 mod++;
1127 if (mod && mod != mp->m_sb.sb_rextsize)
1128 *endoffset_fsb -= mod;
1129 }
1130
1131 return 0;
1132}
1133
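/*
 * Wait for any pending direct I/O, then write back and remove the page cache
 * over the aligned range that is about to be unmapped.
 */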
1134static int
1135xfs_flush_unmap_range(
1136 struct xfs_inode *ip,
1137 xfs_off_t offset,
1138 xfs_off_t len)
1139{
1140 struct xfs_mount *mp = ip->i_mount;
1141 struct inode *inode = VFS_I(ip);
1142 xfs_off_t rounding, start, end;
1143 int error;
1144
1145 /* wait for the completion of any pending DIOs */
1146 inode_dio_wait(inode);
1147
1148 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1149 start = round_down(offset, rounding);
1150 end = round_up(offset + len, rounding) - 1;
1151
1152 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1153 if (error)
1154 return error;
1155 truncate_pagecache_range(inode, start, end);
1156 return 0;
1157}
1158
1159int
1160xfs_free_file_space(
1161 struct xfs_inode *ip,
1162 xfs_off_t offset,
1163 xfs_off_t len)
1164{
1165 struct xfs_mount *mp = ip->i_mount;
1166 xfs_fileoff_t startoffset_fsb;
1167 xfs_fileoff_t endoffset_fsb;
1168 int done = 0, error;
1169
1170 trace_xfs_free_file_space(ip);
1171
1172 error = xfs_qm_dqattach(ip, 0);
1173 if (error)
1174 return error;
1175
1176 if (len <= 0) /* if nothing being freed */
1177 return 0;
1178
1179 error = xfs_flush_unmap_range(ip, offset, len);
1180 if (error)
1181 return error;
1182
1183 startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1184 endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1185
1186 /*
1187 * Need to zero the stuff we're not freeing, on disk. If it's a RT file
1188 * and we can't use unwritten extents then we actually need to zero
1189 * the whole extent, otherwise we just need to take care of the block
1190 * boundaries, and xfs_bunmapi will handle the rest.
1191 */
1192 if (XFS_IS_REALTIME_INODE(ip) &&
1193 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1194 error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1195 &endoffset_fsb);
1196 if (error)
1197 return error;
1198 }
1199
1200 if (endoffset_fsb > startoffset_fsb) {
1201 while (!done) {
1202 error = xfs_unmap_extent(ip, startoffset_fsb,
1203 endoffset_fsb - startoffset_fsb, &done);
1204 if (error)
1205 return error;
1206 }
1207 }
1208
1209 /*
1210 * Now that we've unmapped all full blocks we'll have to zero out any
1211 * partial block at the beginning and/or end. iomap_zero_range is smart
1212 * enough to skip any holes, including those we just created, but we
1213 * must take care not to zero beyond EOF and enlarge i_size.
1214 */
1215 if (offset >= XFS_ISIZE(ip))
1216 return 0;
1217 if (offset + len > XFS_ISIZE(ip))
1218 len = XFS_ISIZE(ip) - offset;
1219 return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
1220}
1221
1222/*
1223 * Preallocate and zero a range of a file. This mechanism has the allocation
1224 * semantics of fallocate and in addition converts data in the range to zeroes.
1225 */
1226int
1227xfs_zero_file_space(
1228 struct xfs_inode *ip,
1229 xfs_off_t offset,
1230 xfs_off_t len)
1231{
1232 struct xfs_mount *mp = ip->i_mount;
1233 uint blksize;
1234 int error;
1235
1236 trace_xfs_zero_file_space(ip);
1237
1238 blksize = 1 << mp->m_sb.sb_blocklog;
1239
1240 /*
1241 * Punch a hole and prealloc the range. We use hole punch rather than
1242 * unwritten extent conversion for two reasons:
1243 *
1244 * 1.) Hole punch handles partial block zeroing for us.
1245 *
1246 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1247 * by virtue of the hole punch.
1248 */
1249 error = xfs_free_file_space(ip, offset, len);
1250 if (error)
1251 goto out;
1252
1253 error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1254 round_up(offset + len, blksize) -
1255 round_down(offset, blksize),
1256 XFS_BMAPI_PREALLOC);
1257out:
1258 return error;
1259
1260}
1261
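/*
 * Common preparation for collapse/insert range: trim post-EOF
 * preallocations, write back and invalidate the page cache from @offset
 * onwards, and cancel any CoW fork extents so they don't end up at the wrong
 * offsets after the shift.
 */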
1262static int
1263xfs_prepare_shift(
1264 struct xfs_inode *ip,
1265 loff_t offset)
1266{
1267 int error;
1268
1269 /*
1270 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1271 * into the accessible region of the file.
1272 */
1273 if (xfs_can_free_eofblocks(ip, true)) {
1274 error = xfs_free_eofblocks(ip);
1275 if (error)
1276 return error;
1277 }
1278
1279 /*
1280 * Write back and invalidate the page cache for the remainder of the file as we're
1281 * about to shift down every extent from offset to EOF.
1282 */
1283 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
1284 if (error)
1285 return error;
1286 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1287 offset >> PAGE_SHIFT, -1);
1288 if (error)
1289 return error;
1290
1291 /*
1292 * Clean out anything hanging around in the cow fork now that
1293 * we've flushed all the dirty data out to disk to avoid having
1294 * CoW extents at the wrong offsets.
1295 */
1296 if (xfs_is_reflink_inode(ip)) {
1297 error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1298 true);
1299 if (error)
1300 return error;
1301 }
1302
1303 return 0;
1304}
1305
1306/*
1307 * xfs_collapse_file_space()
1308 * This routine frees disk space and shifts extents for the given file.
1309 * The first thing we do is free the data blocks in the specified range
1310 * by calling xfs_free_file_space(), which also syncs dirty data
1311 * and invalidates the page cache over the region the collapse range
1312 * is working on. Extent records are then shifted left to cover the hole.
1313 * RETURNS:
1314 * 0 on success
1315 * errno on error
1316 *
1317 */
1318int
1319xfs_collapse_file_space(
1320 struct xfs_inode *ip,
1321 xfs_off_t offset,
1322 xfs_off_t len)
1323{
1324 struct xfs_mount *mp = ip->i_mount;
1325 struct xfs_trans *tp;
1326 int error;
1327 struct xfs_defer_ops dfops;
1328 xfs_fsblock_t first_block;
1329 xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
1330 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
1331 uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1332 bool done = false;
1333
1334 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1335 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1336
1337 trace_xfs_collapse_file_space(ip);
1338
1339 error = xfs_free_file_space(ip, offset, len);
1340 if (error)
1341 return error;
1342
1343 error = xfs_prepare_shift(ip, offset);
1344 if (error)
1345 return error;
1346
1347 while (!error && !done) {
1348 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1349 &tp);
1350 if (error)
1351 break;
1352
1353 xfs_ilock(ip, XFS_ILOCK_EXCL);
1354 error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1355 ip->i_gdquot, ip->i_pdquot, resblks, 0,
1356 XFS_QMOPT_RES_REGBLKS);
1357 if (error)
1358 goto out_trans_cancel;
1359 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1360
1361 xfs_defer_init(&dfops, &first_block);
1362 error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1363 &done, &first_block, &dfops);
1364 if (error)
1365 goto out_bmap_cancel;
1366
1367 error = xfs_defer_finish(&tp, &dfops);
1368 if (error)
1369 goto out_bmap_cancel;
1370 error = xfs_trans_commit(tp);
1371 }
1372
1373 return error;
1374
1375out_bmap_cancel:
1376 xfs_defer_cancel(&dfops);
1377out_trans_cancel:
1378 xfs_trans_cancel(tp);
1379 return error;
1380}
1381
1382/*
1383 * xfs_insert_file_space()
1384 * This routine creates hole space by shifting extents for the given file.
1385 * The first thing we do is sync dirty data and invalidate the page cache
1386 * over the region the insert range is working on. We then split the extent
1387 * in two at the given offset by calling xfs_bmap_split_extent, and
1388 * shift all extent records lying between [offset,
1389 * last allocated extent] to the right to make room for the hole.
1390 * RETURNS:
1391 * 0 on success
1392 * errno on error
1393 */
1394int
1395xfs_insert_file_space(
1396 struct xfs_inode *ip,
1397 loff_t offset,
1398 loff_t len)
1399{
1400 struct xfs_mount *mp = ip->i_mount;
1401 struct xfs_trans *tp;
1402 int error;
1403 struct xfs_defer_ops dfops;
1404 xfs_fsblock_t first_block;
1405 xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
1406 xfs_fileoff_t next_fsb = NULLFSBLOCK;
1407 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
1408 bool done = false;
1409
1410 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1411 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1412
1413 trace_xfs_insert_file_space(ip);
1414
1415 error = xfs_prepare_shift(ip, offset);
1416 if (error)
1417 return error;
1418
1419 /*
1420 * The extent shifting code works on extent granularity. So, if stop_fsb
1421 * is not the starting block of an extent, we need to split the extent at
1422 * stop_fsb.
1423 */
1424 error = xfs_bmap_split_extent(ip, stop_fsb);
1425 if (error)
1426 return error;
1427
1428 while (!error && !done) {
1429 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1430 &tp);
1431 if (error)
1432 break;
1433
1434 xfs_ilock(ip, XFS_ILOCK_EXCL);
1435 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1436 xfs_defer_init(&dfops, &first_block);
1437 error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1438 &done, stop_fsb, &first_block, &dfops);
1439 if (error)
1440 goto out_bmap_cancel;
1441
1442 error = xfs_defer_finish(&tp, &dfops);
1443 if (error)
1444 goto out_bmap_cancel;
1445 error = xfs_trans_commit(tp);
1446 }
1447
1448 return error;
1449
1450out_bmap_cancel:
1451 xfs_defer_cancel(&dfops);
1452 xfs_trans_cancel(tp);
1453 return error;
1454}
1455
1456/*
1457 * We need to check that the format of the data fork in the temporary inode is
1458 * valid for the target inode before doing the swap. This is not a problem with
1459 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1460 * data fork depending on the space the attribute fork is taking so we can get
1461 * invalid formats on the target inode.
1462 *
1463 * E.g. target has space for 7 extents in extent format, temp inode only has
1464 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1465 * btree, but when swapped it needs to be in extent format. Hence we can't just
1466 * blindly swap data forks on attr2 filesystems.
1467 *
1468 * Note that we check the swap in both directions so that we don't end up with
1469 * a corrupt temporary inode, either.
1470 *
1471 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1472 * inode will prevent this situation from occurring, so all we do here is
1473 * reject and log the attempt. Basically, we are putting the responsibility on
1474 * userspace to get this right.
1475 */
1476static int
1477xfs_swap_extents_check_format(
1478 struct xfs_inode *ip, /* target inode */
1479 struct xfs_inode *tip) /* tmp inode */
1480{
1481
1482 /* Should never get a local format */
1483 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1484 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1485 return -EINVAL;
1486
1487 /*
1488 * If the target inode has fewer extents than the temporary inode, then
1489 * why did userspace call us?
1490 */
1491 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1492 return -EINVAL;
1493
1494 /*
1495 * If we have to use the (expensive) rmap swap method, we can
1496 * handle any number of extents and any format.
1497 */
1498 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1499 return 0;
1500
1501 /*
1502 * If the target inode is in extent form and the temp inode is in btree
1503 * form then we will end up with the target inode in the wrong format,
1504 * as we already know there are fewer extents in the temp inode.
1505 */
1506 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1507 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1508 return -EINVAL;
1509
1510 /* Check temp in extent form to max in target */
1511 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1512 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1513 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1514 return -EINVAL;
1515
1516 /* Check target in extent form to max in temp */
1517 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1518 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1519 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1520 return -EINVAL;
1521
1522 /*
1523 * If we are in a btree format, check that the temp root block will fit
1524 * in the target and that it has enough extents to be in btree format
1525 * in the target.
1526 *
1527 * Note that we have to be careful to allow btree->extent conversions
1528 * (a common defrag case) which will occur when the temp inode is in
1529 * extent format...
1530 */
1531 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1532 if (XFS_IFORK_Q(ip) &&
1533 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1534 return -EINVAL;
1535 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1536 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1537 return -EINVAL;
1538 }
1539
1540 /* Reciprocal target->temp btree format checks */
1541 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1542 if (XFS_IFORK_Q(tip) &&
1543 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1544 return -EINVAL;
1545 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1546 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1547 return -EINVAL;
1548 }
1549
1550 return 0;
1551}
1552
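/*
 * Write back and invalidate all cached data for an inode prior to an extent
 * swap; if any page cache remains after invalidation the swap is rejected
 * with -EINVAL.
 */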
1553static int
1554xfs_swap_extent_flush(
1555 struct xfs_inode *ip)
1556{
1557 int error;
1558
1559 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1560 if (error)
1561 return error;
1562 truncate_pagecache_range(VFS_I(ip), 0, -1);
1563
1564 /* Verify O_DIRECT for ftmp */
1565 if (VFS_I(ip)->i_mapping->nrpages)
1566 return -EINVAL;
1567 return 0;
1568}
1569
1570/*
1571 * Move extents from one file to another, when rmap is enabled.
1572 */
1573STATIC int
1574xfs_swap_extent_rmap(
1575 struct xfs_trans **tpp,
1576 struct xfs_inode *ip,
1577 struct xfs_inode *tip)
1578{
1579 struct xfs_bmbt_irec irec;
1580 struct xfs_bmbt_irec uirec;
1581 struct xfs_bmbt_irec tirec;
1582 xfs_fileoff_t offset_fsb;
1583 xfs_fileoff_t end_fsb;
1584 xfs_filblks_t count_fsb;
1585 xfs_fsblock_t firstfsb;
1586 struct xfs_defer_ops dfops;
1587 int error;
1588 xfs_filblks_t ilen;
1589 xfs_filblks_t rlen;
1590 int nimaps;
1591 uint64_t tip_flags2;
1592
1593 /*
1594 * If the source file has shared blocks, we must flag the donor
1595 * file as having shared blocks so that we get the shared-block
1596 * rmap functions when we go to fix up the rmaps. The flags
1597 * will be switched for real later.
1598 */
1599 tip_flags2 = tip->i_d.di_flags2;
1600 if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1601 tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1602
1603 offset_fsb = 0;
1604 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1605 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1606
1607 while (count_fsb) {
1608 /* Read extent from the donor file */
1609 nimaps = 1;
1610 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1611 &nimaps, 0);
1612 if (error)
1613 goto out;
1614 ASSERT(nimaps == 1);
1615 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1616
1617 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1618 ilen = tirec.br_blockcount;
1619
1620 /* Unmap the old blocks in the source file. */
1621 while (tirec.br_blockcount) {
1622 xfs_defer_init(&dfops, &firstfsb);
1623 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1624
1625 /* Read extent from the source file */
1626 nimaps = 1;
1627 error = xfs_bmapi_read(ip, tirec.br_startoff,
1628 tirec.br_blockcount, &irec,
1629 &nimaps, 0);
1630 if (error)
1631 goto out_defer;
1632 ASSERT(nimaps == 1);
1633 ASSERT(tirec.br_startoff == irec.br_startoff);
1634 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1635
1636 /* Trim the extent. */
1637 uirec = tirec;
1638 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1639 tirec.br_blockcount,
1640 irec.br_blockcount);
1641 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1642
1643 /* Remove the mapping from the donor file. */
1644 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1645 tip, &uirec);
1646 if (error)
1647 goto out_defer;
1648
1649 /* Remove the mapping from the source file. */
1650 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1651 ip, &irec);
1652 if (error)
1653 goto out_defer;
1654
1655 /* Map the donor file's blocks into the source file. */
1656 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1657 ip, &uirec);
1658 if (error)
1659 goto out_defer;
1660
1661 /* Map the source file's blocks into the donor file. */
1662 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1663 tip, &irec);
1664 if (error)
1665 goto out_defer;
1666
1667 xfs_defer_ijoin(&dfops, ip);
1668 error = xfs_defer_finish(tpp, &dfops);
1669 if (error)
1670 goto out_defer;
1671
1672 tirec.br_startoff += rlen;
1673 if (tirec.br_startblock != HOLESTARTBLOCK &&
1674 tirec.br_startblock != DELAYSTARTBLOCK)
1675 tirec.br_startblock += rlen;
1676 tirec.br_blockcount -= rlen;
1677 }
1678
1679 /* Roll on... */
1680 count_fsb -= ilen;
1681 offset_fsb += ilen;
1682 }
1683
1684 tip->i_d.di_flags2 = tip_flags2;
1685 return 0;
1686
1687out_defer:
1688 xfs_defer_cancel(&dfops);
1689out:
1690 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1691 tip->i_d.di_flags2 = tip_flags2;
1692 return error;
1693}
1694
1695/* Swap the extents of two files by swapping data forks. */
1696STATIC int
1697xfs_swap_extent_forks(
1698 struct xfs_trans *tp,
1699 struct xfs_inode *ip,
1700 struct xfs_inode *tip,
1701 int *src_log_flags,
1702 int *target_log_flags)
1703{
1704 struct xfs_ifork tempifp, *ifp, *tifp;
1705 xfs_filblks_t aforkblks = 0;
1706 xfs_filblks_t taforkblks = 0;
1707 xfs_extnum_t junk;
1708 uint64_t tmp;
1709 int error;
1710
1711 /*
1712 * Count the number of extended attribute blocks
1713 */
1714 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1715 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1716 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1717 &aforkblks);
1718 if (error)
1719 return error;
1720 }
1721 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1722 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1723 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1724 &taforkblks);
1725 if (error)
1726 return error;
1727 }
1728
1729 /*
1730 * Btree format (v3) inodes have the inode number stamped in the bmbt
1731 * block headers. We can't start changing the bmbt blocks until the
1732 * inode owner change is logged so recovery does the right thing in the
1733 * event of a crash. Set the owner change log flags now and leave the
1734 * bmbt scan as the last step.
1735 */
1736 if (ip->i_d.di_version == 3 &&
1737 ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1738 (*target_log_flags) |= XFS_ILOG_DOWNER;
1739 if (tip->i_d.di_version == 3 &&
1740 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1741 (*src_log_flags) |= XFS_ILOG_DOWNER;
1742
1743 /*
1744 * Swap the data forks of the inodes
1745 */
1746 ifp = &ip->i_df;
1747 tifp = &tip->i_df;
1748 tempifp = *ifp; /* struct copy */
1749 *ifp = *tifp; /* struct copy */
1750 *tifp = tempifp; /* struct copy */
1751
1752 /*
1753 * Fix the on-disk inode values
1754 */
1755 tmp = (uint64_t)ip->i_d.di_nblocks;
1756 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1757 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1758
1759 tmp = (uint64_t) ip->i_d.di_nextents;
1760 ip->i_d.di_nextents = tip->i_d.di_nextents;
1761 tip->i_d.di_nextents = tmp;
1762
1763 tmp = (uint64_t) ip->i_d.di_format;
1764 ip->i_d.di_format = tip->i_d.di_format;
1765 tip->i_d.di_format = tmp;
1766
1767 /*
1768 * The extents in the source inode could still contain speculative
1769 * preallocation beyond EOF (e.g. the file is open but not modified
1770 * while defrag is in progress). In that case, we need to copy over the
1771 * number of delalloc blocks the data fork in the source inode is
1772 * tracking beyond EOF so that when the fork is truncated away when the
1773 * temporary inode is unlinked we don't underrun the i_delayed_blks
1774 * counter on that inode.
1775 */
1776 ASSERT(tip->i_delayed_blks == 0);
1777 tip->i_delayed_blks = ip->i_delayed_blks;
1778 ip->i_delayed_blks = 0;
1779
1780 switch (ip->i_d.di_format) {
1781 case XFS_DINODE_FMT_EXTENTS:
1782 (*src_log_flags) |= XFS_ILOG_DEXT;
1783 break;
1784 case XFS_DINODE_FMT_BTREE:
1785 ASSERT(ip->i_d.di_version < 3 ||
1786 (*src_log_flags & XFS_ILOG_DOWNER));
1787 (*src_log_flags) |= XFS_ILOG_DBROOT;
1788 break;
1789 }
1790
1791 switch (tip->i_d.di_format) {
1792 case XFS_DINODE_FMT_EXTENTS:
1793 (*target_log_flags) |= XFS_ILOG_DEXT;
1794 break;
1795 case XFS_DINODE_FMT_BTREE:
1796 (*target_log_flags) |= XFS_ILOG_DBROOT;
1797 ASSERT(tip->i_d.di_version < 3 ||
1798 (*target_log_flags & XFS_ILOG_DOWNER));
1799 break;
1800 }
1801
1802 return 0;
1803}
1804
1805/*
1806 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1807 * change owner scan attempts to order all modified buffers in the current
1808 * transaction. In the event of ordered buffer failure, the offending buffer is
1809 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1810 * the transaction in this case to replenish the fallback log reservation and
1811 * restart the scan. This process repeats until the scan completes.
1812 */
1813static int
1814xfs_swap_change_owner(
1815 struct xfs_trans **tpp,
1816 struct xfs_inode *ip,
1817 struct xfs_inode *tmpip)
1818{
1819 int error;
1820 struct xfs_trans *tp = *tpp;
1821
1822 do {
1823 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1824 NULL);
1825 /* success or fatal error */
1826 if (error != -EAGAIN)
1827 break;
1828
1829 error = xfs_trans_roll(tpp);
1830 if (error)
1831 break;
1832 tp = *tpp;
1833
1834 /*
1835 * Redirty both inodes so they can relog and keep the log tail
1836 * moving forward.
1837 */
1838 xfs_trans_ijoin(tp, ip, 0);
1839 xfs_trans_ijoin(tp, tmpip, 0);
1840 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1841 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1842 } while (true);
1843
1844 return error;
1845}
1846
1847int
1848xfs_swap_extents(
1849 struct xfs_inode *ip, /* target inode */
1850 struct xfs_inode *tip, /* tmp inode */
1851 struct xfs_swapext *sxp)
1852{
1853 struct xfs_mount *mp = ip->i_mount;
1854 struct xfs_trans *tp;
1855 struct xfs_bstat *sbp = &sxp->sx_stat;
1856 int src_log_flags, target_log_flags;
1857 int error = 0;
1858 int lock_flags;
1859 struct xfs_ifork *cowfp;
1860 uint64_t f;
1861 int resblks = 0;
1862
1863 /*
1864 * Lock the inodes against other IO, page faults and truncate to
1865 * begin with. Then we can safely ensure the inodes are flushed and have
1866 * no page cache. Once we have done this we can take the ilocks and
1867 * do the rest of the checks.
1868 */
1869 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1870 lock_flags = XFS_MMAPLOCK_EXCL;
1871 xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1872
1873 /* Verify that both files have the same format */
1874 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1875 error = -EINVAL;
1876 goto out_unlock;
1877 }
1878
1879 /* Verify both files are either real-time or non-realtime */
1880 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1881 error = -EINVAL;
1882 goto out_unlock;
1883 }
1884
1885 error = xfs_swap_extent_flush(ip);
1886 if (error)
1887 goto out_unlock;
1888 error = xfs_swap_extent_flush(tip);
1889 if (error)
1890 goto out_unlock;
1891
1892 /*
1893 * Extent "swapping" with rmap requires a permanent reservation and
1894 * a block reservation because it's really just a remap operation
1895 * performed with log redo items!
1896 */
1897 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1898 int w = XFS_DATA_FORK;
1899 uint32_t ipnext = XFS_IFORK_NEXTENTS(ip, w);
1900 uint32_t tipnext = XFS_IFORK_NEXTENTS(tip, w);
1901
1902 /*
1903 * Conceptually this shouldn't affect the shape of either bmbt,
1904 * but since we atomically move extents one by one, we reserve
1905 * enough space to rebuild both trees.
1906 */
1907 resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1908 resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1909
1910 /*
1911 * Handle the corner case where either inode might straddle the
1912 * btree format boundary. If so, the inode could bounce between
1913 * btree <-> extent format on unmap -> remap cycles, freeing and
1914 * allocating a bmapbt block each time.
1915 */
1916 if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1917 resblks += XFS_IFORK_MAXEXT(ip, w);
1918 if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1919 resblks += XFS_IFORK_MAXEXT(tip, w);
1920 }
1921 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1922 if (error)
1923 goto out_unlock;
1924
1925 /*
1926 * Lock and join the inodes to the transaction so that transaction commit
1927 * or cancel will unlock the inodes from this point onwards.
1928 */
1929 xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1930 lock_flags |= XFS_ILOCK_EXCL;
1931 xfs_trans_ijoin(tp, ip, 0);
1932 xfs_trans_ijoin(tp, tip, 0);
1933
1934
1935 /* Verify all data are being swapped */
1936 if (sxp->sx_offset != 0 ||
1937 sxp->sx_length != ip->i_d.di_size ||
1938 sxp->sx_length != tip->i_d.di_size) {
1939 error = -EFAULT;
1940 goto out_trans_cancel;
1941 }
1942
1943 trace_xfs_swap_extent_before(ip, 0);
1944 trace_xfs_swap_extent_before(tip, 1);
1945
1946 /* check inode formats now that data is flushed */
1947 error = xfs_swap_extents_check_format(ip, tip);
1948 if (error) {
1949 xfs_notice(mp,
1950 "%s: inode 0x%llx format is incompatible for exchanging.",
1951 __func__, ip->i_ino);
1952 goto out_trans_cancel;
1953 }
1954
1955 /*
1956 * Compare the current change & modify times with those
1957 * passed in. If they differ, we abort this swap.
1958 * This is the mechanism used to assure the calling
1959 * process that the file was not changed out from
1960 * under it.
1961 */
1962 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1963 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1964 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1965 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1966 error = -EBUSY;
1967 goto out_trans_cancel;
1968 }
1969
1970 /*
1971 * Note the trickiness in setting the log flags - we set the owner log
1972 * flag on the opposite inode (i.e. the inode we are setting the new
1973 * owner to be) because once we swap the forks and log that, log
1974 * recovery is going to see the fork as owned by the swapped inode,
1975 * not the pre-swapped inodes.
1976 */
1977 src_log_flags = XFS_ILOG_CORE;
1978 target_log_flags = XFS_ILOG_CORE;
1979
1980 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1981 error = xfs_swap_extent_rmap(&tp, ip, tip);
1982 else
1983 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1984 &target_log_flags);
1985 if (error)
1986 goto out_trans_cancel;
1987
1988 /* Do we have to swap reflink flags? */
1989 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1990 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1991 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1992 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1993 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1994 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1995 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1996 }
1997
1998 /* Swap the cow forks. */
1999 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2000 xfs_extnum_t extnum;
2001
2002 ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2003 ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2004
2005 extnum = ip->i_cnextents;
2006 ip->i_cnextents = tip->i_cnextents;
2007 tip->i_cnextents = extnum;
2008
2009 cowfp = ip->i_cowfp;
2010 ip->i_cowfp = tip->i_cowfp;
2011 tip->i_cowfp = cowfp;
2012
2013 if (ip->i_cowfp && ip->i_cowfp->if_bytes)
2014 xfs_inode_set_cowblocks_tag(ip);
2015 else
2016 xfs_inode_clear_cowblocks_tag(ip);
2017 if (tip->i_cowfp && tip->i_cowfp->if_bytes)
2018 xfs_inode_set_cowblocks_tag(tip);
2019 else
2020 xfs_inode_clear_cowblocks_tag(tip);
2021 }
2022
2023 xfs_trans_log_inode(tp, ip, src_log_flags);
2024 xfs_trans_log_inode(tp, tip, target_log_flags);
2025
2026 /*
2027 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2028 * have inode number owner values in the bmbt blocks that still refer to
2029 * the old inode. Scan each bmbt to fix up the owner values with the
2030 * inode number of the current inode.
2031 */
2032 if (src_log_flags & XFS_ILOG_DOWNER) {
2033 error = xfs_swap_change_owner(&tp, ip, tip);
2034 if (error)
2035 goto out_trans_cancel;
2036 }
2037 if (target_log_flags & XFS_ILOG_DOWNER) {
2038 error = xfs_swap_change_owner(&tp, tip, ip);
2039 if (error)
2040 goto out_trans_cancel;
2041 }
2042
2043 /*
2044 * If this is a synchronous mount, make sure that the
2045 * transaction goes to disk before returning to the user.
2046 */
2047 if (mp->m_flags & XFS_MOUNT_WSYNC)
2048 xfs_trans_set_sync(tp);
2049
2050 error = xfs_trans_commit(tp);
2051
2052 trace_xfs_swap_extent_after(ip, 0);
2053 trace_xfs_swap_extent_after(tip, 1);
2054
2055out_unlock:
2056 xfs_iunlock(ip, lock_flags);
2057 xfs_iunlock(tip, lock_flags);
2058 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2059 return error;
2060
2061out_trans_cancel:
2062 xfs_trans_cancel(tp);
2063 goto out_unlock;
2064}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * Copyright (c) 2012 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7#include "xfs.h"
8#include "xfs_fs.h"
9#include "xfs_shared.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_trans_resv.h"
13#include "xfs_bit.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_inode.h"
17#include "xfs_btree.h"
18#include "xfs_trans.h"
19#include "xfs_alloc.h"
20#include "xfs_bmap.h"
21#include "xfs_bmap_util.h"
22#include "xfs_bmap_btree.h"
23#include "xfs_rtalloc.h"
24#include "xfs_error.h"
25#include "xfs_quota.h"
26#include "xfs_trans_space.h"
27#include "xfs_trace.h"
28#include "xfs_icache.h"
29#include "xfs_iomap.h"
30#include "xfs_reflink.h"
31#include "xfs_rtbitmap.h"
32#include "xfs_rtgroup.h"
33
34/* Kernel only BMAP related definitions and functions */
35
36/*
37 * Convert the given file system block to a disk block. We have to treat it
38 * differently based on whether the file is a real time file or not, because the
39 * bmap code does.
40 */
41xfs_daddr_t
42xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
43{
44 if (XFS_IS_REALTIME_INODE(ip))
45 return xfs_rtb_to_daddr(ip->i_mount, fsb);
46 return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
47}
48
49/*
50 * Routine to zero an extent on disk allocated to the specific inode.
51 */
52int
53xfs_zero_extent(
54 struct xfs_inode *ip,
55 xfs_fsblock_t start_fsb,
56 xfs_off_t count_fsb)
57{
58 return blkdev_issue_zeroout(xfs_inode_buftarg(ip)->bt_bdev,
59 xfs_fsb_to_db(ip, start_fsb),
60 XFS_FSB_TO_BB(ip->i_mount, count_fsb),
61 GFP_KERNEL, 0);
62}
63
64/*
65 * Extent tree block counting routines.
66 */
67
68/*
69 * Count leaf blocks given a range of extent records. Delayed allocation
70 * extents are not counted towards the totals.
71 */
72xfs_extnum_t
73xfs_bmap_count_leaves(
74 struct xfs_ifork *ifp,
75 xfs_filblks_t *count)
76{
77 struct xfs_iext_cursor icur;
78 struct xfs_bmbt_irec got;
79 xfs_extnum_t numrecs = 0;
80
81 for_each_xfs_iext(ifp, &icur, &got) {
82 if (!isnullstartblock(got.br_startblock)) {
83 *count += got.br_blockcount;
84 numrecs++;
85 }
86 }
87
88 return numrecs;
89}
90
91/*
92 * Count fsblocks of the given fork. Delayed allocation extents are
93 * not counted towards the totals.
94 */
95int
96xfs_bmap_count_blocks(
97 struct xfs_trans *tp,
98 struct xfs_inode *ip,
99 int whichfork,
100 xfs_extnum_t *nextents,
101 xfs_filblks_t *count)
102{
103 struct xfs_mount *mp = ip->i_mount;
104 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
105 struct xfs_btree_cur *cur;
106 xfs_filblks_t btblocks = 0;
107 int error;
108
109 *nextents = 0;
110 *count = 0;
111
112 if (!ifp)
113 return 0;
114
115 switch (ifp->if_format) {
116 case XFS_DINODE_FMT_BTREE:
117 error = xfs_iread_extents(tp, ip, whichfork);
118 if (error)
119 return error;
120
121 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
122 error = xfs_btree_count_blocks(cur, &btblocks);
123 xfs_btree_del_cursor(cur, error);
124 if (error)
125 return error;
126
127 /*
128 * xfs_btree_count_blocks includes the root block contained in
129 * the inode fork in @btblocks, so subtract one because we're
130 * only interested in allocated disk blocks.
131 */
132 *count += btblocks - 1;
133
134 fallthrough;
135 case XFS_DINODE_FMT_EXTENTS:
136 *nextents = xfs_bmap_count_leaves(ifp, count);
137 break;
138 }
139
140 return 0;
141}
142
143static int
144xfs_getbmap_report_one(
145 struct xfs_inode *ip,
146 struct getbmapx *bmv,
147 struct kgetbmap *out,
148 int64_t bmv_end,
149 struct xfs_bmbt_irec *got)
150{
151 struct kgetbmap *p = out + bmv->bmv_entries;
152 bool shared = false;
153 int error;
154
155 error = xfs_reflink_trim_around_shared(ip, got, &shared);
156 if (error)
157 return error;
158
159 if (isnullstartblock(got->br_startblock) ||
160 got->br_startblock == DELAYSTARTBLOCK) {
161 /*
162 * Take the flush completion as being a point-in-time snapshot
163 * where there are no delalloc extents, and if any new ones
164 * have been created racily, just skip them as being 'after'
165		 * the flush so that they don't get reported.
166 */
167 if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
168 return 0;
169
170 p->bmv_oflags |= BMV_OF_DELALLOC;
171 p->bmv_block = -2;
172 } else {
173 p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
174 }
175
176 if (got->br_state == XFS_EXT_UNWRITTEN &&
177 (bmv->bmv_iflags & BMV_IF_PREALLOC))
178 p->bmv_oflags |= BMV_OF_PREALLOC;
179
180 if (shared)
181 p->bmv_oflags |= BMV_OF_SHARED;
182
183 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
184 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
185
186 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
187 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
188 bmv->bmv_entries++;
189 return 0;
190}
191
192static void
193xfs_getbmap_report_hole(
194 struct xfs_inode *ip,
195 struct getbmapx *bmv,
196 struct kgetbmap *out,
197 int64_t bmv_end,
198 xfs_fileoff_t bno,
199 xfs_fileoff_t end)
200{
201 struct kgetbmap *p = out + bmv->bmv_entries;
202
203 if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
204 return;
205
206 p->bmv_block = -1;
207 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
208 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
209
210 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
211 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
212 bmv->bmv_entries++;
213}
214
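/*
 * Note that bmv_count counts the caller's whole getbmapx array, including the
 * header record, so at most bmv_count - 1 mapping entries can be returned;
 * treat the output as full once that many entries have been filled or the
 * remaining length is zero.
 */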
215static inline bool
216xfs_getbmap_full(
217 struct getbmapx *bmv)
218{
219 return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
220}
221
222static bool
223xfs_getbmap_next_rec(
224 struct xfs_bmbt_irec *rec,
225 xfs_fileoff_t total_end)
226{
227 xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
228
229 if (end == total_end)
230 return false;
231
232 rec->br_startoff += rec->br_blockcount;
233 if (!isnullstartblock(rec->br_startblock) &&
234 rec->br_startblock != DELAYSTARTBLOCK)
235 rec->br_startblock += rec->br_blockcount;
236 rec->br_blockcount = total_end - end;
237 return true;
238}
239
240/*
241 * Get inode's extents as described in bmv, and format for output.
242 * Calls formatter to fill the user's buffer until all extents
243 * are mapped, until the passed-in bmv->bmv_count slots have
244 * been filled, or until the formatter short-circuits the loop,
245 * if it is tracking filled-in extents on its own.
246 */
247int /* error code */
248xfs_getbmap(
249 struct xfs_inode *ip,
250 struct getbmapx *bmv, /* user bmap structure */
251 struct kgetbmap *out)
252{
253 struct xfs_mount *mp = ip->i_mount;
254 int iflags = bmv->bmv_iflags;
255 int whichfork, lock, error = 0;
256 int64_t bmv_end, max_len;
257 xfs_fileoff_t bno, first_bno;
258 struct xfs_ifork *ifp;
259 struct xfs_bmbt_irec got, rec;
260 xfs_filblks_t len;
261 struct xfs_iext_cursor icur;
262
263 if (bmv->bmv_iflags & ~BMV_IF_VALID)
264 return -EINVAL;
265#ifndef DEBUG
266 /* Only allow CoW fork queries if we're debugging. */
267 if (iflags & BMV_IF_COWFORK)
268 return -EINVAL;
269#endif
270 if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
271 return -EINVAL;
272
273 if (bmv->bmv_length < -1)
274 return -EINVAL;
275 bmv->bmv_entries = 0;
276 if (bmv->bmv_length == 0)
277 return 0;
278
279 if (iflags & BMV_IF_ATTRFORK)
280 whichfork = XFS_ATTR_FORK;
281 else if (iflags & BMV_IF_COWFORK)
282 whichfork = XFS_COW_FORK;
283 else
284 whichfork = XFS_DATA_FORK;
285
286 xfs_ilock(ip, XFS_IOLOCK_SHARED);
287 switch (whichfork) {
288 case XFS_ATTR_FORK:
289 lock = xfs_ilock_attr_map_shared(ip);
290 if (!xfs_inode_has_attr_fork(ip))
291 goto out_unlock_ilock;
292
293 max_len = 1LL << 32;
294 break;
295 case XFS_COW_FORK:
296 lock = XFS_ILOCK_SHARED;
297 xfs_ilock(ip, lock);
298
299 /* No CoW fork? Just return */
300 if (!xfs_ifork_ptr(ip, whichfork))
301 goto out_unlock_ilock;
302
303 if (xfs_get_cowextsz_hint(ip))
304 max_len = mp->m_super->s_maxbytes;
305 else
306 max_len = XFS_ISIZE(ip);
307 break;
308 case XFS_DATA_FORK:
309 if (!(iflags & BMV_IF_DELALLOC) &&
310 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
311 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
312 if (error)
313 goto out_unlock_iolock;
314
315 /*
316 * Even after flushing the inode, there can still be
317 * delalloc blocks on the inode beyond EOF due to
318 * speculative preallocation. These are not removed
319 * until the release function is called or the inode
320 * is inactivated. Hence we cannot assert here that
321 * ip->i_delayed_blks == 0.
322 */
323 }
324
325 if (xfs_get_extsz_hint(ip) ||
326 (ip->i_diflags & XFS_DIFLAG_PREALLOC))
327 max_len = mp->m_super->s_maxbytes;
328 else
329 max_len = XFS_ISIZE(ip);
330
331 lock = xfs_ilock_data_map_shared(ip);
332 break;
333 }
334
335 ifp = xfs_ifork_ptr(ip, whichfork);
336
337 switch (ifp->if_format) {
338 case XFS_DINODE_FMT_EXTENTS:
339 case XFS_DINODE_FMT_BTREE:
340 break;
341 case XFS_DINODE_FMT_LOCAL:
342 /* Local format inode forks report no extents. */
343 goto out_unlock_ilock;
344 default:
345 error = -EINVAL;
346 goto out_unlock_ilock;
347 }
348
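	/*
	 * A bmv_length of -1 asks for a mapping that runs to the end of the
	 * addressable range, so clamp the request to max_len (converted to
	 * basic blocks) minus the starting offset.
	 */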
349 if (bmv->bmv_length == -1) {
350 max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
351 bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
352 }
353
354 bmv_end = bmv->bmv_offset + bmv->bmv_length;
355
356 first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
357 len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
358
359 error = xfs_iread_extents(NULL, ip, whichfork);
360 if (error)
361 goto out_unlock_ilock;
362
363 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
364 /*
365 * Report a whole-file hole if the delalloc flag is set to
366 * stay compatible with the old implementation.
367 */
368 if (iflags & BMV_IF_DELALLOC)
369 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
370 XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
371 goto out_unlock_ilock;
372 }
373
374 while (!xfs_getbmap_full(bmv)) {
375 xfs_trim_extent(&got, first_bno, len);
376
377 /*
378 * Report an entry for a hole if this extent doesn't directly
379 * follow the previous one.
380 */
381 if (got.br_startoff > bno) {
382 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
383 got.br_startoff);
384 if (xfs_getbmap_full(bmv))
385 break;
386 }
387
388 /*
389 * In order to report shared extents accurately, we report each
390 * distinct shared / unshared part of a single bmbt record with
391 * an individual getbmapx record.
392 */
393 bno = got.br_startoff + got.br_blockcount;
394 rec = got;
395 do {
396 error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
397 &rec);
398 if (error || xfs_getbmap_full(bmv))
399 goto out_unlock_ilock;
400 } while (xfs_getbmap_next_rec(&rec, bno));
401
402 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
403 xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
404
405 if (bmv->bmv_entries > 0)
406 out[bmv->bmv_entries - 1].bmv_oflags |=
407 BMV_OF_LAST;
408
409 if (whichfork != XFS_ATTR_FORK && bno < end &&
410 !xfs_getbmap_full(bmv)) {
411 xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
412 bno, end);
413 }
414 break;
415 }
416
417 if (bno >= first_bno + len)
418 break;
419 }
420
421out_unlock_ilock:
422 xfs_iunlock(ip, lock);
423out_unlock_iolock:
424 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
425 return error;
426}
427
428/*
429 * Dead simple method of punching delayed allocation blocks from a range in
430 * the inode. This will always punch out both the start and end blocks, even
431 * if the range only partially overlaps them, so it is up to the caller to
432 * ensure that partial blocks are not passed in.
433 */
434void
435xfs_bmap_punch_delalloc_range(
436 struct xfs_inode *ip,
437 int whichfork,
438 xfs_off_t start_byte,
439 xfs_off_t end_byte)
440{
441 struct xfs_mount *mp = ip->i_mount;
442 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
443 xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, start_byte);
444 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, end_byte);
445 struct xfs_bmbt_irec got, del;
446 struct xfs_iext_cursor icur;
447
448 ASSERT(!xfs_need_iread_extents(ifp));
449
450 xfs_ilock(ip, XFS_ILOCK_EXCL);
451 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
452 goto out_unlock;
453
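	/*
	 * Walk backwards from the last extent that starts before end_fsb,
	 * punching out any delalloc extents that overlap the target range
	 * and stepping over everything else.
	 */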
454 while (got.br_startoff + got.br_blockcount > start_fsb) {
455 del = got;
456 xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
457
458 /*
459		 * A delete can push the cursor forward. Step back to the
460		 * previous extent for non-delalloc extents or extents outside
461		 * the target range.
462 */
463 if (!del.br_blockcount ||
464 !isnullstartblock(del.br_startblock)) {
465 if (!xfs_iext_prev_extent(ifp, &icur, &got))
466 break;
467 continue;
468 }
469
470 xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
471 if (!xfs_iext_get_extent(ifp, &icur, &got))
472 break;
473 }
474
475 if (whichfork == XFS_COW_FORK && !ifp->if_bytes)
476 xfs_inode_clear_cowblocks_tag(ip);
477
478out_unlock:
479 xfs_iunlock(ip, XFS_ILOCK_EXCL);
480}
481
482/*
483 * Test whether it is appropriate to check an inode for and free post EOF
484 * blocks.
485 */
486bool
487xfs_can_free_eofblocks(
488 struct xfs_inode *ip)
489{
490 struct xfs_mount *mp = ip->i_mount;
491 bool found_blocks = false;
492 xfs_fileoff_t end_fsb;
493 xfs_fileoff_t last_fsb;
494 struct xfs_bmbt_irec imap;
495 struct xfs_iext_cursor icur;
496
497 /*
498	 * Caller must either hold the exclusive I/O lock or be inactivating
499 * the inode, which guarantees there are no other users of the inode.
500 */
501 if (!(VFS_I(ip)->i_state & I_FREEING))
502 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
503
504 /* prealloc/delalloc exists only on regular files */
505 if (!S_ISREG(VFS_I(ip)->i_mode))
506 return false;
507
508 /*
509	 * Zero-sized files with no cached pages and no delalloc blocks will not
510 * have speculative prealloc/delalloc blocks to remove.
511 */
512 if (VFS_I(ip)->i_size == 0 &&
513 VFS_I(ip)->i_mapping->nrpages == 0 &&
514 ip->i_delayed_blks == 0)
515 return false;
516
517 /* If we haven't read in the extent list, then don't do it now. */
518 if (xfs_need_iread_extents(&ip->i_df))
519 return false;
520
521 /*
522 * Do not free real extents in preallocated files unless the file has
523 * delalloc blocks and we are forced to remove them.
524 */
525 if ((ip->i_diflags & XFS_DIFLAG_PREALLOC) && !ip->i_delayed_blks)
526 return false;
527
528 /*
529 * Do not try to free post-EOF blocks if EOF is beyond the end of the
530 * range supported by the page cache, because the truncation will loop
531 * forever.
532 */
533 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
534 if (xfs_inode_has_bigrtalloc(ip))
535 end_fsb = xfs_fileoff_roundup_rtx(mp, end_fsb);
536 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
537 if (last_fsb <= end_fsb)
538 return false;
539
540 /*
541	 * Check if there is a post-EOF extent to free. If there are any
542 * delalloc blocks attached to the inode (data fork delalloc
543 * reservations or CoW extents of any kind), we need to free them so
544 * that inactivation doesn't fail to erase them.
545 */
546 xfs_ilock(ip, XFS_ILOCK_SHARED);
547 if (ip->i_delayed_blks ||
548 xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
549 found_blocks = true;
550 xfs_iunlock(ip, XFS_ILOCK_SHARED);
551 return found_blocks;
552}
553
554/*
555 * This is called to free any blocks beyond eof. The caller must hold
556 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
557 * reference to the inode.
558 */
559int
560xfs_free_eofblocks(
561 struct xfs_inode *ip)
562{
563 struct xfs_trans *tp;
564 struct xfs_mount *mp = ip->i_mount;
565 int error;
566
567 /* Attach the dquots to the inode up front. */
568 error = xfs_qm_dqattach(ip);
569 if (error)
570 return error;
571
572 /* Wait on dio to ensure i_size has settled. */
573 inode_dio_wait(VFS_I(ip));
574
575 /*
576 * For preallocated files only free delayed allocations.
577 *
578 * Note that this means we also leave speculative preallocations in
579 * place for preallocated files.
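	 *
	 * The punch below starts at the first block boundary at or beyond
	 * EOF, so blocks inside EOF are never touched.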
580 */
581 if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
582 if (ip->i_delayed_blks) {
583 xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
584 round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
585 LLONG_MAX);
586 }
587 xfs_inode_clear_eofblocks_tag(ip);
588 return 0;
589 }
590
591 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
592 if (error) {
593 ASSERT(xfs_is_shutdown(mp));
594 return error;
595 }
596
597 xfs_ilock(ip, XFS_ILOCK_EXCL);
598 xfs_trans_ijoin(tp, ip, 0);
599
600 /*
601 * Do not update the on-disk file size. If we update the on-disk file
602 * size and then the system crashes before the contents of the file are
603	 * flushed to disk then the files may be full of holes (i.e. the NULL
604	 * files bug).
605 */
606 error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
607 XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
608 if (error)
609 goto err_cancel;
610
611 error = xfs_trans_commit(tp);
612 if (error)
613 goto out_unlock;
614
615 xfs_inode_clear_eofblocks_tag(ip);
616 goto out_unlock;
617
618err_cancel:
619 /*
620 * If we get an error at this point we simply don't
621 * bother truncating the file.
622 */
623 xfs_trans_cancel(tp);
624out_unlock:
625 xfs_iunlock(ip, XFS_ILOCK_EXCL);
626 return error;
627}
628
629int
630xfs_alloc_file_space(
631 struct xfs_inode *ip,
632 xfs_off_t offset,
633 xfs_off_t len)
634{
635 xfs_mount_t *mp = ip->i_mount;
636 xfs_off_t count;
637 xfs_filblks_t allocatesize_fsb;
638 xfs_extlen_t extsz, temp;
639 xfs_fileoff_t startoffset_fsb;
640 xfs_fileoff_t endoffset_fsb;
641 int rt;
642 xfs_trans_t *tp;
643 xfs_bmbt_irec_t imaps[1], *imapp;
644 int error;
645
646 if (xfs_is_always_cow_inode(ip))
647 return 0;
648
649 trace_xfs_alloc_file_space(ip);
650
651 if (xfs_is_shutdown(mp))
652 return -EIO;
653
654 error = xfs_qm_dqattach(ip);
655 if (error)
656 return error;
657
658 if (len <= 0)
659 return -EINVAL;
660
661 rt = XFS_IS_REALTIME_INODE(ip);
662 extsz = xfs_get_extsz_hint(ip);
663
664 count = len;
665 imapp = &imaps[0];
666 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
667 endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
668 allocatesize_fsb = endoffset_fsb - startoffset_fsb;
669
670 /*
671 * Allocate file space until done or until there is an error
672 */
673 while (allocatesize_fsb && !error) {
674 xfs_fileoff_t s, e;
675 unsigned int dblocks, rblocks, resblks;
676 int nimaps = 1;
677
678 /*
679 * Determine space reservations for data/realtime.
680 */
681 if (unlikely(extsz)) {
682 s = startoffset_fsb;
683 do_div(s, extsz);
684 s *= extsz;
685 e = startoffset_fsb + allocatesize_fsb;
686 div_u64_rem(startoffset_fsb, extsz, &temp);
687 if (temp)
688 e += temp;
689 div_u64_rem(e, extsz, &temp);
690 if (temp)
691 e += extsz - temp;
692 } else {
693 s = 0;
694 e = allocatesize_fsb;
695 }
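		/*
		 * For illustration: with extsz = 16, a 20-block request
		 * starting at block 5 gives s = 0 and e = 32, i.e. two full
		 * extent-size chunks covering the aligned allocation.
		 */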
696
697 /*
698 * The transaction reservation is limited to a 32-bit block
699 * count, hence we need to limit the number of blocks we are
700 * trying to reserve to avoid an overflow. We can't allocate
701 * more than @nimaps extents, and an extent is limited on disk
702 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
703 * limit.
704 */
705 resblks = min_t(xfs_fileoff_t, (e - s),
706 (XFS_MAX_BMBT_EXTLEN * nimaps));
707 if (unlikely(rt)) {
708 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
709 rblocks = resblks;
710 } else {
711 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
712 rblocks = 0;
713 }
714
715 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
716 dblocks, rblocks, false, &tp);
717 if (error)
718 break;
719
720 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
721 XFS_IEXT_ADD_NOSPLIT_CNT);
722 if (error)
723 goto error;
724
725 /*
726 * If the allocator cannot find a single free extent large
727 * enough to cover the start block of the requested range,
728 * xfs_bmapi_write will return -ENOSR.
729 *
730 * In that case we simply need to keep looping with the same
731 * startoffset_fsb so that one of the following allocations
732 * will eventually reach the requested range.
733 */
734 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
735 allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
736 &nimaps);
737 if (error) {
738 if (error != -ENOSR)
739 goto error;
740 error = 0;
741 } else {
742 startoffset_fsb += imapp->br_blockcount;
743 allocatesize_fsb -= imapp->br_blockcount;
744 }
745
746 ip->i_diflags |= XFS_DIFLAG_PREALLOC;
747 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
748
749 error = xfs_trans_commit(tp);
750 xfs_iunlock(ip, XFS_ILOCK_EXCL);
751 }
752
753 return error;
754
755error:
756 xfs_trans_cancel(tp);
757 xfs_iunlock(ip, XFS_ILOCK_EXCL);
758 return error;
759}
760
761static int
762xfs_unmap_extent(
763 struct xfs_inode *ip,
764 xfs_fileoff_t startoffset_fsb,
765 xfs_filblks_t len_fsb,
766 int *done)
767{
768 struct xfs_mount *mp = ip->i_mount;
769 struct xfs_trans *tp;
770 uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
771 int error;
772
773 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
774 false, &tp);
775 if (error)
776 return error;
777
778 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
779 XFS_IEXT_PUNCH_HOLE_CNT);
780 if (error)
781 goto out_trans_cancel;
782
783 error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
784 if (error)
785 goto out_trans_cancel;
786
787 error = xfs_trans_commit(tp);
788out_unlock:
789 xfs_iunlock(ip, XFS_ILOCK_EXCL);
790 return error;
791
792out_trans_cancel:
793 xfs_trans_cancel(tp);
794 goto out_unlock;
795}
796
797/* Caller must first wait for the completion of any pending DIOs if required. */
798int
799xfs_flush_unmap_range(
800 struct xfs_inode *ip,
801 xfs_off_t offset,
802 xfs_off_t len)
803{
804 struct inode *inode = VFS_I(ip);
805 xfs_off_t rounding, start, end;
806 int error;
807
808 /*
809 * Make sure we extend the flush out to extent alignment
810 * boundaries so any extent range overlapping the start/end
811 * of the modification we are about to do is clean and idle.
812 */
813 rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
814 start = rounddown_64(offset, rounding);
815 end = roundup_64(offset + len, rounding) - 1;
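	/*
	 * For example, with a 4k allocation unit, offset 6000 and len 3000
	 * flush and invalidate bytes 4096 through 12287, so both partially
	 * covered allocation units are included.
	 */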
816
817 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
818 if (error)
819 return error;
820 truncate_pagecache_range(inode, start, end);
821 return 0;
822}
823
824int
825xfs_free_file_space(
826 struct xfs_inode *ip,
827 xfs_off_t offset,
828 xfs_off_t len)
829{
830 struct xfs_mount *mp = ip->i_mount;
831 xfs_fileoff_t startoffset_fsb;
832 xfs_fileoff_t endoffset_fsb;
833 int done = 0, error;
834
835 trace_xfs_free_file_space(ip);
836
837 error = xfs_qm_dqattach(ip);
838 if (error)
839 return error;
840
841 if (len <= 0) /* if nothing being freed */
842 return 0;
843
844 /*
845	 * Now that AIO and DIO have drained we flush and (if necessary) invalidate
846 * the cached range over the first operation we are about to run.
847 */
848 error = xfs_flush_unmap_range(ip, offset, len);
849 if (error)
850 return error;
851
852 startoffset_fsb = XFS_B_TO_FSB(mp, offset);
853 endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
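	/*
	 * Note the asymmetric rounding: the start offset rounds up and the
	 * end offset rounds down, so only whole filesystem blocks fall in the
	 * unmap range (e.g. with 4k blocks, offset 5000 and len 10000 unmap
	 * only block 2); the partial blocks at either end are zeroed below.
	 */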
854
855 /* We can only free complete realtime extents. */
856 if (xfs_inode_has_bigrtalloc(ip)) {
857 startoffset_fsb = xfs_fileoff_roundup_rtx(mp, startoffset_fsb);
858 endoffset_fsb = xfs_fileoff_rounddown_rtx(mp, endoffset_fsb);
859 }
860
861 /*
862 * Need to zero the stuff we're not freeing, on disk.
863 */
864 if (endoffset_fsb > startoffset_fsb) {
865 while (!done) {
866 error = xfs_unmap_extent(ip, startoffset_fsb,
867 endoffset_fsb - startoffset_fsb, &done);
868 if (error)
869 return error;
870 }
871 }
872
873 /*
874	 * Now that we've unmapped all full blocks we'll have to zero out any
875 * partial block at the beginning and/or end. xfs_zero_range is smart
876 * enough to skip any holes, including those we just created, but we
877 * must take care not to zero beyond EOF and enlarge i_size.
878 */
879 if (offset >= XFS_ISIZE(ip))
880 return 0;
881 if (offset + len > XFS_ISIZE(ip))
882 len = XFS_ISIZE(ip) - offset;
883 error = xfs_zero_range(ip, offset, len, NULL);
884 if (error)
885 return error;
886
887 /*
888 * If we zeroed right up to EOF and EOF straddles a page boundary we
889 * must make sure that the post-EOF area is also zeroed because the
890 * page could be mmap'd and xfs_zero_range doesn't do that for us.
891 * Writeback of the eof page will do this, albeit clumsily.
892 */
893 if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
894 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
895 round_down(offset + len, PAGE_SIZE), LLONG_MAX);
896 }
897
898 return error;
899}
900
901static int
902xfs_prepare_shift(
903 struct xfs_inode *ip,
904 loff_t offset)
905{
906 unsigned int rounding;
907 int error;
908
909 /*
910 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
911 * into the accessible region of the file.
912 */
913 if (xfs_can_free_eofblocks(ip)) {
914 error = xfs_free_eofblocks(ip);
915 if (error)
916 return error;
917 }
918
919 /*
920 * Shift operations must stabilize the start block offset boundary along
921 * with the full range of the operation. If we don't, a COW writeback
922 * completion could race with an insert, front merge with the start
923 * extent (after split) during the shift and corrupt the file. Start
924 * with the allocation unit just prior to the start to stabilize the
925 * boundary.
926 */
927 rounding = xfs_inode_alloc_unitsize(ip);
928 offset = rounddown_64(offset, rounding);
929 if (offset)
930 offset -= rounding;
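	/*
	 * For example, with a 4k allocation unit an offset of 10000 rounds
	 * down to 8192 and then backs up to 4096, so the flush below starts
	 * one full allocation unit before the shift boundary.
	 */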
931
932 /*
933 * Writeback and invalidate cache for the remainder of the file as we're
934 * about to shift down every extent from offset to EOF.
935 */
936 error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
937 if (error)
938 return error;
939
940 /*
941 * Clean out anything hanging around in the cow fork now that
942 * we've flushed all the dirty data out to disk to avoid having
943 * CoW extents at the wrong offsets.
944 */
945 if (xfs_inode_has_cow_data(ip)) {
946 error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
947 true);
948 if (error)
949 return error;
950 }
951
952 return 0;
953}
954
955/*
956 * xfs_collapse_file_space()
957 * This routine frees disk space and shifts extents for the given file.
958 * The first thing we do is free the data blocks in the specified range
959 * by calling xfs_free_file_space(), which also syncs dirty data and
960 * invalidates the page cache over the region on which the collapse range
961 * is working. Then we shift the extent records left to cover the hole.
962 * RETURNS:
963 * 0 on success
964 * errno on error
965 *
966 */
967int
968xfs_collapse_file_space(
969 struct xfs_inode *ip,
970 xfs_off_t offset,
971 xfs_off_t len)
972{
973 struct xfs_mount *mp = ip->i_mount;
974 struct xfs_trans *tp;
975 int error;
976 xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
977 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
978 bool done = false;
979
980 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
981
982 trace_xfs_collapse_file_space(ip);
983
984 error = xfs_free_file_space(ip, offset, len);
985 if (error)
986 return error;
987
988 error = xfs_prepare_shift(ip, offset);
989 if (error)
990 return error;
991
992 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
993 if (error)
994 return error;
995
996 xfs_ilock(ip, XFS_ILOCK_EXCL);
997 xfs_trans_ijoin(tp, ip, 0);
998
999 while (!done) {
1000 error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1001 &done);
1002 if (error)
1003 goto out_trans_cancel;
1004 if (done)
1005 break;
1006
1007 /* finish any deferred frees and roll the transaction */
1008 error = xfs_defer_finish(&tp);
1009 if (error)
1010 goto out_trans_cancel;
1011 }
1012
1013 error = xfs_trans_commit(tp);
1014 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1015 return error;
1016
1017out_trans_cancel:
1018 xfs_trans_cancel(tp);
1019 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1020 return error;
1021}
1022
1023/*
1024 * xfs_insert_file_space()
1025 * This routine creates hole space by shifting extents for the given file.
1026 * The first thing we do is sync dirty data and invalidate the page cache
1027 * over the region on which the insert range is working. Then we split the
1028 * extent at the given offset into two extents by calling
1029 * xfs_bmap_split_extent, and shift all extent records lying between
1030 * [offset, last allocated extent] to the right to make room for the hole.
1031 * RETURNS:
1032 * 0 on success
1033 * errno on error
1034 */
1035int
1036xfs_insert_file_space(
1037 struct xfs_inode *ip,
1038 loff_t offset,
1039 loff_t len)
1040{
1041 struct xfs_mount *mp = ip->i_mount;
1042 struct xfs_trans *tp;
1043 int error;
1044 xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
1045 xfs_fileoff_t next_fsb = NULLFSBLOCK;
1046 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
1047 bool done = false;
1048
1049 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
1050
1051 trace_xfs_insert_file_space(ip);
1052
1053 error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1054 if (error)
1055 return error;
1056
1057 error = xfs_prepare_shift(ip, offset);
1058 if (error)
1059 return error;
1060
1061 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1062 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1063 if (error)
1064 return error;
1065
1066 xfs_ilock(ip, XFS_ILOCK_EXCL);
1067 xfs_trans_ijoin(tp, ip, 0);
1068
1069 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
1070 XFS_IEXT_PUNCH_HOLE_CNT);
1071 if (error)
1072 goto out_trans_cancel;
1073
1074 /*
1075 * The extent shifting code works on extent granularity. So, if stop_fsb
1076	 * is not the starting block of an extent, we need to split the extent at
1077 * stop_fsb.
1078 */
1079 error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1080 if (error)
1081 goto out_trans_cancel;
1082
1083 do {
1084 error = xfs_defer_finish(&tp);
1085 if (error)
1086 goto out_trans_cancel;
1087
1088 error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1089 &done, stop_fsb);
1090 if (error)
1091 goto out_trans_cancel;
1092 } while (!done);
1093
1094 error = xfs_trans_commit(tp);
1095 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1096 return error;
1097
1098out_trans_cancel:
1099 xfs_trans_cancel(tp);
1100 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1101 return error;
1102}
1103
1104/*
1105 * We need to check that the format of the data fork in the temporary inode is
1106 * valid for the target inode before doing the swap. This is not a problem with
1107 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1108 * data fork depending on the space the attribute fork is taking so we can get
1109 * invalid formats on the target inode.
1110 *
1111 * E.g. target has space for 7 extents in extent format, temp inode only has
1112 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1113 * btree, but when swapped it needs to be in extent format. Hence we can't just
1114 * blindly swap data forks on attr2 filesystems.
1115 *
1116 * Note that we check the swap in both directions so that we don't end up with
1117 * a corrupt temporary inode, either.
1118 *
1119 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1120 * inode will prevent this situation from occurring, so all we do here is
1121 * reject and log the attempt. Basically, we are putting the responsibility on
1122 * userspace to get this right.
1123 */
1124static int
1125xfs_swap_extents_check_format(
1126 struct xfs_inode *ip, /* target inode */
1127 struct xfs_inode *tip) /* tmp inode */
1128{
1129 struct xfs_ifork *ifp = &ip->i_df;
1130 struct xfs_ifork *tifp = &tip->i_df;
1131
1132 /* User/group/project quota ids must match if quotas are enforced. */
1133 if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1134 (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1135 !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1136 ip->i_projid != tip->i_projid))
1137 return -EINVAL;
1138
1139 /* Should never get a local format */
1140 if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1141 tifp->if_format == XFS_DINODE_FMT_LOCAL)
1142 return -EINVAL;
1143
1144 /*
1145	 * If the target inode has fewer extents than the temporary inode, then
1146	 * why did userspace call us?
1147 */
1148 if (ifp->if_nextents < tifp->if_nextents)
1149 return -EINVAL;
1150
1151 /*
1152 * If we have to use the (expensive) rmap swap method, we can
1153 * handle any number of extents and any format.
1154 */
1155 if (xfs_has_rmapbt(ip->i_mount))
1156 return 0;
1157
1158 /*
1159	 * If the target inode is in extent format and the temp inode is in btree
1160	 * format then we will end up with the target inode in the wrong format,
1161	 * as we already know there are fewer extents in the temp inode.
1162 */
1163 if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1164 tifp->if_format == XFS_DINODE_FMT_BTREE)
1165 return -EINVAL;
1166
1167 /* Check temp in extent form to max in target */
1168 if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1169 tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1170 return -EINVAL;
1171
1172 /* Check target in extent form to max in temp */
1173 if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1174 ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1175 return -EINVAL;
1176
1177 /*
1178 * If we are in a btree format, check that the temp root block will fit
1179 * in the target and that it has enough extents to be in btree format
1180 * in the target.
1181 *
1182 * Note that we have to be careful to allow btree->extent conversions
1183 * (a common defrag case) which will occur when the temp inode is in
1184 * extent format...
1185 */
1186 if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1187 if (xfs_inode_has_attr_fork(ip) &&
1188 xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
1189 return -EINVAL;
1190 if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1191 return -EINVAL;
1192 }
1193
1194 /* Reciprocal target->temp btree format checks */
1195 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1196 if (xfs_inode_has_attr_fork(tip) &&
1197 xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
1198 return -EINVAL;
1199 if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1200 return -EINVAL;
1201 }
1202
1203 return 0;
1204}
1205
1206static int
1207xfs_swap_extent_flush(
1208 struct xfs_inode *ip)
1209{
1210 int error;
1211
1212 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1213 if (error)
1214 return error;
1215 truncate_pagecache_range(VFS_I(ip), 0, -1);
1216
1217 /* Verify O_DIRECT for ftmp */
1218 if (VFS_I(ip)->i_mapping->nrpages)
1219 return -EINVAL;
1220 return 0;
1221}
1222
1223/*
1224 * Move extents from one file to another, when rmap is enabled.
1225 */
1226STATIC int
1227xfs_swap_extent_rmap(
1228 struct xfs_trans **tpp,
1229 struct xfs_inode *ip,
1230 struct xfs_inode *tip)
1231{
1232 struct xfs_trans *tp = *tpp;
1233 struct xfs_bmbt_irec irec;
1234 struct xfs_bmbt_irec uirec;
1235 struct xfs_bmbt_irec tirec;
1236 xfs_fileoff_t offset_fsb;
1237 xfs_fileoff_t end_fsb;
1238 xfs_filblks_t count_fsb;
1239 int error;
1240 xfs_filblks_t ilen;
1241 xfs_filblks_t rlen;
1242 int nimaps;
1243 uint64_t tip_flags2;
1244
1245 /*
1246 * If the source file has shared blocks, we must flag the donor
1247 * file as having shared blocks so that we get the shared-block
1248 * rmap functions when we go to fix up the rmaps. The flags
1249	 * will be switched for real later.
1250 */
1251 tip_flags2 = tip->i_diflags2;
1252 if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1253 tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1254
1255 offset_fsb = 0;
1256 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1257 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1258
1259 while (count_fsb) {
1260 /* Read extent from the donor file */
1261 nimaps = 1;
1262 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1263 &nimaps, 0);
1264 if (error)
1265 goto out;
1266 ASSERT(nimaps == 1);
1267 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1268
1269 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1270 ilen = tirec.br_blockcount;
1271
1272 /* Unmap the old blocks in the source file. */
1273 while (tirec.br_blockcount) {
1274 ASSERT(tp->t_highest_agno == NULLAGNUMBER);
1275 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1276
1277 /* Read extent from the source file */
1278 nimaps = 1;
1279 error = xfs_bmapi_read(ip, tirec.br_startoff,
1280 tirec.br_blockcount, &irec,
1281 &nimaps, 0);
1282 if (error)
1283 goto out;
1284 ASSERT(nimaps == 1);
1285 ASSERT(tirec.br_startoff == irec.br_startoff);
1286 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1287
1288 /* Trim the extent. */
1289 uirec = tirec;
1290 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1291 tirec.br_blockcount,
1292 irec.br_blockcount);
1293 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1294
1295 if (xfs_bmap_is_real_extent(&uirec)) {
1296 error = xfs_iext_count_extend(tp, ip,
1297 XFS_DATA_FORK,
1298 XFS_IEXT_SWAP_RMAP_CNT);
1299 if (error)
1300 goto out;
1301 }
1302
1303 if (xfs_bmap_is_real_extent(&irec)) {
1304 error = xfs_iext_count_extend(tp, tip,
1305 XFS_DATA_FORK,
1306 XFS_IEXT_SWAP_RMAP_CNT);
1307 if (error)
1308 goto out;
1309 }
1310
1311 /* Remove the mapping from the donor file. */
1312 xfs_bmap_unmap_extent(tp, tip, XFS_DATA_FORK, &uirec);
1313
1314 /* Remove the mapping from the source file. */
1315 xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
1316
1317 /* Map the donor file's blocks into the source file. */
1318 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &uirec);
1319
1320 /* Map the source file's blocks into the donor file. */
1321 xfs_bmap_map_extent(tp, tip, XFS_DATA_FORK, &irec);
1322
1323 error = xfs_defer_finish(tpp);
1324 tp = *tpp;
1325 if (error)
1326 goto out;
1327
1328 tirec.br_startoff += rlen;
1329 if (tirec.br_startblock != HOLESTARTBLOCK &&
1330 tirec.br_startblock != DELAYSTARTBLOCK)
1331 tirec.br_startblock += rlen;
1332 tirec.br_blockcount -= rlen;
1333 }
1334
1335 /* Roll on... */
1336 count_fsb -= ilen;
1337 offset_fsb += ilen;
1338 }
1339
1340 tip->i_diflags2 = tip_flags2;
1341 return 0;
1342
1343out:
1344 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1345 tip->i_diflags2 = tip_flags2;
1346 return error;
1347}
1348
1349/* Swap the extents of two files by swapping data forks. */
1350STATIC int
1351xfs_swap_extent_forks(
1352 struct xfs_trans *tp,
1353 struct xfs_inode *ip,
1354 struct xfs_inode *tip,
1355 int *src_log_flags,
1356 int *target_log_flags)
1357{
1358 xfs_filblks_t aforkblks = 0;
1359 xfs_filblks_t taforkblks = 0;
1360 xfs_extnum_t junk;
1361 uint64_t tmp;
1362 int error;
1363
1364 /*
1365 * Count the number of extended attribute blocks
1366 */
1367 if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
1368 ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1369 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1370 &aforkblks);
1371 if (error)
1372 return error;
1373 }
1374 if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
1375 tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1376 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1377 &taforkblks);
1378 if (error)
1379 return error;
1380 }
1381
1382 /*
1383 * Btree format (v3) inodes have the inode number stamped in the bmbt
1384 * block headers. We can't start changing the bmbt blocks until the
1385 * inode owner change is logged so recovery does the right thing in the
1386 * event of a crash. Set the owner change log flags now and leave the
1387 * bmbt scan as the last step.
1388 */
1389 if (xfs_has_v3inodes(ip->i_mount)) {
1390 if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1391 (*target_log_flags) |= XFS_ILOG_DOWNER;
1392 if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1393 (*src_log_flags) |= XFS_ILOG_DOWNER;
1394 }
1395
1396 /*
1397 * Swap the data forks of the inodes
1398 */
1399 swap(ip->i_df, tip->i_df);
1400
1401 /*
1402 * Fix the on-disk inode values
1403 */
1404 tmp = (uint64_t)ip->i_nblocks;
1405 ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1406 tip->i_nblocks = tmp + taforkblks - aforkblks;
1407
1408 /*
1409 * The extents in the source inode could still contain speculative
1410 * preallocation beyond EOF (e.g. the file is open but not modified
1411 * while defrag is in progress). In that case, we need to copy over the
1412 * number of delalloc blocks the data fork in the source inode is
1413 * tracking beyond EOF so that when the fork is truncated away when the
1414 * temporary inode is unlinked we don't underrun the i_delayed_blks
1415 * counter on that inode.
1416 */
1417 ASSERT(tip->i_delayed_blks == 0);
1418 tip->i_delayed_blks = ip->i_delayed_blks;
1419 ip->i_delayed_blks = 0;
1420
1421 switch (ip->i_df.if_format) {
1422 case XFS_DINODE_FMT_EXTENTS:
1423 (*src_log_flags) |= XFS_ILOG_DEXT;
1424 break;
1425 case XFS_DINODE_FMT_BTREE:
1426 ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1427 (*src_log_flags & XFS_ILOG_DOWNER));
1428 (*src_log_flags) |= XFS_ILOG_DBROOT;
1429 break;
1430 }
1431
1432 switch (tip->i_df.if_format) {
1433 case XFS_DINODE_FMT_EXTENTS:
1434 (*target_log_flags) |= XFS_ILOG_DEXT;
1435 break;
1436 case XFS_DINODE_FMT_BTREE:
1437 (*target_log_flags) |= XFS_ILOG_DBROOT;
1438 ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1439 (*target_log_flags & XFS_ILOG_DOWNER));
1440 break;
1441 }
1442
1443 return 0;
1444}
1445
1446/*
1447 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1448 * change owner scan attempts to order all modified buffers in the current
1449 * transaction. In the event of ordered buffer failure, the offending buffer is
1450 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1451 * the transaction in this case to replenish the fallback log reservation and
1452 * restart the scan. This process repeats until the scan completes.
1453 */
1454static int
1455xfs_swap_change_owner(
1456 struct xfs_trans **tpp,
1457 struct xfs_inode *ip,
1458 struct xfs_inode *tmpip)
1459{
1460 int error;
1461 struct xfs_trans *tp = *tpp;
1462
1463 do {
1464 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1465 NULL);
1466 /* success or fatal error */
1467 if (error != -EAGAIN)
1468 break;
1469
1470 error = xfs_trans_roll(tpp);
1471 if (error)
1472 break;
1473 tp = *tpp;
1474
1475 /*
1476 * Redirty both inodes so they can relog and keep the log tail
1477 * moving forward.
1478 */
1479 xfs_trans_ijoin(tp, ip, 0);
1480 xfs_trans_ijoin(tp, tmpip, 0);
1481 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1482 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1483 } while (true);
1484
1485 return error;
1486}
1487
1488int
1489xfs_swap_extents(
1490 struct xfs_inode *ip, /* target inode */
1491 struct xfs_inode *tip, /* tmp inode */
1492 struct xfs_swapext *sxp)
1493{
1494 struct xfs_mount *mp = ip->i_mount;
1495 struct xfs_trans *tp;
1496 struct xfs_bstat *sbp = &sxp->sx_stat;
1497 int src_log_flags, target_log_flags;
1498 int error = 0;
1499 uint64_t f;
1500 int resblks = 0;
1501 unsigned int flags = 0;
1502 struct timespec64 ctime, mtime;
1503
1504 /*
1505 * Lock the inodes against other IO, page faults and truncate to
1506	 * begin with. Then we can safely ensure the inodes are flushed and
1507	 * have no page cache. Once we have done this we can take the ilocks and
1508 * do the rest of the checks.
1509 */
1510 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1511 filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1512 VFS_I(tip)->i_mapping);
1513
1514 /* Verify that both files have the same format */
1515 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1516 error = -EINVAL;
1517 goto out_unlock;
1518 }
1519
1520 /* Verify both files are either real-time or non-realtime */
1521 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1522 error = -EINVAL;
1523 goto out_unlock;
1524 }
1525
1526 /*
1527 * The rmapbt implementation is unable to resume a swapext operation
1528 * after a crash if the allocation unit size is larger than a block.
1529 * This (deprecated) interface will not be upgraded to handle this
1530 * situation. Defragmentation must be performed with the commit range
1531 * ioctl.
1532 */
1533 if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(ip->i_mount)) {
1534 error = -EOPNOTSUPP;
1535 goto out_unlock;
1536 }
1537
1538 error = xfs_qm_dqattach(ip);
1539 if (error)
1540 goto out_unlock;
1541
1542 error = xfs_qm_dqattach(tip);
1543 if (error)
1544 goto out_unlock;
1545
1546 error = xfs_swap_extent_flush(ip);
1547 if (error)
1548 goto out_unlock;
1549 error = xfs_swap_extent_flush(tip);
1550 if (error)
1551 goto out_unlock;
1552
1553 if (xfs_inode_has_cow_data(tip)) {
1554 error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1555 if (error)
1556 goto out_unlock;
1557 }
1558
1559 /*
1560 * Extent "swapping" with rmap requires a permanent reservation and
1561 * a block reservation because it's really just a remap operation
1562 * performed with log redo items!
1563 */
1564 if (xfs_has_rmapbt(mp)) {
1565 int w = XFS_DATA_FORK;
1566 uint32_t ipnext = ip->i_df.if_nextents;
1567 uint32_t tipnext = tip->i_df.if_nextents;
1568
1569 /*
1570 * Conceptually this shouldn't affect the shape of either bmbt,
1571 * but since we atomically move extents one by one, we reserve
1572 * enough space to rebuild both trees.
1573 */
1574 resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1575 resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1576
1577 /*
1578 * If either inode straddles a bmapbt block allocation boundary,
1579 * the rmapbt algorithm triggers repeated allocs and frees as
1580 * extents are remapped. This can exhaust the block reservation
1581 * prematurely and cause shutdown. Return freed blocks to the
1582 * transaction reservation to counter this behavior.
1583 */
1584 flags |= XFS_TRANS_RES_FDBLKS;
1585 }
1586 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1587 &tp);
1588 if (error)
1589 goto out_unlock;
1590
1591 /*
1592	 * Lock and join the inodes to the transaction so that transaction commit
1593 * or cancel will unlock the inodes from this point onwards.
1594 */
1595 xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1596 xfs_trans_ijoin(tp, ip, 0);
1597 xfs_trans_ijoin(tp, tip, 0);
1598
1599
1600 /* Verify all data are being swapped */
1601 if (sxp->sx_offset != 0 ||
1602 sxp->sx_length != ip->i_disk_size ||
1603 sxp->sx_length != tip->i_disk_size) {
1604 error = -EFAULT;
1605 goto out_trans_cancel;
1606 }
1607
1608 trace_xfs_swap_extent_before(ip, 0);
1609 trace_xfs_swap_extent_before(tip, 1);
1610
1611 /* check inode formats now that data is flushed */
1612 error = xfs_swap_extents_check_format(ip, tip);
1613 if (error) {
1614 xfs_notice(mp,
1615 "%s: inode 0x%llx format is incompatible for exchanging.",
1616 __func__, ip->i_ino);
1617 goto out_trans_cancel;
1618 }
1619
1620 /*
1621	 * Compare the current change & modify times with those
1622	 * passed in. If they differ, we abort this swap.
1623	 * This is the mechanism used to assure the calling
1624	 * process that the file was not changed out from
1625	 * under it.
1626 */
1627 ctime = inode_get_ctime(VFS_I(ip));
1628 mtime = inode_get_mtime(VFS_I(ip));
1629 if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
1630 (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
1631 (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
1632 (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
1633 error = -EBUSY;
1634 goto out_trans_cancel;
1635 }
1636
1637 /*
1638 * Note the trickiness in setting the log flags - we set the owner log
1639 * flag on the opposite inode (i.e. the inode we are setting the new
1640 * owner to be) because once we swap the forks and log that, log
1641 * recovery is going to see the fork as owned by the swapped inode,
1642 * not the pre-swapped inodes.
1643 */
1644 src_log_flags = XFS_ILOG_CORE;
1645 target_log_flags = XFS_ILOG_CORE;
1646
1647 if (xfs_has_rmapbt(mp))
1648 error = xfs_swap_extent_rmap(&tp, ip, tip);
1649 else
1650 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1651 &target_log_flags);
1652 if (error)
1653 goto out_trans_cancel;
1654
1655 /* Do we have to swap reflink flags? */
1656 if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1657 (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1658 f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1659 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1660 ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1661 tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1662 tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1663 }
1664
1665 /* Swap the cow forks. */
1666 if (xfs_has_reflink(mp)) {
1667 ASSERT(!ip->i_cowfp ||
1668 ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1669 ASSERT(!tip->i_cowfp ||
1670 tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1671
1672 swap(ip->i_cowfp, tip->i_cowfp);
1673
1674 if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1675 xfs_inode_set_cowblocks_tag(ip);
1676 else
1677 xfs_inode_clear_cowblocks_tag(ip);
1678 if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1679 xfs_inode_set_cowblocks_tag(tip);
1680 else
1681 xfs_inode_clear_cowblocks_tag(tip);
1682 }
1683
1684 xfs_trans_log_inode(tp, ip, src_log_flags);
1685 xfs_trans_log_inode(tp, tip, target_log_flags);
1686
1687 /*
1688 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1689 * have inode number owner values in the bmbt blocks that still refer to
1690 * the old inode. Scan each bmbt to fix up the owner values with the
1691 * inode number of the current inode.
1692 */
1693 if (src_log_flags & XFS_ILOG_DOWNER) {
1694 error = xfs_swap_change_owner(&tp, ip, tip);
1695 if (error)
1696 goto out_trans_cancel;
1697 }
1698 if (target_log_flags & XFS_ILOG_DOWNER) {
1699 error = xfs_swap_change_owner(&tp, tip, ip);
1700 if (error)
1701 goto out_trans_cancel;
1702 }
1703
1704 /*
1705 * If this is a synchronous mount, make sure that the
1706 * transaction goes to disk before returning to the user.
1707 */
1708 if (xfs_has_wsync(mp))
1709 xfs_trans_set_sync(tp);
1710
1711 error = xfs_trans_commit(tp);
1712
1713 trace_xfs_swap_extent_after(ip, 0);
1714 trace_xfs_swap_extent_after(tip, 1);
1715
1716out_unlock_ilock:
1717 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1718 xfs_iunlock(tip, XFS_ILOCK_EXCL);
1719out_unlock:
1720 filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1721 VFS_I(tip)->i_mapping);
1722 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1723 return error;
1724
1725out_trans_cancel:
1726 xfs_trans_cancel(tp);
1727 goto out_unlock_ilock;
1728}