/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ?
		(xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
		XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
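
/*
 * Worked example for xfs_fsb_to_db() above, assuming 4096-byte filesystem
 * blocks: realtime files address blocks linearly on the realtime device, so
 * XFS_FSB_TO_BB() simply scales the fsblock number to 512-byte basic blocks
 * (fsb 100 -> daddr 800).  Data-device fsblock numbers instead encode an AG
 * number plus an offset within that AG, which XFS_FSB_TO_DADDR() has to
 * decompose before scaling.
 */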

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, true);
}
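
/*
 * Unit-conversion note for xfs_zero_extent() above: s_blocksize_bits - 9
 * converts filesystem blocks to 512-byte sectors, e.g. with 4096-byte blocks
 * the shift is (12 - 9) = 3 and each block becomes 8 sectors for
 * blkdev_issue_zeroout().
 */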

int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t		atype = 0;	/* type for allocation routines */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount point structure */
	xfs_extlen_t		prod = 0;	/* product factor for allocators */
	xfs_extlen_t		ralen = 0;	/* realtime allocation length */
	xfs_extlen_t		align;		/* minimum allocation alignment */
	xfs_rtblock_t		rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
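	/*
	 * Worked example for the clamp above, assuming 4096-byte blocks and
	 * a realtime extent size of 16 blocks: MAXEXTLEN is 2^21 - 1 =
	 * 2097151 blocks, so ralen is capped at 2097151 / 16 = 131071
	 * rtextents (2097136 blocks), the largest rtextent multiple that
	 * still fits in the 21-bit on-disk extent length field.
	 */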

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
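
/*
 * Allocator fallback note: the first xfs_rtallocate_extent() call above asks
 * for extents aligned to @prod rtextents (derived from the inode's extent
 * size hint).  If that fails and prod > 1, the allocation is retried with
 * prod == 1, trading alignment for a better chance of finding free realtime
 * space.
 */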

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int			b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int				/* error */
xfs_bmap_count_tree(
	xfs_mount_t		*mp,		/* file system mount point */
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_ifork_t		*ifp,		/* inode fork pointer */
	xfs_fsblock_t		blockno,	/* file system block number */
	int			levelin,	/* level in btree */
	int			*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
				    &xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						    XFS_BMAP_BTREE_REF,
						    &xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						    XFS_BMAP_BTREE_REF,
						    &xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
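
/*
 * Traversal note: xfs_bmap_count_tree() descends only the leftmost path of
 * the bmbt; at each interior level it counts that level's blocks by walking
 * bb_rightsib pointers, and at the lowest level it walks every leaf the same
 * way, summing record block counts via xfs_bmap_disk_count_leaves().
 */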

/*
 * Count fsblocks of the given fork.
 */
static int				/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	int			whichfork,
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock,
	bool			moretocome)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, whichfork);
		if (!moretocome &&
		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		    (lastx == xfs_iext_count(ifp) - 1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/* Adjust the reported bmap around shared/unshared extent transitions. */
STATIC int
xfs_getbmap_adjust_shared(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*map,
	struct getbmapx		*out,
	struct xfs_bmbt_irec	*next_map)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		ebno;
	xfs_extlen_t		elen;
	xfs_extlen_t		nlen;
	int			error;

	next_map->br_startblock = NULLFSBLOCK;
	next_map->br_startoff = NULLFILEOFF;
	next_map->br_blockcount = 0;

	/* Only written data blocks can be shared. */
	if (!xfs_is_reflink_inode(ip) || whichfork != XFS_DATA_FORK ||
	    map->br_startblock == DELAYSTARTBLOCK ||
	    map->br_startblock == HOLESTARTBLOCK ||
	    ISUNWRITTEN(map))
		return 0;

	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
	error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
			&ebno, &elen, true);
	if (error)
		return error;

	if (ebno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (agbno == ebno) {
		/*
		 * Shared extent at (agbno, elen).  Shrink the reported
		 * extent length and prepare to move the start of map[i]
		 * to agbno+elen, with the aim of (re)formatting the new
		 * map[i] the next time through the inner loop.
		 */
		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
		out->bmv_oflags |= BMV_OF_SHARED;
		if (elen != map->br_blockcount) {
			*next_map = *map;
			next_map->br_startblock += elen;
			next_map->br_startoff += elen;
			next_map->br_blockcount -= elen;
		}
		map->br_blockcount -= elen;
	} else {
		/*
		 * There's an unshared extent (agbno, ebno - agbno)
		 * followed by shared extent at (ebno, elen).  Shrink
		 * the reported extent length to cover only the unshared
		 * extent and prepare to move up the start of map[i] to
		 * ebno, with the aim of (re)formatting the new map[i]
		 * the next time through the inner loop.
		 */
		*next_map = *map;
		nlen = ebno - agbno;
		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
		next_map->br_startblock += nlen;
		next_map->br_startoff += nlen;
		next_map->br_blockcount -= nlen;
		map->br_blockcount -= nlen;
	}

	return 0;
}
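
/*
 * Example of the split logic above: if @map covers agbno 100-199 and
 * xfs_reflink_find_shared() reports a shared run starting at ebno == 140
 * with elen == 30, the unshared head (blocks 100-139) is reported first and
 * @next_map is primed at block 140; the shared middle and any unshared tail
 * are then emitted as separate getbmapx records on later passes through the
 * caller's loop.
 */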

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;
	struct xfs_bmbt_irec	inject_map;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;

#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
		break;
	case XFS_COW_FORK:
		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
			return -EINVAL;

		if (xfs_get_cowextsz_hint(ip)) {
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	default:
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) {
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;
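
	/*
	 * Sizing note: bmv_count includes the slot occupied by the header
	 * structure itself, which is why nex is bmv_count - 1 above and why
	 * the loop below compares cur_ext against bmv_count - 1.
	 */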

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	do {
		nmap = (nex > subnex) ? subnex : nex;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && bmv->bmv_length &&
				cur_ext < bmv->bmv_count - 1; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			/* Is this a shared block? */
			error = xfs_getbmap_adjust_shared(ip, whichfork,
					&map[i], &out[cur_ext], &inject_map);
			if (error)
				goto out_free_map;

			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
					&out[cur_ext], prealloced, bmvend,
					map[i].br_startblock,
					inject_map.br_startblock != NULLFSBLOCK))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			/*
			 * In order to report shared extents accurately,
			 * we report each distinct shared/unshared part
			 * of a single bmbt record using multiple bmap
			 * extents.  To make that happen, we iterate the
			 * same map array item multiple times, each
			 * time trimming out the subextent that we just
			 * reported.
			 *
			 * Because of this, we must check the out array
			 * index (cur_ext) directly against bmv_count-1
			 * to avoid overflows.
			 */
			if (inject_map.br_startblock != NULLFSBLOCK) {
				map[i] = inject_map;
				i--;
			}
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}
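
/*
 * Note on the formatter pass above: out[] is filled while the iolock/ilock
 * are held, but the copy-out to userspace via @formatter only happens after
 * both locks have been dropped, so any page faults taken while formatting
 * occur without inode locks held.
 */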

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}
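
/*
 * Note: the NULL transaction pointer passed to xfs_bunmapi() above is safe
 * because punching a delalloc extent only adjusts in-core state; no on-disk
 * blocks are freed, so no transaction or log reservation is required.
 */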

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
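
/*
 * Usage sketch: callers pair this check with xfs_free_eofblocks(), e.g.
 *
 *	if (xfs_can_free_eofblocks(ip, force))
 *		error = xfs_free_eofblocks(ip);
 *
 * as xfs_shift_file_space() does below before shifting extents.
 */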

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
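
/*
 * Reservation note for the loop above: each iteration reserves space for at
 * most @nimaps (here 1) new extents, clamped to MAXEXTLEN blocks, so a large
 * preallocation request is satisfied by a series of bounded transactions
 * rather than one oversized reservation.
 */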

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	error = xfs_defer_finish(&tp, &dfops, ip);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
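
/*
 * Note: the hardcoded 2 passed to xfs_bunmapi() above is the maximum number
 * of extents to unmap per transaction, keeping each transaction within its
 * tr_write reservation; the caller loops until *done is set.
 */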

static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		xfs_daddr_t	block;

		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		block = imap.br_startblock;
		mod = do_div(block, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT
	 * file and we can't use unwritten extents then we actually need to
	 * zero the whole extent, otherwise we just need to take care of the
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is
	 * smart enough to skip any holes, including those we just created,
	 * but we must take care not to zero beyond EOF and enlarge i_size.
	 */

	if (offset >= XFS_ISIZE(ip))
		return 0;

	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;

	return xfs_zero_range(ip, offset, len, NULL);
}
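
/*
 * Example of the rounding above: with 4096-byte blocks, freeing bytes
 * 1000-9999 unmaps only fsblock 1 (bytes 4096-8191); the partial blocks at
 * either end stay mapped and are zeroed by xfs_zero_range() instead.
 */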

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till last extent.
 * If we are shifting right, we will start with last extent inside file space
 * and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;
	uint			resblks;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		/*
		 * Reserve blocks to cover potential extent merges after left
		 * shift operations.
		 */
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extents as it has the ilock held.
		 */
		resblks = 0;
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of an extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;

		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(&dfops, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &dfops,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
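
/*
 * Note on the loop above: only XFS_BMAP_MAX_SHIFT_EXTENTS extents are moved
 * per transaction because the tr_write reservation covers at most two bmbt
 * updates; @next_fsb carries the cursor from one transaction to the next
 * until @done is set.
 */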

/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * The first thing we do is free data blocks in the specified range
 * by calling xfs_free_file_space(). It also syncs dirty data and
 * invalidates the page cache over the region on which the collapse
 * range is working. Then we shift extent records to the left to cover
 * the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}

/*
 * xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
 * The first thing we do is sync dirty data and invalidate the page cache
 * over the region on which the insert range is working. Then we split an
 * extent into two extents at the given offset by calling
 * xfs_bmap_split_extent, and shift all the extent records which lie in
 * [offset, last allocated extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
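
/*
 * Concrete failure case for the checks above: on an attr2 filesystem the
 * target might have room for 7 inline extents while the temporary inode only
 * has room for 6; a file defragmented down to 7 extents is then in btree
 * format in the temporary inode but could not be converted back to extent
 * format after the swap, hence the -EINVAL.
 */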

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int			error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	xfs_fsblock_t		firstfsb;
	struct xfs_defer_ops	dfops;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	__uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			xfs_defer_init(&dfops, &firstfsb);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out_defer;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					tip, &uirec);
			if (error)
				goto out_defer;

			/* Remove the mapping from the source file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					ip, &irec);
			if (error)
				goto out_defer;

			/* Map the donor file's blocks into the source file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					ip, &uirec);
			if (error)
				goto out_defer;

			/* Map the source file's blocks into the donor file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					tip, &irec);
			if (error)
				goto out_defer;

			error = xfs_defer_finish(tpp, &dfops, ip);
			if (error)
				goto out_defer;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	struct xfs_ifork	tempifp, *ifp, *tifp;
	int			aforkblks = 0;
	int			taforkblks = 0;
	xfs_extnum_t		nextents;
	__uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0) &&
	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
				&aforkblks);
		if (error)
			return error;
	}
	if ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0) &&
	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		(*target_log_flags) |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			return error;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		(*src_log_flags) |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			return error;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	tempifp = *ifp;		/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.  Otherwise
		 * it's already NULL or pointing to the extent.
		 */
		nextents = xfs_iext_count(&ip->i_df);
		if (nextents <= XFS_INLINE_EXTS)
			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.  Otherwise
		 * it's already NULL or pointing to the extent.
		 */
		nextents = xfs_iext_count(&tip->i_df);
		if (nextents <= XFS_INLINE_EXTS)
			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}
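
/*
 * Note on the struct copies above: swapping the xfs_ifork structures moves
 * extent lists, btree roots and in-core counters in one shot, but a fork
 * whose extent list fits inline (<= XFS_INLINE_EXTS entries) would still
 * point at the other inode's inline buffer afterwards, which is why
 * if_u1.if_extents is re-pointed at if_u2.if_inline_ext for each inode.
 */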
1925
1926int
1927xfs_swap_extents(
1928 struct xfs_inode *ip, /* target inode */
1929 struct xfs_inode *tip, /* tmp inode */
1930 struct xfs_swapext *sxp)
1931{
1932 struct xfs_mount *mp = ip->i_mount;
1933 struct xfs_trans *tp;
1934 struct xfs_bstat *sbp = &sxp->sx_stat;
1935 int src_log_flags, target_log_flags;
1936 int error = 0;
1937 int lock_flags;
1938 struct xfs_ifork *cowfp;
1939 __uint64_t f;
1940 int resblks;
1941
1942 /*
1943 * Lock the inodes against other IO, page faults and truncate to
1944	 * begin with. Then we can safely ensure the inodes are flushed and
1945	 * have no page cache. Once we have done this we can take the ilocks and
1946 * do the rest of the checks.
1947 */
1948 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1949 lock_flags = XFS_MMAPLOCK_EXCL;
1950 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1951
1952 /* Verify that both files have the same format */
1953 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1954 error = -EINVAL;
1955 goto out_unlock;
1956 }
1957
1958 /* Verify both files are either real-time or non-realtime */
1959 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1960 error = -EINVAL;
1961 goto out_unlock;
1962 }
1963
1964 error = xfs_swap_extent_flush(ip);
1965 if (error)
1966 goto out_unlock;
1967 error = xfs_swap_extent_flush(tip);
1968 if (error)
1969 goto out_unlock;
1970
1971 /*
1972 * Extent "swapping" with rmap requires a permanent reservation and
1973 * a block reservation because it's really just a remap operation
1974 * performed with log redo items!
1975 */
1976 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1977 /*
1978 * Conceptually this shouldn't affect the shape of either
1979 * bmbt, but since we atomically move extents one by one,
1980 * we reserve enough space to rebuild both trees.
1981 */
1982 resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
1983 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
1984 XFS_DATA_FORK) +
1985 XFS_SWAP_RMAP_SPACE_RES(mp,
1986 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
1987 XFS_DATA_FORK);
1988 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1989 0, 0, &tp);
1990 } else
1991 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
1992 0, 0, &tp);
1993 if (error)
1994 goto out_unlock;
1995
1996 /*
1997	 * Lock and join the inodes to the transaction so that transaction commit
1998 * or cancel will unlock the inodes from this point onwards.
1999 */
2000 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
2001 lock_flags |= XFS_ILOCK_EXCL;
2002 xfs_trans_ijoin(tp, ip, 0);
2003 xfs_trans_ijoin(tp, tip, 0);
2004
2005
2006 /* Verify all data are being swapped */
2007 if (sxp->sx_offset != 0 ||
2008 sxp->sx_length != ip->i_d.di_size ||
2009 sxp->sx_length != tip->i_d.di_size) {
2010 error = -EFAULT;
2011 goto out_trans_cancel;
2012 }
2013
2014 trace_xfs_swap_extent_before(ip, 0);
2015 trace_xfs_swap_extent_before(tip, 1);
2016
2017 /* check inode formats now that data is flushed */
2018 error = xfs_swap_extents_check_format(ip, tip);
2019 if (error) {
2020 xfs_notice(mp,
2021 "%s: inode 0x%llx format is incompatible for exchanging.",
2022 __func__, ip->i_ino);
2023 goto out_trans_cancel;
2024 }
2025
2026 /*
2027	 * Compare the current change & modify times with those
2028	 * passed in. If they differ, we abort this swap.
2029	 * This is the mechanism used to assure the calling
2030 * process that the file was not changed out from
2031 * under it.
2032 */
2033 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2034 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2035 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2036 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
2037 error = -EBUSY;
2038 goto out_trans_cancel;
2039 }
2040
2041 /*
2042 * Note the trickiness in setting the log flags - we set the owner log
2043 * flag on the opposite inode (i.e. the inode we are setting the new
2044 * owner to be) because once we swap the forks and log that, log
2045 * recovery is going to see the fork as owned by the swapped inode,
2046 * not the pre-swapped inodes.
2047 */
2048 src_log_flags = XFS_ILOG_CORE;
2049 target_log_flags = XFS_ILOG_CORE;
2050
2051 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2052 error = xfs_swap_extent_rmap(&tp, ip, tip);
2053 else
2054 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2055 &target_log_flags);
2056 if (error)
2057 goto out_trans_cancel;
2058
2059 /* Do we have to swap reflink flags? */
2060 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2061 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
2062 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2063 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2064 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2065 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2066 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2067 cowfp = ip->i_cowfp;
2068 ip->i_cowfp = tip->i_cowfp;
2069 tip->i_cowfp = cowfp;
2070 xfs_inode_set_cowblocks_tag(ip);
2071 xfs_inode_set_cowblocks_tag(tip);
2072 }
2073
2074 xfs_trans_log_inode(tp, ip, src_log_flags);
2075 xfs_trans_log_inode(tp, tip, target_log_flags);
2076
2077 /*
2078 * If this is a synchronous mount, make sure that the
2079 * transaction goes to disk before returning to the user.
2080 */
2081 if (mp->m_flags & XFS_MOUNT_WSYNC)
2082 xfs_trans_set_sync(tp);
2083
2084 error = xfs_trans_commit(tp);
2085
2086 trace_xfs_swap_extent_after(ip, 0);
2087 trace_xfs_swap_extent_after(tip, 1);
2088
2089out_unlock:
2090 xfs_iunlock(ip, lock_flags);
2091 xfs_iunlock(tip, lock_flags);
2092 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2093 return error;
2094
2095out_trans_cancel:
2096 xfs_trans_cancel(tp);
2097 goto out_unlock;
2098}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * Copyright (c) 2012 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7#include "xfs.h"
8#include "xfs_fs.h"
9#include "xfs_shared.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_trans_resv.h"
13#include "xfs_bit.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_inode.h"
17#include "xfs_btree.h"
18#include "xfs_trans.h"
19#include "xfs_alloc.h"
20#include "xfs_bmap.h"
21#include "xfs_bmap_util.h"
22#include "xfs_bmap_btree.h"
23#include "xfs_rtalloc.h"
24#include "xfs_error.h"
25#include "xfs_quota.h"
26#include "xfs_trans_space.h"
27#include "xfs_trace.h"
28#include "xfs_icache.h"
29#include "xfs_iomap.h"
30#include "xfs_reflink.h"
31
32/* Kernel only BMAP related definitions and functions */
33
34/*
35 * Convert the given file system block to a disk block. We have to treat it
36 * differently based on whether the file is a real time file or not, because the
37 * bmap code does.
38 */
39xfs_daddr_t
40xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
41{
42 if (XFS_IS_REALTIME_INODE(ip))
43 return XFS_FSB_TO_BB(ip->i_mount, fsb);
44 return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
45}
46
47/*
48 * Routine to zero an extent on disk allocated to the specific inode.
49 *
50 * The VFS functions take a linearised filesystem block offset, so we have to
51 * convert the sparse xfs fsb to the right format first.
52 * VFS types are real funky, too.
53 */
54int
55xfs_zero_extent(
56 struct xfs_inode *ip,
57 xfs_fsblock_t start_fsb,
58 xfs_off_t count_fsb)
59{
60 struct xfs_mount *mp = ip->i_mount;
61 struct xfs_buftarg *target = xfs_inode_buftarg(ip);
62 xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
63 sector_t block = XFS_BB_TO_FSBT(mp, sector);
64
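	/*
	 * The shift by (s_blocksize_bits - 9) converts filesystem blocks to
	 * 512-byte sectors, e.g. a shift of 3 for 4096-byte blocks.
	 */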
65 return blkdev_issue_zeroout(target->bt_bdev,
66 block << (mp->m_super->s_blocksize_bits - 9),
67 count_fsb << (mp->m_super->s_blocksize_bits - 9),
68 GFP_NOFS, 0);
69}
70
71#ifdef CONFIG_XFS_RT
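/*
 * Allocate an extent for a realtime file. The allocation is aligned to the
 * realtime extent size (and to any extent size hint), placed near the
 * previous extent where possible, and the alignment and locality
 * constraints are progressively relaxed via the retry label below whenever
 * the realtime allocator comes up empty.
 */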
72int
73xfs_bmap_rtalloc(
74 struct xfs_bmalloca *ap)
75{
76 struct xfs_mount *mp = ap->ip->i_mount;
77 xfs_fileoff_t orig_offset = ap->offset;
78 xfs_rtblock_t rtb;
79 xfs_extlen_t prod = 0; /* product factor for allocators */
80	xfs_extlen_t		mod = 0;	/* offset modulus for alignment */
81 xfs_extlen_t ralen = 0; /* realtime allocation length */
82 xfs_extlen_t align; /* minimum allocation alignment */
83 xfs_extlen_t orig_length = ap->length;
84 xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
85 xfs_extlen_t raminlen;
86 bool rtlocked = false;
87 bool ignore_locality = false;
88 int error;
89
90 align = xfs_get_extsz_hint(ap->ip);
91retry:
92 prod = align / mp->m_sb.sb_rextsize;
93 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
94 align, 1, ap->eof, 0,
95 ap->conv, &ap->offset, &ap->length);
96 if (error)
97 return error;
98 ASSERT(ap->length);
99 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
100
101 /*
102 * If we shifted the file offset downward to satisfy an extent size
103 * hint, increase minlen by that amount so that the allocator won't
104 * give us an allocation that's too short to cover at least one of the
105 * blocks that the caller asked for.
106 */
107 if (ap->offset != orig_offset)
108 minlen += orig_offset - ap->offset;
109
110 /*
111 * If the offset & length are not perfectly aligned
112 * then kill prod, it will just get us in trouble.
113 */
114 div_u64_rem(ap->offset, align, &mod);
115 if (mod || ap->length % align)
116 prod = 1;
117 /*
118 * Set ralen to be the actual requested length in rtextents.
119 */
120 ralen = ap->length / mp->m_sb.sb_rextsize;
121 /*
122 * If the old value was close enough to MAXEXTLEN that
123 * we rounded up to it, cut it back so it's valid again.
124 * Note that if it's a really large request (bigger than
125 * MAXEXTLEN), we don't hear about that number, and can't
126 * adjust the starting point to match it.
127 */
128 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
129 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
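	/*
	 * Worked example: with sb_rextsize = 4, MAXEXTLEN = 2^21 - 1 blocks
	 * clamps ralen to 2097151 / 4 = 524287 rtextents, i.e. 2097148
	 * blocks, safely back under the on-disk extent length limit.
	 */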
130
131 /*
132 * Lock out modifications to both the RT bitmap and summary inodes
133 */
134 if (!rtlocked) {
135 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
136 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
137 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
138 xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
139 rtlocked = true;
140 }
141
142 /*
143 * If it's an allocation to an empty file at offset 0,
144 * pick an extent that will space things out in the rt area.
145 */
146 if (ap->eof && ap->offset == 0) {
147 xfs_rtblock_t rtx; /* realtime extent no */
148
149 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
150 if (error)
151 return error;
152 ap->blkno = rtx * mp->m_sb.sb_rextsize;
153 } else {
154 ap->blkno = 0;
155 }
156
157 xfs_bmap_adjacent(ap);
158
159 /*
160 * Realtime allocation, done through xfs_rtallocate_extent.
161 */
162 if (ignore_locality)
163 ap->blkno = 0;
164 else
165 do_div(ap->blkno, mp->m_sb.sb_rextsize);
166 rtb = ap->blkno;
167 ap->length = ralen;
168 raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
169 error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
170 &ralen, ap->wasdel, prod, &rtb);
171 if (error)
172 return error;
173
174 if (rtb != NULLRTBLOCK) {
175 ap->blkno = rtb * mp->m_sb.sb_rextsize;
176 ap->length = ralen * mp->m_sb.sb_rextsize;
177 ap->ip->i_nblocks += ap->length;
178 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
179 if (ap->wasdel)
180 ap->ip->i_delayed_blks -= ap->length;
181 /*
182 * Adjust the disk quota also. This was reserved
183 * earlier.
184 */
185 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
186 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
187 XFS_TRANS_DQ_RTBCOUNT, ap->length);
188 return 0;
189 }
190
191 if (align > mp->m_sb.sb_rextsize) {
192 /*
193 * We previously enlarged the request length to try to satisfy
194 * an extent size hint. The allocator didn't return anything,
195 * so reset the parameters to the original values and try again
196 * without alignment criteria.
197 */
198 ap->offset = orig_offset;
199 ap->length = orig_length;
200 minlen = align = mp->m_sb.sb_rextsize;
201 goto retry;
202 }
203
204 if (!ignore_locality && ap->blkno != 0) {
205 /*
206 * If we can't allocate near a specific rt extent, try again
207 * without locality criteria.
208 */
209 ignore_locality = true;
210 goto retry;
211 }
212
213 ap->blkno = NULLFSBLOCK;
214 ap->length = 0;
215 return 0;
216}
217#endif /* CONFIG_XFS_RT */
218
219/*
220 * Extent tree block counting routines.
221 */
222
223/*
224 * Count leaf blocks given a range of extent records. Delayed allocation
225 * extents are not counted towards the totals.
226 */
227xfs_extnum_t
228xfs_bmap_count_leaves(
229 struct xfs_ifork *ifp,
230 xfs_filblks_t *count)
231{
232 struct xfs_iext_cursor icur;
233 struct xfs_bmbt_irec got;
234 xfs_extnum_t numrecs = 0;
235
236 for_each_xfs_iext(ifp, &icur, &got) {
237 if (!isnullstartblock(got.br_startblock)) {
238 *count += got.br_blockcount;
239 numrecs++;
240 }
241 }
242
243 return numrecs;
244}
245
246/*
247 * Count fsblocks of the given fork. Delayed allocation extents are
248 * not counted towards the totals.
249 */
250int
251xfs_bmap_count_blocks(
252 struct xfs_trans *tp,
253 struct xfs_inode *ip,
254 int whichfork,
255 xfs_extnum_t *nextents,
256 xfs_filblks_t *count)
257{
258 struct xfs_mount *mp = ip->i_mount;
259 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
260 struct xfs_btree_cur *cur;
261 xfs_extlen_t btblocks = 0;
262 int error;
263
264 *nextents = 0;
265 *count = 0;
266
267 if (!ifp)
268 return 0;
269
270 switch (ifp->if_format) {
271 case XFS_DINODE_FMT_BTREE:
272 error = xfs_iread_extents(tp, ip, whichfork);
273 if (error)
274 return error;
275
276 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
277 error = xfs_btree_count_blocks(cur, &btblocks);
278 xfs_btree_del_cursor(cur, error);
279 if (error)
280 return error;
281
282 /*
283 * xfs_btree_count_blocks includes the root block contained in
284 * the inode fork in @btblocks, so subtract one because we're
285 * only interested in allocated disk blocks.
286 */
287 *count += btblocks - 1;
288
289 fallthrough;
290 case XFS_DINODE_FMT_EXTENTS:
291 *nextents = xfs_bmap_count_leaves(ifp, count);
292 break;
293 }
294
295 return 0;
296}
297
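/*
 * Format one extent record into the next free slot of the getbmap output
 * array, converting filesystem blocks to 512-byte basic blocks, and advance
 * the request range in @bmv past the reported extent.
 */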
298static int
299xfs_getbmap_report_one(
300 struct xfs_inode *ip,
301 struct getbmapx *bmv,
302 struct kgetbmap *out,
303 int64_t bmv_end,
304 struct xfs_bmbt_irec *got)
305{
306 struct kgetbmap *p = out + bmv->bmv_entries;
307 bool shared = false;
308 int error;
309
310 error = xfs_reflink_trim_around_shared(ip, got, &shared);
311 if (error)
312 return error;
313
314 if (isnullstartblock(got->br_startblock) ||
315 got->br_startblock == DELAYSTARTBLOCK) {
316 /*
317 * Delalloc extents that start beyond EOF can occur due to
318 * speculative EOF allocation when the delalloc extent is larger
319 * than the largest freespace extent at conversion time. These
320 * extents cannot be converted by data writeback, so can exist
321 * here even if we are not supposed to be finding delalloc
322 * extents.
323 */
324 if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
325 ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
326
327 p->bmv_oflags |= BMV_OF_DELALLOC;
328 p->bmv_block = -2;
329 } else {
330 p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
331 }
332
333 if (got->br_state == XFS_EXT_UNWRITTEN &&
334 (bmv->bmv_iflags & BMV_IF_PREALLOC))
335 p->bmv_oflags |= BMV_OF_PREALLOC;
336
337 if (shared)
338 p->bmv_oflags |= BMV_OF_SHARED;
339
340 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
341 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
342
343 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
344 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
345 bmv->bmv_entries++;
346 return 0;
347}
348
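/*
 * Report a hole to the getbmap output array as an entry with bmv_block set
 * to -1, unless the caller asked for holes to be omitted (BMV_IF_NO_HOLES).
 */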
349static void
350xfs_getbmap_report_hole(
351 struct xfs_inode *ip,
352 struct getbmapx *bmv,
353 struct kgetbmap *out,
354 int64_t bmv_end,
355 xfs_fileoff_t bno,
356 xfs_fileoff_t end)
357{
358 struct kgetbmap *p = out + bmv->bmv_entries;
359
360 if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
361 return;
362
363 p->bmv_block = -1;
364 p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
365 p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
366
367 bmv->bmv_offset = p->bmv_offset + p->bmv_length;
368 bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
369 bmv->bmv_entries++;
370}
371
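/*
 * Return true once the output array is full. bmv_count includes the slot
 * taken by the getbmapx header structure itself, hence the "- 1".
 */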
372static inline bool
373xfs_getbmap_full(
374 struct getbmapx *bmv)
375{
376 return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
377}
378
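/*
 * Advance @rec past the portion that has already been reported, returning
 * false once the record is fully consumed. This lets the caller emit one
 * getbmapx entry per distinct shared/unshared piece of a single mapping.
 */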
379static bool
380xfs_getbmap_next_rec(
381 struct xfs_bmbt_irec *rec,
382 xfs_fileoff_t total_end)
383{
384 xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
385
386 if (end == total_end)
387 return false;
388
389 rec->br_startoff += rec->br_blockcount;
390 if (!isnullstartblock(rec->br_startblock) &&
391 rec->br_startblock != DELAYSTARTBLOCK)
392 rec->br_startblock += rec->br_blockcount;
393 rec->br_blockcount = total_end - end;
394 return true;
395}
396
397/*
398 * Get inode's extents as described in bmv, and format for output.
399 * Extents are formatted into the out array until all extents in the
400 * requested range are mapped or until the passed-in bmv->bmv_count
401 * slots have been filled, whichever comes first. bmv is updated to
402 * reflect the portion of the range already mapped.
403 */
404int /* error code */
405xfs_getbmap(
406 struct xfs_inode *ip,
407 struct getbmapx *bmv, /* user bmap structure */
408 struct kgetbmap *out)
409{
410 struct xfs_mount *mp = ip->i_mount;
411 int iflags = bmv->bmv_iflags;
412 int whichfork, lock, error = 0;
413 int64_t bmv_end, max_len;
414 xfs_fileoff_t bno, first_bno;
415 struct xfs_ifork *ifp;
416 struct xfs_bmbt_irec got, rec;
417 xfs_filblks_t len;
418 struct xfs_iext_cursor icur;
419
420 if (bmv->bmv_iflags & ~BMV_IF_VALID)
421 return -EINVAL;
422#ifndef DEBUG
423 /* Only allow CoW fork queries if we're debugging. */
424 if (iflags & BMV_IF_COWFORK)
425 return -EINVAL;
426#endif
427 if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
428 return -EINVAL;
429
430 if (bmv->bmv_length < -1)
431 return -EINVAL;
432 bmv->bmv_entries = 0;
433 if (bmv->bmv_length == 0)
434 return 0;
435
436 if (iflags & BMV_IF_ATTRFORK)
437 whichfork = XFS_ATTR_FORK;
438 else if (iflags & BMV_IF_COWFORK)
439 whichfork = XFS_COW_FORK;
440 else
441 whichfork = XFS_DATA_FORK;
442 ifp = XFS_IFORK_PTR(ip, whichfork);
443
444 xfs_ilock(ip, XFS_IOLOCK_SHARED);
445 switch (whichfork) {
446 case XFS_ATTR_FORK:
447 if (!XFS_IFORK_Q(ip))
448 goto out_unlock_iolock;
449
450 max_len = 1LL << 32;
451 lock = xfs_ilock_attr_map_shared(ip);
452 break;
453 case XFS_COW_FORK:
454 /* No CoW fork? Just return */
455 if (!ifp)
456 goto out_unlock_iolock;
457
458 if (xfs_get_cowextsz_hint(ip))
459 max_len = mp->m_super->s_maxbytes;
460 else
461 max_len = XFS_ISIZE(ip);
462
463 lock = XFS_ILOCK_SHARED;
464 xfs_ilock(ip, lock);
465 break;
466 case XFS_DATA_FORK:
467 if (!(iflags & BMV_IF_DELALLOC) &&
468 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
469 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
470 if (error)
471 goto out_unlock_iolock;
472
473 /*
474 * Even after flushing the inode, there can still be
475 * delalloc blocks on the inode beyond EOF due to
476 * speculative preallocation. These are not removed
477 * until the release function is called or the inode
478 * is inactivated. Hence we cannot assert here that
479 * ip->i_delayed_blks == 0.
480 */
481 }
482
483 if (xfs_get_extsz_hint(ip) ||
484 (ip->i_diflags &
485 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
486 max_len = mp->m_super->s_maxbytes;
487 else
488 max_len = XFS_ISIZE(ip);
489
490 lock = xfs_ilock_data_map_shared(ip);
491 break;
492 }
493
494 switch (ifp->if_format) {
495 case XFS_DINODE_FMT_EXTENTS:
496 case XFS_DINODE_FMT_BTREE:
497 break;
498 case XFS_DINODE_FMT_LOCAL:
499 /* Local format inode forks report no extents. */
500 goto out_unlock_ilock;
501 default:
502 error = -EINVAL;
503 goto out_unlock_ilock;
504 }
505
506 if (bmv->bmv_length == -1) {
507 max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
508 bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
509 }
510
511 bmv_end = bmv->bmv_offset + bmv->bmv_length;
512
513 first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
514 len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
515
516 error = xfs_iread_extents(NULL, ip, whichfork);
517 if (error)
518 goto out_unlock_ilock;
519
520 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
521 /*
522 * Report a whole-file hole if the delalloc flag is set to
523 * stay compatible with the old implementation.
524 */
525 if (iflags & BMV_IF_DELALLOC)
526 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
527 XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
528 goto out_unlock_ilock;
529 }
530
531 while (!xfs_getbmap_full(bmv)) {
532 xfs_trim_extent(&got, first_bno, len);
533
534 /*
535 * Report an entry for a hole if this extent doesn't directly
536 * follow the previous one.
537 */
538 if (got.br_startoff > bno) {
539 xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
540 got.br_startoff);
541 if (xfs_getbmap_full(bmv))
542 break;
543 }
544
545 /*
546 * In order to report shared extents accurately, we report each
547 * distinct shared / unshared part of a single bmbt record with
548 * an individual getbmapx record.
549 */
550 bno = got.br_startoff + got.br_blockcount;
551 rec = got;
552 do {
553 error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
554 &rec);
555 if (error || xfs_getbmap_full(bmv))
556 goto out_unlock_ilock;
557 } while (xfs_getbmap_next_rec(&rec, bno));
558
559 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
560 xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
561
562 out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
563
564 if (whichfork != XFS_ATTR_FORK && bno < end &&
565 !xfs_getbmap_full(bmv)) {
566 xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
567 bno, end);
568 }
569 break;
570 }
571
572 if (bno >= first_bno + len)
573 break;
574 }
575
576out_unlock_ilock:
577 xfs_iunlock(ip, lock);
578out_unlock_iolock:
579 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
580 return error;
581}
582
583/*
584 * Dead simple method of punching delayed allocation blocks from a range in
585 * the inode. This will always punch out both the start and end blocks, even
586 * if the range only partially overlaps them, so it is up to the caller to
587 * ensure that partial blocks are not passed in.
588 */
589int
590xfs_bmap_punch_delalloc_range(
591 struct xfs_inode *ip,
592 xfs_fileoff_t start_fsb,
593 xfs_fileoff_t length)
594{
595 struct xfs_ifork *ifp = &ip->i_df;
596 xfs_fileoff_t end_fsb = start_fsb + length;
597 struct xfs_bmbt_irec got, del;
598 struct xfs_iext_cursor icur;
599 int error = 0;
600
601 ASSERT(!xfs_need_iread_extents(ifp));
602
603 xfs_ilock(ip, XFS_ILOCK_EXCL);
604 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
605 goto out_unlock;
606
607 while (got.br_startoff + got.br_blockcount > start_fsb) {
608 del = got;
609 xfs_trim_extent(&del, start_fsb, length);
610
611 /*
612		 * A delete can push the cursor forward. Step back to the
613		 * previous extent for non-delalloc extents or for extents
614		 * outside the target range.
615 */
616 if (!del.br_blockcount ||
617 !isnullstartblock(del.br_startblock)) {
618 if (!xfs_iext_prev_extent(ifp, &icur, &got))
619 break;
620 continue;
621 }
622
623 error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
624 &got, &del);
625 if (error || !xfs_iext_get_extent(ifp, &icur, &got))
626 break;
627 }
628
629out_unlock:
630 xfs_iunlock(ip, XFS_ILOCK_EXCL);
631 return error;
632}
633
634/*
635 * Test whether it is appropriate to check an inode for and free post EOF
636 * blocks. The 'force' parameter determines whether we should also consider
637 * regular files that are marked preallocated or append-only.
638 */
639bool
640xfs_can_free_eofblocks(
641 struct xfs_inode *ip,
642 bool force)
643{
644 struct xfs_bmbt_irec imap;
645 struct xfs_mount *mp = ip->i_mount;
646 xfs_fileoff_t end_fsb;
647 xfs_fileoff_t last_fsb;
648 int nimaps = 1;
649 int error;
650
651 /*
652	 * Caller must either hold the exclusive io lock or be inactivating
653 * the inode, which guarantees there are no other users of the inode.
654 */
655 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
656 (VFS_I(ip)->i_state & I_FREEING));
657
658 /* prealloc/delalloc exists only on regular files */
659 if (!S_ISREG(VFS_I(ip)->i_mode))
660 return false;
661
662 /*
663	 * Zero sized files with no cached pages and no delalloc blocks will not
664 * have speculative prealloc/delalloc blocks to remove.
665 */
666 if (VFS_I(ip)->i_size == 0 &&
667 VFS_I(ip)->i_mapping->nrpages == 0 &&
668 ip->i_delayed_blks == 0)
669 return false;
670
671 /* If we haven't read in the extent list, then don't do it now. */
672 if (xfs_need_iread_extents(&ip->i_df))
673 return false;
674
675 /*
676 * Do not free real preallocated or append-only files unless the file
677 * has delalloc blocks and we are forced to remove them.
678 */
679 if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
680 if (!force || ip->i_delayed_blks == 0)
681 return false;
682
683 /*
684 * Do not try to free post-EOF blocks if EOF is beyond the end of the
685 * range supported by the page cache, because the truncation will loop
686 * forever.
687 */
688 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
689 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
690 if (last_fsb <= end_fsb)
691 return false;
692
693 /*
694 * Look up the mapping for the first block past EOF. If we can't find
695 * it, there's nothing to free.
696 */
697 xfs_ilock(ip, XFS_ILOCK_SHARED);
698 error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
699 0);
700 xfs_iunlock(ip, XFS_ILOCK_SHARED);
701 if (error || nimaps == 0)
702 return false;
703
704 /*
705 * If there's a real mapping there or there are delayed allocation
706 * reservations, then we have post-EOF blocks to try to free.
707 */
708 return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
709}
710
711/*
712 * This is called to free any blocks beyond eof. The caller must hold
713 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
714 * reference to the inode.
715 */
716int
717xfs_free_eofblocks(
718 struct xfs_inode *ip)
719{
720 struct xfs_trans *tp;
721 struct xfs_mount *mp = ip->i_mount;
722 int error;
723
724 /* Attach the dquots to the inode up front. */
725 error = xfs_qm_dqattach(ip);
726 if (error)
727 return error;
728
729 /* Wait on dio to ensure i_size has settled. */
730 inode_dio_wait(VFS_I(ip));
731
732 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
733 if (error) {
734 ASSERT(XFS_FORCED_SHUTDOWN(mp));
735 return error;
736 }
737
738 xfs_ilock(ip, XFS_ILOCK_EXCL);
739 xfs_trans_ijoin(tp, ip, 0);
740
741 /*
742 * Do not update the on-disk file size. If we update the on-disk file
743 * size and then the system crashes before the contents of the file are
744	 * flushed to disk then the files may be full of holes (i.e. the NULL
745	 * files bug).
746 */
747 error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
748 XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
749 if (error)
750 goto err_cancel;
751
752 error = xfs_trans_commit(tp);
753 if (error)
754 goto out_unlock;
755
756 xfs_inode_clear_eofblocks_tag(ip);
757 goto out_unlock;
758
759err_cancel:
760 /*
761 * If we get an error at this point we simply don't
762 * bother truncating the file.
763 */
764 xfs_trans_cancel(tp);
765out_unlock:
766 xfs_iunlock(ip, XFS_ILOCK_EXCL);
767 return error;
768}
769
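/*
 * Preallocate file space for the range [offset, offset + len), e.g. on
 * behalf of fallocate(). The work is split over multiple transactions, each
 * of which maps at most MAXEXTLEN blocks worth of space.
 */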
770int
771xfs_alloc_file_space(
772 struct xfs_inode *ip,
773 xfs_off_t offset,
774 xfs_off_t len,
775 int alloc_type)
776{
777 xfs_mount_t *mp = ip->i_mount;
778 xfs_off_t count;
779 xfs_filblks_t allocated_fsb;
780 xfs_filblks_t allocatesize_fsb;
781 xfs_extlen_t extsz, temp;
782 xfs_fileoff_t startoffset_fsb;
783 xfs_fileoff_t endoffset_fsb;
784 int nimaps;
785 int rt;
786 xfs_trans_t *tp;
787 xfs_bmbt_irec_t imaps[1], *imapp;
788 int error;
789
790 trace_xfs_alloc_file_space(ip);
791
792 if (XFS_FORCED_SHUTDOWN(mp))
793 return -EIO;
794
795 error = xfs_qm_dqattach(ip);
796 if (error)
797 return error;
798
799 if (len <= 0)
800 return -EINVAL;
801
802 rt = XFS_IS_REALTIME_INODE(ip);
803 extsz = xfs_get_extsz_hint(ip);
804
805 count = len;
806 imapp = &imaps[0];
807 nimaps = 1;
808 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
809 endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
810 allocatesize_fsb = endoffset_fsb - startoffset_fsb;
811
812 /*
813 * Allocate file space until done or until there is an error
814 */
815 while (allocatesize_fsb && !error) {
816 xfs_fileoff_t s, e;
817 unsigned int dblocks, rblocks, resblks;
818
819 /*
820 * Determine space reservations for data/realtime.
821 */
822 if (unlikely(extsz)) {
823 s = startoffset_fsb;
824 do_div(s, extsz);
825 s *= extsz;
826 e = startoffset_fsb + allocatesize_fsb;
827 div_u64_rem(startoffset_fsb, extsz, &temp);
828 if (temp)
829 e += temp;
830 div_u64_rem(e, extsz, &temp);
831 if (temp)
832 e += extsz - temp;
833 } else {
834 s = 0;
835 e = allocatesize_fsb;
836 }
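		/*
		 * Worked example: with extsz = 16, startoffset_fsb = 20 and
		 * allocatesize_fsb = 10, s rounds down to 16 while e starts
		 * at 30, is bumped by the 4 block offset into the hint chunk
		 * and then rounded up to 48, so the reservation below
		 * conservatively covers two full extent size chunks.
		 */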
837
838 /*
839 * The transaction reservation is limited to a 32-bit block
840 * count, hence we need to limit the number of blocks we are
841 * trying to reserve to avoid an overflow. We can't allocate
842 * more than @nimaps extents, and an extent is limited on disk
843 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
844 */
845 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
846 if (unlikely(rt)) {
847 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
848 rblocks = resblks;
849 } else {
850 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
851 rblocks = 0;
852 }
853
854 /*
855	 * Allocate and set up the transaction.
856 */
857 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
858 dblocks, rblocks, false, &tp);
859 if (error)
860 break;
861
862 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
863 XFS_IEXT_ADD_NOSPLIT_CNT);
864 if (error)
865 goto error;
866
867 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
868 allocatesize_fsb, alloc_type, 0, imapp,
869 &nimaps);
870 if (error)
871 goto error;
872
873 /*
874 * Complete the transaction
875 */
876 error = xfs_trans_commit(tp);
877 xfs_iunlock(ip, XFS_ILOCK_EXCL);
878 if (error)
879 break;
880
881		if (nimaps == 0) {
882			error = -ENOSPC;
883			break;
884		}
885
886		allocated_fsb = imapp->br_blockcount;
887
888		startoffset_fsb += allocated_fsb;
889		allocatesize_fsb -= allocated_fsb;
890 }
891
892 return error;
893
894error:
895 xfs_trans_cancel(tp);
896 xfs_iunlock(ip, XFS_ILOCK_EXCL);
897 return error;
898}
899
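/*
 * Unmap part of [startoffset_fsb, startoffset_fsb + len_fsb) in a single
 * transaction, removing at most two extents per call. *done is set once the
 * whole range has been unmapped, so callers are expected to loop on it.
 */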
900static int
901xfs_unmap_extent(
902 struct xfs_inode *ip,
903 xfs_fileoff_t startoffset_fsb,
904 xfs_filblks_t len_fsb,
905 int *done)
906{
907 struct xfs_mount *mp = ip->i_mount;
908 struct xfs_trans *tp;
909 uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
910 int error;
911
912 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
913 false, &tp);
914 if (error)
915 return error;
916
917 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
918 XFS_IEXT_PUNCH_HOLE_CNT);
919 if (error)
920 goto out_trans_cancel;
921
922 error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
923 if (error)
924 goto out_trans_cancel;
925
926 error = xfs_trans_commit(tp);
927out_unlock:
928 xfs_iunlock(ip, XFS_ILOCK_EXCL);
929 return error;
930
931out_trans_cancel:
932 xfs_trans_cancel(tp);
933 goto out_unlock;
934}
935
936/* Caller must first wait for the completion of any pending DIOs if required. */
937int
938xfs_flush_unmap_range(
939 struct xfs_inode *ip,
940 xfs_off_t offset,
941 xfs_off_t len)
942{
943 struct xfs_mount *mp = ip->i_mount;
944 struct inode *inode = VFS_I(ip);
945 xfs_off_t rounding, start, end;
946 int error;
947
948 rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
949 start = round_down(offset, rounding);
950 end = round_up(offset + len, rounding) - 1;
951
952 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
953 if (error)
954 return error;
955 truncate_pagecache_range(inode, start, end);
956 return 0;
957}
958
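/*
 * Punch a hole over the range [offset, offset + len): unmap all full blocks
 * in the range, then zero any partial blocks left at either end on disk.
 */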
959int
960xfs_free_file_space(
961 struct xfs_inode *ip,
962 xfs_off_t offset,
963 xfs_off_t len)
964{
965 struct xfs_mount *mp = ip->i_mount;
966 xfs_fileoff_t startoffset_fsb;
967 xfs_fileoff_t endoffset_fsb;
968 int done = 0, error;
969
970 trace_xfs_free_file_space(ip);
971
972 error = xfs_qm_dqattach(ip);
973 if (error)
974 return error;
975
976 if (len <= 0) /* if nothing being freed */
977 return 0;
978
979 startoffset_fsb = XFS_B_TO_FSB(mp, offset);
980 endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
981
982 /* We can only free complete realtime extents. */
983 if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
984 startoffset_fsb = roundup_64(startoffset_fsb,
985 mp->m_sb.sb_rextsize);
986 endoffset_fsb = rounddown_64(endoffset_fsb,
987 mp->m_sb.sb_rextsize);
988 }
989
990 /*
991 * Need to zero the stuff we're not freeing, on disk.
992 */
993 if (endoffset_fsb > startoffset_fsb) {
994 while (!done) {
995 error = xfs_unmap_extent(ip, startoffset_fsb,
996 endoffset_fsb - startoffset_fsb, &done);
997 if (error)
998 return error;
999 }
1000 }
1001
1002 /*
1003	 * Now that we've unmapped all full blocks we'll have to zero out any
1004 * partial block at the beginning and/or end. iomap_zero_range is smart
1005 * enough to skip any holes, including those we just created, but we
1006 * must take care not to zero beyond EOF and enlarge i_size.
1007 */
1008 if (offset >= XFS_ISIZE(ip))
1009 return 0;
1010 if (offset + len > XFS_ISIZE(ip))
1011 len = XFS_ISIZE(ip) - offset;
1012 error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
1013 &xfs_buffered_write_iomap_ops);
1014 if (error)
1015 return error;
1016
1017 /*
1018 * If we zeroed right up to EOF and EOF straddles a page boundary we
1019 * must make sure that the post-EOF area is also zeroed because the
1020 * page could be mmap'd and iomap_zero_range doesn't do that for us.
1021 * Writeback of the eof page will do this, albeit clumsily.
1022 */
1023 if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
1024 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1025 round_down(offset + len, PAGE_SIZE), LLONG_MAX);
1026 }
1027
1028 return error;
1029}
1030
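/*
 * Common setup for the collapse and insert range operations: trim post-EOF
 * preallocations, write back and invalidate the page cache from just before
 * @offset to EOF, and drop any COW fork extents over that range so nothing
 * can race with the extent shift or land at a stale offset afterwards.
 */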
1031static int
1032xfs_prepare_shift(
1033 struct xfs_inode *ip,
1034 loff_t offset)
1035{
1036 struct xfs_mount *mp = ip->i_mount;
1037 int error;
1038
1039 /*
1040 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1041 * into the accessible region of the file.
1042 */
1043 if (xfs_can_free_eofblocks(ip, true)) {
1044 error = xfs_free_eofblocks(ip);
1045 if (error)
1046 return error;
1047 }
1048
1049 /*
1050 * Shift operations must stabilize the start block offset boundary along
1051 * with the full range of the operation. If we don't, a COW writeback
1052 * completion could race with an insert, front merge with the start
1053 * extent (after split) during the shift and corrupt the file. Start
1054 * with the block just prior to the start to stabilize the boundary.
1055 */
1056 offset = round_down(offset, mp->m_sb.sb_blocksize);
1057 if (offset)
1058 offset -= mp->m_sb.sb_blocksize;
1059
1060 /*
1061	 * Write back and invalidate the page cache for the remainder of the file as we're
1062 * about to shift down every extent from offset to EOF.
1063 */
1064 error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1065 if (error)
1066 return error;
1067
1068 /*
1069 * Clean out anything hanging around in the cow fork now that
1070 * we've flushed all the dirty data out to disk to avoid having
1071 * CoW extents at the wrong offsets.
1072 */
1073 if (xfs_inode_has_cow_data(ip)) {
1074 error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1075 true);
1076 if (error)
1077 return error;
1078 }
1079
1080 return 0;
1081}
1082
1083/*
1084 * xfs_collapse_file_space()
1085 *	This routine frees disk space and shifts extents for the given file.
1086 *	The first thing we do is free the data blocks in the specified range
1087 *	by calling xfs_free_file_space(), which also syncs dirty data and
1088 *	invalidates the page cache over the region on which the collapse is
1089 *	working. Then we shift the extent records to the left to cover the hole.
1090 * RETURNS:
1091 * 0 on success
1092 * errno on error
1093 *
1094 */
1095int
1096xfs_collapse_file_space(
1097 struct xfs_inode *ip,
1098 xfs_off_t offset,
1099 xfs_off_t len)
1100{
1101 struct xfs_mount *mp = ip->i_mount;
1102 struct xfs_trans *tp;
1103 int error;
1104 xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
1105 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
1106 bool done = false;
1107
1108 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1109 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1110
1111 trace_xfs_collapse_file_space(ip);
1112
1113 error = xfs_free_file_space(ip, offset, len);
1114 if (error)
1115 return error;
1116
1117 error = xfs_prepare_shift(ip, offset);
1118 if (error)
1119 return error;
1120
1121 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1122 if (error)
1123 return error;
1124
1125 xfs_ilock(ip, XFS_ILOCK_EXCL);
1126 xfs_trans_ijoin(tp, ip, 0);
1127
1128 while (!done) {
1129 error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1130 &done);
1131 if (error)
1132 goto out_trans_cancel;
1133 if (done)
1134 break;
1135
1136 /* finish any deferred frees and roll the transaction */
1137 error = xfs_defer_finish(&tp);
1138 if (error)
1139 goto out_trans_cancel;
1140 }
1141
1142 error = xfs_trans_commit(tp);
1143 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1144 return error;
1145
1146out_trans_cancel:
1147 xfs_trans_cancel(tp);
1148 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1149 return error;
1150}
1151
1152/*
1153 * xfs_insert_file_space()
1154 *	This routine creates hole space by shifting extents for the given file.
1155 *	The first thing we do is sync dirty data and invalidate the page cache
1156 *	over the region on which the insert range is working. Then we split an
1157 *	extent in two at the given offset by calling xfs_bmap_split_extent,
1158 *	and shift all extent records lying between [offset, last allocated
1159 *	extent] to the right to make room for the hole.
1160 * RETURNS:
1161 * 0 on success
1162 * errno on error
1163 */
1164int
1165xfs_insert_file_space(
1166 struct xfs_inode *ip,
1167 loff_t offset,
1168 loff_t len)
1169{
1170 struct xfs_mount *mp = ip->i_mount;
1171 struct xfs_trans *tp;
1172 int error;
1173 xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
1174 xfs_fileoff_t next_fsb = NULLFSBLOCK;
1175 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
1176 bool done = false;
1177
1178 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1179 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1180
1181 trace_xfs_insert_file_space(ip);
1182
1183 error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1184 if (error)
1185 return error;
1186
1187 error = xfs_prepare_shift(ip, offset);
1188 if (error)
1189 return error;
1190
1191 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1192 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1193 if (error)
1194 return error;
1195
1196 xfs_ilock(ip, XFS_ILOCK_EXCL);
1197 xfs_trans_ijoin(tp, ip, 0);
1198
1199 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
1200 XFS_IEXT_PUNCH_HOLE_CNT);
1201 if (error)
1202 goto out_trans_cancel;
1203
1204 /*
1205 * The extent shifting code works on extent granularity. So, if stop_fsb
1206	 * is not the starting block of an extent, we need to split the extent at
1207 * stop_fsb.
1208 */
1209 error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1210 if (error)
1211 goto out_trans_cancel;
1212
1213 do {
1214 error = xfs_defer_finish(&tp);
1215 if (error)
1216 goto out_trans_cancel;
1217
1218 error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1219 &done, stop_fsb);
1220 if (error)
1221 goto out_trans_cancel;
1222 } while (!done);
1223
1224 error = xfs_trans_commit(tp);
1225 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1226 return error;
1227
1228out_trans_cancel:
1229 xfs_trans_cancel(tp);
1230 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1231 return error;
1232}
1233
1234/*
1235 * We need to check that the format of the data fork in the temporary inode is
1236 * valid for the target inode before doing the swap. This is not a problem with
1237 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1238 * data fork that depends on the space the attribute fork is taking, so we can get
1239 * invalid formats on the target inode.
1240 *
1241 * E.g. target has space for 7 extents in extent format, temp inode only has
1242 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1243 * btree, but when swapped it needs to be in extent format. Hence we can't just
1244 * blindly swap data forks on attr2 filesystems.
1245 *
1246 * Note that we check the swap in both directions so that we don't end up with
1247 * a corrupt temporary inode, either.
1248 *
1249 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1250 * inode will prevent this situation from occurring, so all we do here is
1251 * reject and log the attempt. Basically we are putting the responsibility on
1252 * userspace to get this right.
1253 */
1254static int
1255xfs_swap_extents_check_format(
1256 struct xfs_inode *ip, /* target inode */
1257 struct xfs_inode *tip) /* tmp inode */
1258{
1259 struct xfs_ifork *ifp = &ip->i_df;
1260 struct xfs_ifork *tifp = &tip->i_df;
1261
1262 /* User/group/project quota ids must match if quotas are enforced. */
1263 if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1264 (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1265 !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1266 ip->i_projid != tip->i_projid))
1267 return -EINVAL;
1268
1269 /* Should never get a local format */
1270 if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1271 tifp->if_format == XFS_DINODE_FMT_LOCAL)
1272 return -EINVAL;
1273
1274 /*
1275	 * If the target inode has fewer extents than the temporary inode, then
1276 * why did userspace call us?
1277 */
1278 if (ifp->if_nextents < tifp->if_nextents)
1279 return -EINVAL;
1280
1281 /*
1282 * If we have to use the (expensive) rmap swap method, we can
1283 * handle any number of extents and any format.
1284 */
1285 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1286 return 0;
1287
1288 /*
1289	 * If the target inode is in extent form and the temp inode is in btree
1290	 * form then we will end up with the target inode in the wrong format,
1291	 * as we already know there are fewer extents in the temp inode.
1292 */
1293 if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1294 tifp->if_format == XFS_DINODE_FMT_BTREE)
1295 return -EINVAL;
1296
1297 /* Check temp in extent form to max in target */
1298 if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1299 tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1300 return -EINVAL;
1301
1302 /* Check target in extent form to max in temp */
1303 if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1304 ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1305 return -EINVAL;
1306
1307 /*
1308 * If we are in a btree format, check that the temp root block will fit
1309 * in the target and that it has enough extents to be in btree format
1310 * in the target.
1311 *
1312 * Note that we have to be careful to allow btree->extent conversions
1313 * (a common defrag case) which will occur when the temp inode is in
1314 * extent format...
1315 */
1316 if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1317 if (XFS_IFORK_Q(ip) &&
1318 XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
1319 return -EINVAL;
1320 if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1321 return -EINVAL;
1322 }
1323
1324 /* Reciprocal target->temp btree format checks */
1325 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1326 if (XFS_IFORK_Q(tip) &&
1327 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1328 return -EINVAL;
1329 if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1330 return -EINVAL;
1331 }
1332
1333 return 0;
1334}
1335
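/*
 * Write back and invalidate all cached data for a file about to take part
 * in an extent swap. If any page cache survives the invalidation, the file
 * is still in active use and the swap is refused.
 */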
1336static int
1337xfs_swap_extent_flush(
1338 struct xfs_inode *ip)
1339{
1340 int error;
1341
1342 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1343 if (error)
1344 return error;
1345 truncate_pagecache_range(VFS_I(ip), 0, -1);
1346
1347 /* Verify O_DIRECT for ftmp */
1348 if (VFS_I(ip)->i_mapping->nrpages)
1349 return -EINVAL;
1350 return 0;
1351}
1352
1353/*
1354 * Move extents from one file to another, when rmap is enabled.
1355 */
1356STATIC int
1357xfs_swap_extent_rmap(
1358 struct xfs_trans **tpp,
1359 struct xfs_inode *ip,
1360 struct xfs_inode *tip)
1361{
1362 struct xfs_trans *tp = *tpp;
1363 struct xfs_bmbt_irec irec;
1364 struct xfs_bmbt_irec uirec;
1365 struct xfs_bmbt_irec tirec;
1366 xfs_fileoff_t offset_fsb;
1367 xfs_fileoff_t end_fsb;
1368 xfs_filblks_t count_fsb;
1369 int error;
1370 xfs_filblks_t ilen;
1371 xfs_filblks_t rlen;
1372 int nimaps;
1373 uint64_t tip_flags2;
1374
1375 /*
1376 * If the source file has shared blocks, we must flag the donor
1377 * file as having shared blocks so that we get the shared-block
1378 * rmap functions when we go to fix up the rmaps. The flags
1379	 * will be switched for real later.
1380 */
1381 tip_flags2 = tip->i_diflags2;
1382 if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1383 tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1384
1385 offset_fsb = 0;
1386 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1387 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1388
1389 while (count_fsb) {
1390 /* Read extent from the donor file */
1391 nimaps = 1;
1392 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1393 &nimaps, 0);
1394 if (error)
1395 goto out;
1396 ASSERT(nimaps == 1);
1397 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1398
1399 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1400 ilen = tirec.br_blockcount;
1401
1402 /* Unmap the old blocks in the source file. */
1403 while (tirec.br_blockcount) {
1404 ASSERT(tp->t_firstblock == NULLFSBLOCK);
1405 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1406
1407 /* Read extent from the source file */
1408 nimaps = 1;
1409 error = xfs_bmapi_read(ip, tirec.br_startoff,
1410 tirec.br_blockcount, &irec,
1411 &nimaps, 0);
1412 if (error)
1413 goto out;
1414 ASSERT(nimaps == 1);
1415 ASSERT(tirec.br_startoff == irec.br_startoff);
1416 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1417
1418 /* Trim the extent. */
1419 uirec = tirec;
1420 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1421 tirec.br_blockcount,
1422 irec.br_blockcount);
1423 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1424
1425 if (xfs_bmap_is_real_extent(&uirec)) {
1426 error = xfs_iext_count_may_overflow(ip,
1427 XFS_DATA_FORK,
1428 XFS_IEXT_SWAP_RMAP_CNT);
1429 if (error)
1430 goto out;
1431 }
1432
1433 if (xfs_bmap_is_real_extent(&irec)) {
1434 error = xfs_iext_count_may_overflow(tip,
1435 XFS_DATA_FORK,
1436 XFS_IEXT_SWAP_RMAP_CNT);
1437 if (error)
1438 goto out;
1439 }
1440
1441 /* Remove the mapping from the donor file. */
1442 xfs_bmap_unmap_extent(tp, tip, &uirec);
1443
1444 /* Remove the mapping from the source file. */
1445 xfs_bmap_unmap_extent(tp, ip, &irec);
1446
1447 /* Map the donor file's blocks into the source file. */
1448 xfs_bmap_map_extent(tp, ip, &uirec);
1449
1450 /* Map the source file's blocks into the donor file. */
1451 xfs_bmap_map_extent(tp, tip, &irec);
1452
1453 error = xfs_defer_finish(tpp);
1454 tp = *tpp;
1455 if (error)
1456 goto out;
1457
1458 tirec.br_startoff += rlen;
1459 if (tirec.br_startblock != HOLESTARTBLOCK &&
1460 tirec.br_startblock != DELAYSTARTBLOCK)
1461 tirec.br_startblock += rlen;
1462 tirec.br_blockcount -= rlen;
1463 }
1464
1465 /* Roll on... */
1466 count_fsb -= ilen;
1467 offset_fsb += ilen;
1468 }
1469
1470 tip->i_diflags2 = tip_flags2;
1471 return 0;
1472
1473out:
1474 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1475 tip->i_diflags2 = tip_flags2;
1476 return error;
1477}
1478
1479/* Swap the extents of two files by swapping data forks. */
1480STATIC int
1481xfs_swap_extent_forks(
1482 struct xfs_trans *tp,
1483 struct xfs_inode *ip,
1484 struct xfs_inode *tip,
1485 int *src_log_flags,
1486 int *target_log_flags)
1487{
1488 xfs_filblks_t aforkblks = 0;
1489 xfs_filblks_t taforkblks = 0;
1490 xfs_extnum_t junk;
1491 uint64_t tmp;
1492 int error;
1493
1494 /*
1495 * Count the number of extended attribute blocks
1496 */
1497 if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
1498 ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1499 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1500 &aforkblks);
1501 if (error)
1502 return error;
1503 }
1504 if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
1505 tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1506 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1507 &taforkblks);
1508 if (error)
1509 return error;
1510 }
1511
1512 /*
1513 * Btree format (v3) inodes have the inode number stamped in the bmbt
1514 * block headers. We can't start changing the bmbt blocks until the
1515 * inode owner change is logged so recovery does the right thing in the
1516 * event of a crash. Set the owner change log flags now and leave the
1517 * bmbt scan as the last step.
1518 */
1519 if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
1520 if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1521 (*target_log_flags) |= XFS_ILOG_DOWNER;
1522 if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1523 (*src_log_flags) |= XFS_ILOG_DOWNER;
1524 }
1525
1526 /*
1527 * Swap the data forks of the inodes
1528 */
1529 swap(ip->i_df, tip->i_df);
1530
1531 /*
1532 * Fix the on-disk inode values
1533 */
1534 tmp = (uint64_t)ip->i_nblocks;
1535 ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1536 tip->i_nblocks = tmp + taforkblks - aforkblks;
1537
1538 /*
1539 * The extents in the source inode could still contain speculative
1540 * preallocation beyond EOF (e.g. the file is open but not modified
1541 * while defrag is in progress). In that case, we need to copy over the
1542 * number of delalloc blocks the data fork in the source inode is
1543 * tracking beyond EOF so that when the fork is truncated away when the
1544 * temporary inode is unlinked we don't underrun the i_delayed_blks
1545 * counter on that inode.
1546 */
1547 ASSERT(tip->i_delayed_blks == 0);
1548 tip->i_delayed_blks = ip->i_delayed_blks;
1549 ip->i_delayed_blks = 0;
1550
1551 switch (ip->i_df.if_format) {
1552 case XFS_DINODE_FMT_EXTENTS:
1553 (*src_log_flags) |= XFS_ILOG_DEXT;
1554 break;
1555 case XFS_DINODE_FMT_BTREE:
1556 ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1557 (*src_log_flags & XFS_ILOG_DOWNER));
1558 (*src_log_flags) |= XFS_ILOG_DBROOT;
1559 break;
1560 }
1561
1562 switch (tip->i_df.if_format) {
1563 case XFS_DINODE_FMT_EXTENTS:
1564 (*target_log_flags) |= XFS_ILOG_DEXT;
1565 break;
1566 case XFS_DINODE_FMT_BTREE:
1567 (*target_log_flags) |= XFS_ILOG_DBROOT;
1568 ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1569 (*target_log_flags & XFS_ILOG_DOWNER));
1570 break;
1571 }
1572
1573 return 0;
1574}
1575
1576/*
1577 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1578 * change owner scan attempts to order all modified buffers in the current
1579 * transaction. In the event of ordered buffer failure, the offending buffer is
1580 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1581 * the transaction in this case to replenish the fallback log reservation and
1582 * restart the scan. This process repeats until the scan completes.
1583 */
1584static int
1585xfs_swap_change_owner(
1586 struct xfs_trans **tpp,
1587 struct xfs_inode *ip,
1588 struct xfs_inode *tmpip)
1589{
1590 int error;
1591 struct xfs_trans *tp = *tpp;
1592
1593 do {
1594 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1595 NULL);
1596 /* success or fatal error */
1597 if (error != -EAGAIN)
1598 break;
1599
1600 error = xfs_trans_roll(tpp);
1601 if (error)
1602 break;
1603 tp = *tpp;
1604
1605 /*
1606 * Redirty both inodes so they can relog and keep the log tail
1607 * moving forward.
1608 */
1609 xfs_trans_ijoin(tp, ip, 0);
1610 xfs_trans_ijoin(tp, tmpip, 0);
1611 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1612 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1613 } while (true);
1614
1615 return error;
1616}
1617
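/*
 * Swap the data fork contents of two inodes, the final step of a
 * defragmentation request. On rmap filesystems the extents are remapped one
 * at a time using log intent items; otherwise the forks are exchanged
 * wholesale and any bmbt block owners are fixed up afterwards.
 */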
1618int
1619xfs_swap_extents(
1620 struct xfs_inode *ip, /* target inode */
1621 struct xfs_inode *tip, /* tmp inode */
1622 struct xfs_swapext *sxp)
1623{
1624 struct xfs_mount *mp = ip->i_mount;
1625 struct xfs_trans *tp;
1626 struct xfs_bstat *sbp = &sxp->sx_stat;
1627 int src_log_flags, target_log_flags;
1628 int error = 0;
1629 int lock_flags;
1630 uint64_t f;
1631 int resblks = 0;
1632 unsigned int flags = 0;
1633
1634 /*
1635 * Lock the inodes against other IO, page faults and truncate to
1636	 * begin with. Then we can safely ensure the inodes are flushed and
1637	 * have no page cache. Once we have done this we can take the ilocks and
1638 * do the rest of the checks.
1639 */
1640 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1641 lock_flags = XFS_MMAPLOCK_EXCL;
1642 xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1643
1644 /* Verify that both files have the same format */
1645 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1646 error = -EINVAL;
1647 goto out_unlock;
1648 }
1649
1650 /* Verify both files are either real-time or non-realtime */
1651 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1652 error = -EINVAL;
1653 goto out_unlock;
1654 }
1655
1656 error = xfs_qm_dqattach(ip);
1657 if (error)
1658 goto out_unlock;
1659
1660 error = xfs_qm_dqattach(tip);
1661 if (error)
1662 goto out_unlock;
1663
1664 error = xfs_swap_extent_flush(ip);
1665 if (error)
1666 goto out_unlock;
1667 error = xfs_swap_extent_flush(tip);
1668 if (error)
1669 goto out_unlock;
1670
1671 if (xfs_inode_has_cow_data(tip)) {
1672 error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1673 if (error)
1674 goto out_unlock;
1675 }
1676
1677 /*
1678 * Extent "swapping" with rmap requires a permanent reservation and
1679 * a block reservation because it's really just a remap operation
1680 * performed with log redo items!
1681 */
1682 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1683 int w = XFS_DATA_FORK;
1684 uint32_t ipnext = ip->i_df.if_nextents;
1685 uint32_t tipnext = tip->i_df.if_nextents;
1686
1687 /*
1688 * Conceptually this shouldn't affect the shape of either bmbt,
1689 * but since we atomically move extents one by one, we reserve
1690 * enough space to rebuild both trees.
1691 */
1692 resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1693 resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1694
1695 /*
1696 * If either inode straddles a bmapbt block allocation boundary,
1697 * the rmapbt algorithm triggers repeated allocs and frees as
1698 * extents are remapped. This can exhaust the block reservation
1699 * prematurely and cause shutdown. Return freed blocks to the
1700 * transaction reservation to counter this behavior.
1701 */
1702 flags |= XFS_TRANS_RES_FDBLKS;
1703 }
1704 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1705 &tp);
1706 if (error)
1707 goto out_unlock;
1708
1709 /*
1710	 * Lock and join the inodes to the transaction so that transaction commit
1711 * or cancel will unlock the inodes from this point onwards.
1712 */
1713 xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1714 lock_flags |= XFS_ILOCK_EXCL;
1715 xfs_trans_ijoin(tp, ip, 0);
1716 xfs_trans_ijoin(tp, tip, 0);
1717
1718
1719 /* Verify all data are being swapped */
1720 if (sxp->sx_offset != 0 ||
1721 sxp->sx_length != ip->i_disk_size ||
1722 sxp->sx_length != tip->i_disk_size) {
1723 error = -EFAULT;
1724 goto out_trans_cancel;
1725 }
1726
1727 trace_xfs_swap_extent_before(ip, 0);
1728 trace_xfs_swap_extent_before(tip, 1);
1729
1730 /* check inode formats now that data is flushed */
1731 error = xfs_swap_extents_check_format(ip, tip);
1732 if (error) {
1733 xfs_notice(mp,
1734 "%s: inode 0x%llx format is incompatible for exchanging.",
1735 __func__, ip->i_ino);
1736 goto out_trans_cancel;
1737 }
1738
1739 /*
1740	 * Compare the current change & modify times with those
1741	 * passed in. If they differ, we abort this swap.
1742	 * This is the mechanism used to assure the calling
1743 * process that the file was not changed out from
1744 * under it.
1745 */
1746 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1747 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1748 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1749 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1750 error = -EBUSY;
1751 goto out_trans_cancel;
1752 }
1753
1754 /*
1755 * Note the trickiness in setting the log flags - we set the owner log
1756 * flag on the opposite inode (i.e. the inode we are setting the new
1757 * owner to be) because once we swap the forks and log that, log
1758 * recovery is going to see the fork as owned by the swapped inode,
1759 * not the pre-swapped inodes.
1760 */
1761 src_log_flags = XFS_ILOG_CORE;
1762 target_log_flags = XFS_ILOG_CORE;
1763
1764 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1765 error = xfs_swap_extent_rmap(&tp, ip, tip);
1766 else
1767 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1768 &target_log_flags);
1769 if (error)
1770 goto out_trans_cancel;
1771
1772 /* Do we have to swap reflink flags? */
1773 if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1774 (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1775 f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1776 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1777 ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1778 tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1779 tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1780 }
1781
1782 /* Swap the cow forks. */
1783 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1784 ASSERT(!ip->i_cowfp ||
1785 ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1786 ASSERT(!tip->i_cowfp ||
1787 tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1788
1789 swap(ip->i_cowfp, tip->i_cowfp);
1790
1791 if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1792 xfs_inode_set_cowblocks_tag(ip);
1793 else
1794 xfs_inode_clear_cowblocks_tag(ip);
1795 if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1796 xfs_inode_set_cowblocks_tag(tip);
1797 else
1798 xfs_inode_clear_cowblocks_tag(tip);
1799 }
1800
1801 xfs_trans_log_inode(tp, ip, src_log_flags);
1802 xfs_trans_log_inode(tp, tip, target_log_flags);
1803
1804 /*
1805 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1806 * have inode number owner values in the bmbt blocks that still refer to
1807 * the old inode. Scan each bmbt to fix up the owner values with the
1808 * inode number of the current inode.
1809 */
1810 if (src_log_flags & XFS_ILOG_DOWNER) {
1811 error = xfs_swap_change_owner(&tp, ip, tip);
1812 if (error)
1813 goto out_trans_cancel;
1814 }
1815 if (target_log_flags & XFS_ILOG_DOWNER) {
1816 error = xfs_swap_change_owner(&tp, tip, ip);
1817 if (error)
1818 goto out_trans_cancel;
1819 }
1820
1821 /*
1822 * If this is a synchronous mount, make sure that the
1823 * transaction goes to disk before returning to the user.
1824 */
1825 if (mp->m_flags & XFS_MOUNT_WSYNC)
1826 xfs_trans_set_sync(tp);
1827
1828 error = xfs_trans_commit(tp);
1829
1830 trace_xfs_swap_extent_after(ip, 0);
1831 trace_xfs_swap_extent_after(tip, 1);
1832
1833out_unlock:
1834 xfs_iunlock(ip, lock_flags);
1835 xfs_iunlock(tip, lock_flags);
1836 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1837 return error;
1838
1839out_trans_cancel:
1840 xfs_trans_cancel(tp);
1841 goto out_unlock;
1842}