v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * Copyright (c) 2012 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_bit.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_btree.h"
  18#include "xfs_trans.h"
  19#include "xfs_alloc.h"
  20#include "xfs_bmap.h"
  21#include "xfs_bmap_util.h"
  22#include "xfs_bmap_btree.h"
  23#include "xfs_rtalloc.h"
  24#include "xfs_error.h"
  25#include "xfs_quota.h"
  26#include "xfs_trans_space.h"
  27#include "xfs_trace.h"
  28#include "xfs_icache.h"
  29#include "xfs_iomap.h"
  30#include "xfs_reflink.h"
  31
  32/* Kernel only BMAP related definitions and functions */
  33
  34/*
  35 * Convert the given file system block to a disk block.  We have to treat it
  36 * differently based on whether the file is a real time file or not, because the
  37 * bmap code does.
  38 */
  39xfs_daddr_t
  40xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  41{
  42	if (XFS_IS_REALTIME_INODE(ip))
  43		return XFS_FSB_TO_BB(ip->i_mount, fsb);
  44	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
  45}
  46
  47/*
  48 * Routine to zero an extent on disk allocated to the specific inode.
  49 *
  50 * The VFS functions take a linearised filesystem block offset, so we have to
  51 * convert the sparse xfs fsb to the right format first.
  52 * VFS types are real funky, too.
  53 */
  54int
  55xfs_zero_extent(
  56	struct xfs_inode	*ip,
  57	xfs_fsblock_t		start_fsb,
  58	xfs_off_t		count_fsb)
  59{
  60	struct xfs_mount	*mp = ip->i_mount;
  61	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
  62	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
  63	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
  64
  65	return blkdev_issue_zeroout(target->bt_bdev,
  66		block << (mp->m_super->s_blocksize_bits - 9),
  67		count_fsb << (mp->m_super->s_blocksize_bits - 9),
  68		GFP_NOFS, 0);
  69}
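
The shift by (s_blocksize_bits - 9) converts filesystem blocks into the
512-byte sector units that blkdev_issue_zeroout() expects, since a sector
is 2^9 bytes. A minimal standalone sketch of that conversion (hypothetical
helper, not part of the XFS API):

#include <stdint.h>

/* each fs block spans 2^(blocksize_bits - 9) 512-byte sectors */
static inline uint64_t fsb_to_sector(uint64_t fsb_block,
				     unsigned int blocksize_bits)
{
	return fsb_block << (blocksize_bits - 9);
}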
  70
  71#ifdef CONFIG_XFS_RT
  72int
  73xfs_bmap_rtalloc(
  74	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
  75{
  76	int		error;		/* error return value */
  77	xfs_mount_t	*mp;		/* mount point structure */
  78	xfs_extlen_t	prod = 0;	/* product factor for allocators */
  79	xfs_extlen_t	mod = 0;	/* product factor for allocators */
  80	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
  81	xfs_extlen_t	align;		/* minimum allocation alignment */
  82	xfs_rtblock_t	rtb;
  83
  84	mp = ap->ip->i_mount;
  85	align = xfs_get_extsz_hint(ap->ip);
  86	prod = align / mp->m_sb.sb_rextsize;
  87	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
  88					align, 1, ap->eof, 0,
  89					ap->conv, &ap->offset, &ap->length);
  90	if (error)
  91		return error;
  92	ASSERT(ap->length);
  93	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
  94
  95	/*
  96	 * If the offset & length are not perfectly aligned
  97	 * then kill prod, it will just get us in trouble.
  98	 */
  99	div_u64_rem(ap->offset, align, &mod);
 100	if (mod || ap->length % align)
 101		prod = 1;
 102	/*
 103	 * Set ralen to be the actual requested length in rtextents.
 104	 */
 105	ralen = ap->length / mp->m_sb.sb_rextsize;
 106	/*
 107	 * If the old value was close enough to MAXEXTLEN that
 108	 * we rounded up to it, cut it back so it's valid again.
 109	 * Note that if it's a really large request (bigger than
 110	 * MAXEXTLEN), we don't hear about that number, and can't
 111	 * adjust the starting point to match it.
 112	 */
 113	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
 114		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
 115
 116	/*
 117	 * Lock out modifications to both the RT bitmap and summary inodes
 118	 */
 119	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
 120	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
 121	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
 122	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
 123
 124	/*
 125	 * If it's an allocation to an empty file at offset 0,
 126	 * pick an extent that will space things out in the rt area.
 127	 */
 128	if (ap->eof && ap->offset == 0) {
 129		xfs_rtblock_t rtx; /* realtime extent no */
 130
 131		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
 132		if (error)
 133			return error;
 134		ap->blkno = rtx * mp->m_sb.sb_rextsize;
 135	} else {
 136		ap->blkno = 0;
 137	}
 138
 139	xfs_bmap_adjacent(ap);
 140
 141	/*
 142	 * Realtime allocation, done through xfs_rtallocate_extent.
 143	 */
 144	do_div(ap->blkno, mp->m_sb.sb_rextsize);
 145	rtb = ap->blkno;
 146	ap->length = ralen;
 147	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
 148				&ralen, ap->wasdel, prod, &rtb);
 149	if (error)
 150		return error;
 151
 152	ap->blkno = rtb;
 153	if (ap->blkno != NULLFSBLOCK) {
 154		ap->blkno *= mp->m_sb.sb_rextsize;
 155		ralen *= mp->m_sb.sb_rextsize;
 156		ap->length = ralen;
 157		ap->ip->i_d.di_nblocks += ralen;
 158		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 159		if (ap->wasdel)
 160			ap->ip->i_delayed_blks -= ralen;
 161		/*
 162		 * Adjust the disk quota also. This was reserved
 163		 * earlier.
 164		 */
 165		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
 166			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
 167					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
 168	} else {
 169		ap->length = 0;
 170	}
 171	return 0;
 172}
 173#endif /* CONFIG_XFS_RT */
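
The realtime allocator works in whole rtextents of sb_rextsize blocks, and
an on-disk extent length is limited to MAXEXTLEN (21 bits), so the request
above is converted to rtextents and clamped. A hedged standalone sketch of
that rounding, assuming the 21-bit limit (hypothetical helper, not the
kernel code):

#include <stdint.h>

#define MAX_EXT_LEN	((1U << 21) - 1)	/* 21-bit extent length limit */

/* convert a block count to rtextents, clamped below the extent limit */
static uint32_t clamp_rtextents(uint32_t len_blocks, uint32_t rextsize)
{
	uint32_t ralen = len_blocks / rextsize;

	if ((uint64_t)ralen * rextsize >= MAX_EXT_LEN)
		ralen = MAX_EXT_LEN / rextsize;
	return ralen;
}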
 174
 175/*
 176 * Extent tree block counting routines.
 177 */
 178
 179/*
 180 * Count leaf blocks given a range of extent records.  Delayed allocation
 181 * extents are not counted towards the totals.
 182 */
 183xfs_extnum_t
 184xfs_bmap_count_leaves(
 185	struct xfs_ifork	*ifp,
 186	xfs_filblks_t		*count)
 187{
 188	struct xfs_iext_cursor	icur;
 189	struct xfs_bmbt_irec	got;
 190	xfs_extnum_t		numrecs = 0;
 191
 192	for_each_xfs_iext(ifp, &icur, &got) {
 193		if (!isnullstartblock(got.br_startblock)) {
 194			*count += got.br_blockcount;
 195			numrecs++;
 196		}
 197	}
 198
 199	return numrecs;
 200}
 201
 202/*
 203 * Count fsblocks of the given fork.  Delayed allocation extents are
 204 * not counted towards the totals.
 205 */
 206int
 207xfs_bmap_count_blocks(
 208	struct xfs_trans	*tp,
 209	struct xfs_inode	*ip,
 210	int			whichfork,
 211	xfs_extnum_t		*nextents,
 212	xfs_filblks_t		*count)
 213{
 214	struct xfs_mount	*mp = ip->i_mount;
 215	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 216	struct xfs_btree_cur	*cur;
 217	xfs_extlen_t		btblocks = 0;
 218	int			error;
 219
 220	*nextents = 0;
 221	*count = 0;
 222
 223	if (!ifp)
 224		return 0;
 225
 226	switch (ifp->if_format) {
 227	case XFS_DINODE_FMT_BTREE:
 228		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 229			error = xfs_iread_extents(tp, ip, whichfork);
 230			if (error)
 231				return error;
 232		}
 233
 234		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 235		error = xfs_btree_count_blocks(cur, &btblocks);
 236		xfs_btree_del_cursor(cur, error);
 237		if (error)
 238			return error;
 239
 240		/*
 241		 * xfs_btree_count_blocks includes the root block contained in
 242		 * the inode fork in @btblocks, so subtract one because we're
 243		 * only interested in allocated disk blocks.
 244		 */
 245		*count += btblocks - 1;
 246
 247		/* fall through */
 248	case XFS_DINODE_FMT_EXTENTS:
 249		*nextents = xfs_bmap_count_leaves(ifp, count);
 250		break;
 251	}
 252
 253	return 0;
 254}
 255
 256static int
 257xfs_getbmap_report_one(
 258	struct xfs_inode	*ip,
 259	struct getbmapx		*bmv,
 260	struct kgetbmap		*out,
 261	int64_t			bmv_end,
 262	struct xfs_bmbt_irec	*got)
 263{
 264	struct kgetbmap		*p = out + bmv->bmv_entries;
 265	bool			shared = false;
 266	int			error;
 267
 268	error = xfs_reflink_trim_around_shared(ip, got, &shared);
 269	if (error)
 270		return error;
 271
 272	if (isnullstartblock(got->br_startblock) ||
 273	    got->br_startblock == DELAYSTARTBLOCK) {
 274		/*
 275		 * Delalloc extents that start beyond EOF can occur due to
 276		 * speculative EOF allocation when the delalloc extent is larger
 277		 * than the largest freespace extent at conversion time.  These
 278		 * extents cannot be converted by data writeback, so can exist
 279		 * here even if we are not supposed to be finding delalloc
 280		 * extents.
 281		 */
 282		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
 283			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
 284
 285		p->bmv_oflags |= BMV_OF_DELALLOC;
 286		p->bmv_block = -2;
 287	} else {
 288		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
 289	}
 290
 291	if (got->br_state == XFS_EXT_UNWRITTEN &&
 292	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
 293		p->bmv_oflags |= BMV_OF_PREALLOC;
 294
 295	if (shared)
 296		p->bmv_oflags |= BMV_OF_SHARED;
 297
 298	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
 299	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
 300
 301	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 302	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 303	bmv->bmv_entries++;
 304	return 0;
 305}
 306
 307static void
 308xfs_getbmap_report_hole(
 309	struct xfs_inode	*ip,
 310	struct getbmapx		*bmv,
 311	struct kgetbmap		*out,
 312	int64_t			bmv_end,
 313	xfs_fileoff_t		bno,
 314	xfs_fileoff_t		end)
 315{
 316	struct kgetbmap		*p = out + bmv->bmv_entries;
 317
 318	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
 319		return;
 320
 321	p->bmv_block = -1;
 322	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
 323	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
 324
 325	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 326	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 327	bmv->bmv_entries++;
 328}
 329
 330static inline bool
 331xfs_getbmap_full(
 332	struct getbmapx		*bmv)
 333{
 334	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
 335}
 336
 337static bool
 338xfs_getbmap_next_rec(
 339	struct xfs_bmbt_irec	*rec,
 340	xfs_fileoff_t		total_end)
 341{
 342	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
 343
 344	if (end == total_end)
 345		return false;
 346
 347	rec->br_startoff += rec->br_blockcount;
 348	if (!isnullstartblock(rec->br_startblock) &&
 349	    rec->br_startblock != DELAYSTARTBLOCK)
 350		rec->br_startblock += rec->br_blockcount;
 351	rec->br_blockcount = total_end - end;
 352	return true;
 353}
 354
 355/*
 356 * Get inode's extents as described in bmv, and format for output.
 357 * Calls formatter to fill the user's buffer until all extents
 358 * are mapped, until the passed-in bmv->bmv_count slots have
 359 * been filled, or until the formatter short-circuits the loop,
 360 * if it is tracking filled-in extents on its own.
 361 */
 362int						/* error code */
 363xfs_getbmap(
 364	struct xfs_inode	*ip,
 365	struct getbmapx		*bmv,		/* user bmap structure */
 366	struct kgetbmap		*out)
 367{
 368	struct xfs_mount	*mp = ip->i_mount;
 369	int			iflags = bmv->bmv_iflags;
 370	int			whichfork, lock, error = 0;
 371	int64_t			bmv_end, max_len;
 372	xfs_fileoff_t		bno, first_bno;
 373	struct xfs_ifork	*ifp;
 374	struct xfs_bmbt_irec	got, rec;
 375	xfs_filblks_t		len;
 376	struct xfs_iext_cursor	icur;
 377
 378	if (bmv->bmv_iflags & ~BMV_IF_VALID)
 379		return -EINVAL;
 380#ifndef DEBUG
 381	/* Only allow CoW fork queries if we're debugging. */
 382	if (iflags & BMV_IF_COWFORK)
 383		return -EINVAL;
 384#endif
 385	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
 386		return -EINVAL;
 387
 388	if (bmv->bmv_length < -1)
 389		return -EINVAL;
 390	bmv->bmv_entries = 0;
 391	if (bmv->bmv_length == 0)
 392		return 0;
 393
 394	if (iflags & BMV_IF_ATTRFORK)
 395		whichfork = XFS_ATTR_FORK;
 396	else if (iflags & BMV_IF_COWFORK)
 397		whichfork = XFS_COW_FORK;
 398	else
 399		whichfork = XFS_DATA_FORK;
 400	ifp = XFS_IFORK_PTR(ip, whichfork);
 401
 402	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 403	switch (whichfork) {
 404	case XFS_ATTR_FORK:
 405		if (!XFS_IFORK_Q(ip))
 406			goto out_unlock_iolock;
 407
 408		max_len = 1LL << 32;
 409		lock = xfs_ilock_attr_map_shared(ip);
 410		break;
 411	case XFS_COW_FORK:
 412		/* No CoW fork? Just return */
 413		if (!ifp)
 414			goto out_unlock_iolock;
 415
 416		if (xfs_get_cowextsz_hint(ip))
 417			max_len = mp->m_super->s_maxbytes;
 418		else
 419			max_len = XFS_ISIZE(ip);
 420
 421		lock = XFS_ILOCK_SHARED;
 422		xfs_ilock(ip, lock);
 423		break;
 424	case XFS_DATA_FORK:
 425		if (!(iflags & BMV_IF_DELALLOC) &&
 426		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 427			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 428			if (error)
 429				goto out_unlock_iolock;
 430
 431			/*
 432			 * Even after flushing the inode, there can still be
 433			 * delalloc blocks on the inode beyond EOF due to
 434			 * speculative preallocation.  These are not removed
 435			 * until the release function is called or the inode
 436			 * is inactivated.  Hence we cannot assert here that
 437			 * ip->i_delayed_blks == 0.
 438			 */
 439		}
 440
 441		if (xfs_get_extsz_hint(ip) ||
 442		    (ip->i_d.di_flags &
 443		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
 444			max_len = mp->m_super->s_maxbytes;
 445		else
 446			max_len = XFS_ISIZE(ip);
 447
 448		lock = xfs_ilock_data_map_shared(ip);
 449		break;
 450	}
 451
 452	switch (ifp->if_format) {
 453	case XFS_DINODE_FMT_EXTENTS:
 454	case XFS_DINODE_FMT_BTREE:
 455		break;
 456	case XFS_DINODE_FMT_LOCAL:
 457		/* Local format inode forks report no extents. */
 458		goto out_unlock_ilock;
 459	default:
 460		error = -EINVAL;
 461		goto out_unlock_ilock;
 462	}
 463
 464	if (bmv->bmv_length == -1) {
 465		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
 466		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
 467	}
 468
 469	bmv_end = bmv->bmv_offset + bmv->bmv_length;
 470
 471	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
 472	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
 473
 474	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 475		error = xfs_iread_extents(NULL, ip, whichfork);
 476		if (error)
 477			goto out_unlock_ilock;
 478	}
 479
 480	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
 481		/*
 482		 * Report a whole-file hole if the delalloc flag is set to
 483		 * stay compatible with the old implementation.
 484		 */
 485		if (iflags & BMV_IF_DELALLOC)
 486			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 487					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
 488		goto out_unlock_ilock;
 489	}
 490
 491	while (!xfs_getbmap_full(bmv)) {
 492		xfs_trim_extent(&got, first_bno, len);
 493
 494		/*
 495		 * Report an entry for a hole if this extent doesn't directly
 496		 * follow the previous one.
 497		 */
 498		if (got.br_startoff > bno) {
 499			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 500					got.br_startoff);
 501			if (xfs_getbmap_full(bmv))
 502				break;
 503		}
 504
 505		/*
 506		 * In order to report shared extents accurately, we report each
 507		 * distinct shared / unshared part of a single bmbt record with
 508		 * an individual getbmapx record.
 509		 */
 510		bno = got.br_startoff + got.br_blockcount;
 511		rec = got;
 512		do {
 513			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
 514					&rec);
 515			if (error || xfs_getbmap_full(bmv))
 516				goto out_unlock_ilock;
 517		} while (xfs_getbmap_next_rec(&rec, bno));
 518
 519		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
 520			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
 521
 522			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
 523
 524			if (whichfork != XFS_ATTR_FORK && bno < end &&
 525			    !xfs_getbmap_full(bmv)) {
 526				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
 527						bno, end);
 528			}
 529			break;
 530		}
 531
 532		if (bno >= first_bno + len)
 533			break;
 534	}
 535
 536out_unlock_ilock:
 537	xfs_iunlock(ip, lock);
 538out_unlock_iolock:
 539	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 540	return error;
 541}
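
xfs_getbmap() is the backend for the XFS_IOC_GETBMAPX ioctl. A minimal
userspace sketch of driving it, assuming the xfsprogs headers are installed
(error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* struct getbmapx, XFS_IOC_GETBMAPX */

int print_extents(int fd)
{
	/* slot 0 is the header; the kernel fills up to bmv_count - 1 entries */
	struct getbmapx map[33];

	memset(map, 0, sizeof(map));
	map[0].bmv_length = -1;			/* whole file */
	map[0].bmv_count = 33;
	map[0].bmv_iflags = BMV_IF_PREALLOC;	/* flag unwritten extents */

	if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
		return -1;

	for (int i = 1; i <= map[0].bmv_entries; i++)
		printf("offset %lld len %lld block %lld flags %#x\n",
		       (long long)map[i].bmv_offset,
		       (long long)map[i].bmv_length,
		       (long long)map[i].bmv_block,
		       map[i].bmv_oflags);
	return 0;
}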
 542
 543/*
  544 * Dead simple method of punching delayed allocation blocks from a range in
 545 * the inode.  This will always punch out both the start and end blocks, even
 546 * if the ranges only partially overlap them, so it is up to the caller to
 547 * ensure that partial blocks are not passed in.
 548 */
 549int
 550xfs_bmap_punch_delalloc_range(
 551	struct xfs_inode	*ip,
 552	xfs_fileoff_t		start_fsb,
 553	xfs_fileoff_t		length)
 554{
 555	struct xfs_ifork	*ifp = &ip->i_df;
 556	xfs_fileoff_t		end_fsb = start_fsb + length;
 557	struct xfs_bmbt_irec	got, del;
 558	struct xfs_iext_cursor	icur;
 559	int			error = 0;
 560
 561	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 562
 563	xfs_ilock(ip, XFS_ILOCK_EXCL);
 564	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
 565		goto out_unlock;
 566
 567	while (got.br_startoff + got.br_blockcount > start_fsb) {
 568		del = got;
 569		xfs_trim_extent(&del, start_fsb, length);
 570
 571		/*
 572		 * A delete can push the cursor forward. Step back to the
 573		 * previous extent on non-delalloc or extents outside the
 574		 * target range.
 575		 */
 576		if (!del.br_blockcount ||
 577		    !isnullstartblock(del.br_startblock)) {
 578			if (!xfs_iext_prev_extent(ifp, &icur, &got))
 579				break;
 580			continue;
 581		}
 582
 583		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
 584						  &got, &del);
 585		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
 586			break;
 587	}
 588
 589out_unlock:
 590	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 591	return error;
 592}
 593
 594/*
 595 * Test whether it is appropriate to check an inode for and free post EOF
 596 * blocks. The 'force' parameter determines whether we should also consider
 597 * regular files that are marked preallocated or append-only.
 598 */
 599bool
 600xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
 601{
 602	/* prealloc/delalloc exists only on regular files */
 603	if (!S_ISREG(VFS_I(ip)->i_mode))
 604		return false;
 605
 606	/*
 607	 * Zero sized files with no cached pages and delalloc blocks will not
 608	 * have speculative prealloc/delalloc blocks to remove.
 609	 */
 610	if (VFS_I(ip)->i_size == 0 &&
 611	    VFS_I(ip)->i_mapping->nrpages == 0 &&
 612	    ip->i_delayed_blks == 0)
 613		return false;
 614
 615	/* If we haven't read in the extent list, then don't do it now. */
 616	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
 617		return false;
 618
 619	/*
 620	 * Do not free real preallocated or append-only files unless the file
 621	 * has delalloc blocks and we are forced to remove them.
 622	 */
 623	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
 624		if (!force || ip->i_delayed_blks == 0)
 625			return false;
 626
 627	return true;
 628}
 629
 630/*
 631 * This is called to free any blocks beyond eof. The caller must hold
 632 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 633 * reference to the inode.
 634 */
 635int
 636xfs_free_eofblocks(
 637	struct xfs_inode	*ip)
 638{
 639	struct xfs_trans	*tp;
 640	int			error;
 641	xfs_fileoff_t		end_fsb;
 642	xfs_fileoff_t		last_fsb;
 643	xfs_filblks_t		map_len;
 644	int			nimaps;
 645	struct xfs_bmbt_irec	imap;
 646	struct xfs_mount	*mp = ip->i_mount;
 647
 648	/*
 649	 * Figure out if there are any blocks beyond the end
 650	 * of the file.  If not, then there is nothing to do.
 651	 */
 652	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
 653	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 654	if (last_fsb <= end_fsb)
 655		return 0;
 656	map_len = last_fsb - end_fsb;
 657
 658	nimaps = 1;
 659	xfs_ilock(ip, XFS_ILOCK_SHARED);
 660	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
 661	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 662
 663	/*
 664	 * If there are blocks after the end of file, truncate the file to its
 665	 * current size to free them up.
 666	 */
 667	if (!error && (nimaps != 0) &&
 668	    (imap.br_startblock != HOLESTARTBLOCK ||
 669	     ip->i_delayed_blks)) {
 670		/*
 671		 * Attach the dquots to the inode up front.
 672		 */
 673		error = xfs_qm_dqattach(ip);
 674		if (error)
 675			return error;
 676
 677		/* wait on dio to ensure i_size has settled */
 678		inode_dio_wait(VFS_I(ip));
 679
 680		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
 681				&tp);
 682		if (error) {
 683			ASSERT(XFS_FORCED_SHUTDOWN(mp));
 684			return error;
 685		}
 686
 687		xfs_ilock(ip, XFS_ILOCK_EXCL);
 688		xfs_trans_ijoin(tp, ip, 0);
 689
 690		/*
 691		 * Do not update the on-disk file size.  If we update the
 692		 * on-disk file size and then the system crashes before the
 693		 * contents of the file are flushed to disk then the files
  694		 * may be full of holes (i.e. the NULL files bug).
 695		 */
 696		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
 697					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
 698		if (error) {
 699			/*
 700			 * If we get an error at this point we simply don't
 701			 * bother truncating the file.
 702			 */
 703			xfs_trans_cancel(tp);
 704		} else {
 705			error = xfs_trans_commit(tp);
 706			if (!error)
 707				xfs_inode_clear_eofblocks_tag(ip);
 708		}
 709
 710		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 711	}
 712	return error;
 713}
 714
 715int
 716xfs_alloc_file_space(
 717	struct xfs_inode	*ip,
 718	xfs_off_t		offset,
 719	xfs_off_t		len,
 720	int			alloc_type)
 721{
 722	xfs_mount_t		*mp = ip->i_mount;
 723	xfs_off_t		count;
 724	xfs_filblks_t		allocated_fsb;
 725	xfs_filblks_t		allocatesize_fsb;
 726	xfs_extlen_t		extsz, temp;
 727	xfs_fileoff_t		startoffset_fsb;
 728	xfs_fileoff_t		endoffset_fsb;
 729	int			nimaps;
 730	int			quota_flag;
 731	int			rt;
 732	xfs_trans_t		*tp;
 733	xfs_bmbt_irec_t		imaps[1], *imapp;
 734	uint			qblocks, resblks, resrtextents;
 735	int			error;
 736
 737	trace_xfs_alloc_file_space(ip);
 738
 739	if (XFS_FORCED_SHUTDOWN(mp))
 740		return -EIO;
 741
 742	error = xfs_qm_dqattach(ip);
 743	if (error)
 744		return error;
 745
 746	if (len <= 0)
 747		return -EINVAL;
 748
 749	rt = XFS_IS_REALTIME_INODE(ip);
 750	extsz = xfs_get_extsz_hint(ip);
 751
 752	count = len;
 753	imapp = &imaps[0];
 754	nimaps = 1;
 755	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
 756	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
 757	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
 758
 759	/*
 760	 * Allocate file space until done or until there is an error
 761	 */
 762	while (allocatesize_fsb && !error) {
 763		xfs_fileoff_t	s, e;
 764
 765		/*
 766		 * Determine space reservations for data/realtime.
 767		 */
 768		if (unlikely(extsz)) {
 769			s = startoffset_fsb;
 770			do_div(s, extsz);
 771			s *= extsz;
 772			e = startoffset_fsb + allocatesize_fsb;
 773			div_u64_rem(startoffset_fsb, extsz, &temp);
 774			if (temp)
 775				e += temp;
 776			div_u64_rem(e, extsz, &temp);
 777			if (temp)
 778				e += extsz - temp;
 779		} else {
 780			s = 0;
 781			e = allocatesize_fsb;
 782		}
 783
 784		/*
 785		 * The transaction reservation is limited to a 32-bit block
 786		 * count, hence we need to limit the number of blocks we are
 787		 * trying to reserve to avoid an overflow. We can't allocate
 788		 * more than @nimaps extents, and an extent is limited on disk
 789		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
 790		 */
 791		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
 792		if (unlikely(rt)) {
 793			resrtextents = qblocks = resblks;
 794			resrtextents /= mp->m_sb.sb_rextsize;
 795			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 796			quota_flag = XFS_QMOPT_RES_RTBLKS;
 797		} else {
 798			resrtextents = 0;
 799			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
 800			quota_flag = XFS_QMOPT_RES_REGBLKS;
 801		}
 802
 803		/*
 804		 * Allocate and setup the transaction.
 805		 */
 806		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
 807				resrtextents, 0, &tp);
 808
 809		/*
 810		 * Check for running out of space
 811		 */
 812		if (error) {
 813			/*
 814			 * Free the transaction structure.
 815			 */
 816			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
 817			break;
 818		}
 819		xfs_ilock(ip, XFS_ILOCK_EXCL);
 820		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
 821						      0, quota_flag);
 822		if (error)
 823			goto error1;
 824
 825		xfs_trans_ijoin(tp, ip, 0);
 826
 827		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
 828					allocatesize_fsb, alloc_type, 0, imapp,
 829					&nimaps);
 830		if (error)
 831			goto error0;
 832
 833		/*
 834		 * Complete the transaction
 835		 */
 836		error = xfs_trans_commit(tp);
 837		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 838		if (error)
 839			break;
 840
 841		allocated_fsb = imapp->br_blockcount;
 842
 843		if (nimaps == 0) {
 844			error = -ENOSPC;
 845			break;
 846		}
 847
 848		startoffset_fsb += allocated_fsb;
 849		allocatesize_fsb -= allocated_fsb;
 850	}
 851
 852	return error;
 853
 854error0:	/* unlock inode, unreserve quota blocks, cancel trans */
 855	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
 856
 857error1:	/* Just cancel transaction */
 858	xfs_trans_cancel(tp);
 859	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 860	return error;
 861}
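
xfs_alloc_file_space() is reached from fallocate() preallocation requests.
A short userspace sketch (FALLOC_FL_KEEP_SIZE preallocates blocks without
growing i_size):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

/* preallocate 16 MiB at offset 0 without changing the file size */
static int prealloc(int fd)
{
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0) {
		perror("fallocate");
		return -1;
	}
	return 0;
}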
 862
 863static int
 864xfs_unmap_extent(
 865	struct xfs_inode	*ip,
 866	xfs_fileoff_t		startoffset_fsb,
 867	xfs_filblks_t		len_fsb,
 868	int			*done)
 869{
 870	struct xfs_mount	*mp = ip->i_mount;
 871	struct xfs_trans	*tp;
 872	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 873	int			error;
 874
 875	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
 876	if (error) {
 877		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
 878		return error;
 879	}
 880
 881	xfs_ilock(ip, XFS_ILOCK_EXCL);
 882	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
 883			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
 884	if (error)
 885		goto out_trans_cancel;
 886
 887	xfs_trans_ijoin(tp, ip, 0);
 888
 889	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
 890	if (error)
 891		goto out_trans_cancel;
 892
 893	error = xfs_trans_commit(tp);
 894out_unlock:
 895	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 896	return error;
 897
 898out_trans_cancel:
 899	xfs_trans_cancel(tp);
 900	goto out_unlock;
 901}
 902
 903/* Caller must first wait for the completion of any pending DIOs if required. */
 904int
 905xfs_flush_unmap_range(
 906	struct xfs_inode	*ip,
 907	xfs_off_t		offset,
 908	xfs_off_t		len)
 909{
 910	struct xfs_mount	*mp = ip->i_mount;
 911	struct inode		*inode = VFS_I(ip);
 912	xfs_off_t		rounding, start, end;
 913	int			error;
 914
 915	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
 916	start = round_down(offset, rounding);
 917	end = round_up(offset + len, rounding) - 1;
 918
 919	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 920	if (error)
 921		return error;
 922	truncate_pagecache_range(inode, start, end);
 923	return 0;
 924}
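
The range is widened to the larger of the filesystem block size and
PAGE_SIZE so that no partially cached page survives the invalidation. A
standalone sketch of that rounding, assuming a power-of-two rounding value
(hypothetical helper):

#include <stdint.h>

/* widen [offset, offset + len) to rounding-aligned start/end (inclusive) */
static void widen_range(uint64_t offset, uint64_t len, uint64_t rounding,
			uint64_t *start, uint64_t *end)
{
	*start = offset & ~(rounding - 1);			/* round_down */
	*end = ((offset + len + rounding - 1) & ~(rounding - 1)) - 1;
}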
 925
 926int
 927xfs_free_file_space(
 928	struct xfs_inode	*ip,
 929	xfs_off_t		offset,
 930	xfs_off_t		len)
 931{
 932	struct xfs_mount	*mp = ip->i_mount;
 933	xfs_fileoff_t		startoffset_fsb;
 934	xfs_fileoff_t		endoffset_fsb;
 935	int			done = 0, error;
 936
 937	trace_xfs_free_file_space(ip);
 938
 939	error = xfs_qm_dqattach(ip);
 940	if (error)
 941		return error;
 942
 943	if (len <= 0)	/* if nothing being freed */
 944		return 0;
 945
 946	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
 947	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 948
 949	/*
 950	 * Need to zero the stuff we're not freeing, on disk.
 951	 */
 952	if (endoffset_fsb > startoffset_fsb) {
 953		while (!done) {
 954			error = xfs_unmap_extent(ip, startoffset_fsb,
 955					endoffset_fsb - startoffset_fsb, &done);
 956			if (error)
 957				return error;
 958		}
 959	}
 960
 961	/*
  962	 * Now that we've unmapped all the full blocks we'll have to zero out any
 963	 * partial block at the beginning and/or end.  iomap_zero_range is smart
 964	 * enough to skip any holes, including those we just created, but we
 965	 * must take care not to zero beyond EOF and enlarge i_size.
 966	 */
 967	if (offset >= XFS_ISIZE(ip))
 968		return 0;
 969	if (offset + len > XFS_ISIZE(ip))
 970		len = XFS_ISIZE(ip) - offset;
 971	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
 972			&xfs_buffered_write_iomap_ops);
 973	if (error)
 974		return error;
 975
 976	/*
 977	 * If we zeroed right up to EOF and EOF straddles a page boundary we
 978	 * must make sure that the post-EOF area is also zeroed because the
 979	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
 980	 * Writeback of the eof page will do this, albeit clumsily.
 981	 */
 982	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
 983		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 984				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
 985	}
 986
 987	return error;
 988}
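
This is the backend for hole punching via fallocate(); PUNCH_HOLE must be
combined with KEEP_SIZE. A minimal userspace sketch:

#define _GNU_SOURCE
#include <fcntl.h>

/* punch a 1 MiB hole at offset 4 MiB; the file size is unchanged */
static int punch_hole(int fd)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4 << 20, 1 << 20);
}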
 989
 990static int
 991xfs_prepare_shift(
 992	struct xfs_inode	*ip,
 993	loff_t			offset)
 994{
 995	struct xfs_mount	*mp = ip->i_mount;
 996	int			error;
 997
 998	/*
 999	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1000	 * into the accessible region of the file.
1001	 */
1002	if (xfs_can_free_eofblocks(ip, true)) {
1003		error = xfs_free_eofblocks(ip);
1004		if (error)
1005			return error;
1006	}
1007
1008	/*
1009	 * Shift operations must stabilize the start block offset boundary along
1010	 * with the full range of the operation. If we don't, a COW writeback
1011	 * completion could race with an insert, front merge with the start
1012	 * extent (after split) during the shift and corrupt the file. Start
1013	 * with the block just prior to the start to stabilize the boundary.
1014	 */
1015	offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
1016	if (offset)
1017		offset -= (1 << mp->m_sb.sb_blocklog);
1018
1019	/*
1020	 * Writeback and invalidate cache for the remainder of the file as we're
1021	 * about to shift down every extent from offset to EOF.
1022	 */
1023	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1024	if (error)
1025		return error;
1026
1027	/*
1028	 * Clean out anything hanging around in the cow fork now that
1029	 * we've flushed all the dirty data out to disk to avoid having
1030	 * CoW extents at the wrong offsets.
1031	 */
1032	if (xfs_inode_has_cow_data(ip)) {
1033		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1034				true);
1035		if (error)
1036			return error;
1037	}
1038
1039	return 0;
1040}
1041
1042/*
1043 * xfs_collapse_file_space()
 1044 *	This routine frees disk space and shifts extents for the given file.
 1045 *	The first thing we do is free the data blocks in the specified range
 1046 *	by calling xfs_free_file_space(), which also syncs dirty data and
 1047 *	invalidates the page cache over the region the collapse is working
 1048 *	on. Then we shift the extent records to the left to cover the hole.
1049 * RETURNS:
1050 *	0 on success
1051 *	errno on error
1052 *
1053 */
1054int
1055xfs_collapse_file_space(
1056	struct xfs_inode	*ip,
1057	xfs_off_t		offset,
1058	xfs_off_t		len)
1059{
1060	struct xfs_mount	*mp = ip->i_mount;
1061	struct xfs_trans	*tp;
1062	int			error;
1063	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1064	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1065	bool			done = false;
1066
1067	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1068	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1069
1070	trace_xfs_collapse_file_space(ip);
1071
1072	error = xfs_free_file_space(ip, offset, len);
1073	if (error)
1074		return error;
1075
1076	error = xfs_prepare_shift(ip, offset);
1077	if (error)
1078		return error;
1079
1080	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1081	if (error)
1082		return error;
1083
1084	xfs_ilock(ip, XFS_ILOCK_EXCL);
1085	xfs_trans_ijoin(tp, ip, 0);
1086
1087	while (!done) {
1088		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1089				&done);
1090		if (error)
1091			goto out_trans_cancel;
1092		if (done)
1093			break;
1094
1095		/* finish any deferred frees and roll the transaction */
1096		error = xfs_defer_finish(&tp);
1097		if (error)
1098			goto out_trans_cancel;
1099	}
1100
1101	error = xfs_trans_commit(tp);
1102	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1103	return error;
1104
1105out_trans_cancel:
1106	xfs_trans_cancel(tp);
1107	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1108	return error;
1109}
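
This is the backend for fallocate(FALLOC_FL_COLLAPSE_RANGE): both offset
and len must be multiples of the filesystem block size, and the file
shrinks by len. A minimal userspace sketch:

#define _GNU_SOURCE
#include <fcntl.h>

/* drop block-aligned [off, off + len) and shift the tail of the file left */
static int collapse_range(int fd, off_t off, off_t len)
{
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);
}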
1110
1111/*
1112 * xfs_insert_file_space()
 1113 *	This routine creates hole space by shifting extents for the given file.
 1114 *	The first thing we do is sync dirty data and invalidate the page cache
 1115 *	over the region on which the insert range is working. Then we split
 1116 *	an extent in two at the given offset by calling xfs_bmap_split_extent,
 1117 *	and shift all extent records lying between [offset, last allocated
 1118 *	extent] to the right to make room for the hole.
1119 * RETURNS:
1120 *	0 on success
1121 *	errno on error
1122 */
1123int
1124xfs_insert_file_space(
1125	struct xfs_inode	*ip,
1126	loff_t			offset,
1127	loff_t			len)
1128{
1129	struct xfs_mount	*mp = ip->i_mount;
1130	struct xfs_trans	*tp;
1131	int			error;
1132	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1133	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1134	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1135	bool			done = false;
1136
1137	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1138	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1139
1140	trace_xfs_insert_file_space(ip);
1141
1142	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1143	if (error)
1144		return error;
1145
1146	error = xfs_prepare_shift(ip, offset);
1147	if (error)
1148		return error;
1149
1150	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1151			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1152	if (error)
1153		return error;
1154
1155	xfs_ilock(ip, XFS_ILOCK_EXCL);
1156	xfs_trans_ijoin(tp, ip, 0);
1157
1158	/*
1159	 * The extent shifting code works on extent granularity. So, if stop_fsb
1160	 * is not the starting block of extent, we need to split the extent at
1161	 * stop_fsb.
1162	 */
1163	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1164	if (error)
1165		goto out_trans_cancel;
1166
1167	do {
1168		error = xfs_defer_finish(&tp);
1169		if (error)
1170			goto out_trans_cancel;
1171
1172		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1173				&done, stop_fsb);
1174		if (error)
1175			goto out_trans_cancel;
1176	} while (!done);
1177
1178	error = xfs_trans_commit(tp);
1179	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1180	return error;
1181
1182out_trans_cancel:
1183	xfs_trans_cancel(tp);
1184	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1185	return error;
1186}
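
And the inverse, the backend for fallocate(FALLOC_FL_INSERT_RANGE); again
offset and len must be block-aligned and the range must land strictly
inside the file:

#define _GNU_SOURCE
#include <fcntl.h>

/* shift everything from block-aligned 'off' right by 'len', creating a hole */
static int insert_range(int fd, off_t off, off_t len)
{
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
}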
1187
1188/*
1189 * We need to check that the format of the data fork in the temporary inode is
1190 * valid for the target inode before doing the swap. This is not a problem with
1191 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1192 * data fork depending on the space the attribute fork is taking so we can get
1193 * invalid formats on the target inode.
1194 *
1195 * E.g. target has space for 7 extents in extent format, temp inode only has
1196 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1197 * btree, but when swapped it needs to be in extent format. Hence we can't just
1198 * blindly swap data forks on attr2 filesystems.
1199 *
1200 * Note that we check the swap in both directions so that we don't end up with
1201 * a corrupt temporary inode, either.
1202 *
1203 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1204 * inode will prevent this situation from occurring, so all we do here is
 1205 * reject and log the attempt. Basically we are putting the responsibility on
1206 * userspace to get this right.
1207 */
1208static int
1209xfs_swap_extents_check_format(
1210	struct xfs_inode	*ip,	/* target inode */
1211	struct xfs_inode	*tip)	/* tmp inode */
1212{
1213	struct xfs_ifork	*ifp = &ip->i_df;
1214	struct xfs_ifork	*tifp = &tip->i_df;
1215
1216	/* User/group/project quota ids must match if quotas are enforced. */
1217	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1218	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1219	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1220	     ip->i_d.di_projid != tip->i_d.di_projid))
1221		return -EINVAL;
1222
1223	/* Should never get a local format */
1224	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1225	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1226		return -EINVAL;
1227
1228	/*
 1229	 * If the target inode has fewer extents than the temporary inode then
1230	 * why did userspace call us?
1231	 */
1232	if (ifp->if_nextents < tifp->if_nextents)
1233		return -EINVAL;
1234
1235	/*
1236	 * If we have to use the (expensive) rmap swap method, we can
1237	 * handle any number of extents and any format.
1238	 */
1239	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1240		return 0;
1241
1242	/*
1243	 * if the target inode is in extent form and the temp inode is in btree
1244	 * form then we will end up with the target inode in the wrong format
 1245	 * as we already know there are fewer extents in the temp inode.
1246	 */
1247	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1248	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1249		return -EINVAL;
1250
1251	/* Check temp in extent form to max in target */
1252	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1253	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1254		return -EINVAL;
1255
1256	/* Check target in extent form to max in temp */
1257	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1258	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1259		return -EINVAL;
1260
1261	/*
1262	 * If we are in a btree format, check that the temp root block will fit
1263	 * in the target and that it has enough extents to be in btree format
1264	 * in the target.
1265	 *
1266	 * Note that we have to be careful to allow btree->extent conversions
1267	 * (a common defrag case) which will occur when the temp inode is in
1268	 * extent format...
1269	 */
1270	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1271		if (XFS_IFORK_Q(ip) &&
1272		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
1273			return -EINVAL;
1274		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1275			return -EINVAL;
1276	}
1277
1278	/* Reciprocal target->temp btree format checks */
1279	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1280		if (XFS_IFORK_Q(tip) &&
1281		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1282			return -EINVAL;
1283		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1284			return -EINVAL;
1285	}
1286
1287	return 0;
1288}
1289
1290static int
1291xfs_swap_extent_flush(
1292	struct xfs_inode	*ip)
1293{
1294	int	error;
1295
1296	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1297	if (error)
1298		return error;
1299	truncate_pagecache_range(VFS_I(ip), 0, -1);
1300
1301	/* Verify O_DIRECT for ftmp */
1302	if (VFS_I(ip)->i_mapping->nrpages)
1303		return -EINVAL;
1304	return 0;
1305}
1306
1307/*
1308 * Move extents from one file to another, when rmap is enabled.
1309 */
1310STATIC int
1311xfs_swap_extent_rmap(
1312	struct xfs_trans		**tpp,
1313	struct xfs_inode		*ip,
1314	struct xfs_inode		*tip)
1315{
1316	struct xfs_trans		*tp = *tpp;
1317	struct xfs_bmbt_irec		irec;
1318	struct xfs_bmbt_irec		uirec;
1319	struct xfs_bmbt_irec		tirec;
1320	xfs_fileoff_t			offset_fsb;
1321	xfs_fileoff_t			end_fsb;
1322	xfs_filblks_t			count_fsb;
1323	int				error;
1324	xfs_filblks_t			ilen;
1325	xfs_filblks_t			rlen;
1326	int				nimaps;
1327	uint64_t			tip_flags2;
1328
1329	/*
1330	 * If the source file has shared blocks, we must flag the donor
1331	 * file as having shared blocks so that we get the shared-block
1332	 * rmap functions when we go to fix up the rmaps.  The flags
 1333	 * will be switched for real later.
1334	 */
1335	tip_flags2 = tip->i_d.di_flags2;
1336	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1337		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1338
1339	offset_fsb = 0;
1340	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1341	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1342
1343	while (count_fsb) {
1344		/* Read extent from the donor file */
1345		nimaps = 1;
1346		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1347				&nimaps, 0);
1348		if (error)
1349			goto out;
1350		ASSERT(nimaps == 1);
1351		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1352
1353		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1354		ilen = tirec.br_blockcount;
1355
1356		/* Unmap the old blocks in the source file. */
1357		while (tirec.br_blockcount) {
1358			ASSERT(tp->t_firstblock == NULLFSBLOCK);
1359			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1360
1361			/* Read extent from the source file */
1362			nimaps = 1;
1363			error = xfs_bmapi_read(ip, tirec.br_startoff,
1364					tirec.br_blockcount, &irec,
1365					&nimaps, 0);
1366			if (error)
1367				goto out;
1368			ASSERT(nimaps == 1);
1369			ASSERT(tirec.br_startoff == irec.br_startoff);
1370			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1371
1372			/* Trim the extent. */
1373			uirec = tirec;
1374			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1375					tirec.br_blockcount,
1376					irec.br_blockcount);
1377			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1378
1379			/* Remove the mapping from the donor file. */
1380			xfs_bmap_unmap_extent(tp, tip, &uirec);
1381
1382			/* Remove the mapping from the source file. */
1383			xfs_bmap_unmap_extent(tp, ip, &irec);
1384
1385			/* Map the donor file's blocks into the source file. */
1386			xfs_bmap_map_extent(tp, ip, &uirec);
1387
1388			/* Map the source file's blocks into the donor file. */
1389			xfs_bmap_map_extent(tp, tip, &irec);
1390
1391			error = xfs_defer_finish(tpp);
1392			tp = *tpp;
1393			if (error)
1394				goto out;
1395
1396			tirec.br_startoff += rlen;
1397			if (tirec.br_startblock != HOLESTARTBLOCK &&
1398			    tirec.br_startblock != DELAYSTARTBLOCK)
1399				tirec.br_startblock += rlen;
1400			tirec.br_blockcount -= rlen;
1401		}
1402
1403		/* Roll on... */
1404		count_fsb -= ilen;
1405		offset_fsb += ilen;
1406	}
1407
1408	tip->i_d.di_flags2 = tip_flags2;
1409	return 0;
1410
1411out:
1412	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1413	tip->i_d.di_flags2 = tip_flags2;
1414	return error;
1415}
1416
1417/* Swap the extents of two files by swapping data forks. */
1418STATIC int
1419xfs_swap_extent_forks(
1420	struct xfs_trans	*tp,
1421	struct xfs_inode	*ip,
1422	struct xfs_inode	*tip,
1423	int			*src_log_flags,
1424	int			*target_log_flags)
1425{
1426	xfs_filblks_t		aforkblks = 0;
1427	xfs_filblks_t		taforkblks = 0;
1428	xfs_extnum_t		junk;
1429	uint64_t		tmp;
1430	int			error;
1431
1432	/*
1433	 * Count the number of extended attribute blocks
1434	 */
1435	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
1436	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1437		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1438				&aforkblks);
1439		if (error)
1440			return error;
1441	}
1442	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
1443	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1444		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1445				&taforkblks);
1446		if (error)
1447			return error;
1448	}
1449
1450	/*
1451	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1452	 * block headers. We can't start changing the bmbt blocks until the
1453	 * inode owner change is logged so recovery does the right thing in the
1454	 * event of a crash. Set the owner change log flags now and leave the
1455	 * bmbt scan as the last step.
1456	 */
1457	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
1458		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1459			(*target_log_flags) |= XFS_ILOG_DOWNER;
1460		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1461			(*src_log_flags) |= XFS_ILOG_DOWNER;
1462	}
1463
1464	/*
1465	 * Swap the data forks of the inodes
1466	 */
1467	swap(ip->i_df, tip->i_df);
1468
1469	/*
1470	 * Fix the on-disk inode values
1471	 */
1472	tmp = (uint64_t)ip->i_d.di_nblocks;
1473	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1474	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1475
1476	/*
1477	 * The extents in the source inode could still contain speculative
1478	 * preallocation beyond EOF (e.g. the file is open but not modified
1479	 * while defrag is in progress). In that case, we need to copy over the
1480	 * number of delalloc blocks the data fork in the source inode is
1481	 * tracking beyond EOF so that when the fork is truncated away when the
1482	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1483	 * counter on that inode.
1484	 */
1485	ASSERT(tip->i_delayed_blks == 0);
1486	tip->i_delayed_blks = ip->i_delayed_blks;
1487	ip->i_delayed_blks = 0;
1488
1489	switch (ip->i_df.if_format) {
1490	case XFS_DINODE_FMT_EXTENTS:
1491		(*src_log_flags) |= XFS_ILOG_DEXT;
1492		break;
1493	case XFS_DINODE_FMT_BTREE:
1494		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1495		       (*src_log_flags & XFS_ILOG_DOWNER));
1496		(*src_log_flags) |= XFS_ILOG_DBROOT;
1497		break;
1498	}
1499
1500	switch (tip->i_df.if_format) {
1501	case XFS_DINODE_FMT_EXTENTS:
1502		(*target_log_flags) |= XFS_ILOG_DEXT;
1503		break;
1504	case XFS_DINODE_FMT_BTREE:
1505		(*target_log_flags) |= XFS_ILOG_DBROOT;
1506		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1507		       (*target_log_flags & XFS_ILOG_DOWNER));
1508		break;
1509	}
1510
1511	return 0;
1512}
1513
1514/*
1515 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1516 * change owner scan attempts to order all modified buffers in the current
1517 * transaction. In the event of ordered buffer failure, the offending buffer is
1518 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1519 * the transaction in this case to replenish the fallback log reservation and
1520 * restart the scan. This process repeats until the scan completes.
1521 */
1522static int
1523xfs_swap_change_owner(
1524	struct xfs_trans	**tpp,
1525	struct xfs_inode	*ip,
1526	struct xfs_inode	*tmpip)
1527{
1528	int			error;
1529	struct xfs_trans	*tp = *tpp;
1530
1531	do {
1532		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1533					      NULL);
1534		/* success or fatal error */
1535		if (error != -EAGAIN)
1536			break;
1537
1538		error = xfs_trans_roll(tpp);
1539		if (error)
1540			break;
1541		tp = *tpp;
1542
1543		/*
1544		 * Redirty both inodes so they can relog and keep the log tail
1545		 * moving forward.
1546		 */
1547		xfs_trans_ijoin(tp, ip, 0);
1548		xfs_trans_ijoin(tp, tmpip, 0);
1549		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1550		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1551	} while (true);
1552
1553	return error;
1554}
1555
1556int
1557xfs_swap_extents(
1558	struct xfs_inode	*ip,	/* target inode */
1559	struct xfs_inode	*tip,	/* tmp inode */
1560	struct xfs_swapext	*sxp)
1561{
1562	struct xfs_mount	*mp = ip->i_mount;
1563	struct xfs_trans	*tp;
1564	struct xfs_bstat	*sbp = &sxp->sx_stat;
1565	int			src_log_flags, target_log_flags;
1566	int			error = 0;
1567	int			lock_flags;
1568	uint64_t		f;
1569	int			resblks = 0;
1570	unsigned int		flags = 0;
1571
1572	/*
1573	 * Lock the inodes against other IO, page faults and truncate to
1574	 * begin with.  Then we can ensure the inodes are flushed and have no
1575	 * page cache safely. Once we have done this we can take the ilocks and
1576	 * do the rest of the checks.
1577	 */
1578	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1579	lock_flags = XFS_MMAPLOCK_EXCL;
1580	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1581
1582	/* Verify that both files have the same format */
1583	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1584		error = -EINVAL;
1585		goto out_unlock;
1586	}
1587
1588	/* Verify both files are either real-time or non-realtime */
1589	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1590		error = -EINVAL;
1591		goto out_unlock;
1592	}
1593
1594	error = xfs_qm_dqattach(ip);
1595	if (error)
1596		goto out_unlock;
1597
1598	error = xfs_qm_dqattach(tip);
1599	if (error)
1600		goto out_unlock;
1601
1602	error = xfs_swap_extent_flush(ip);
1603	if (error)
1604		goto out_unlock;
1605	error = xfs_swap_extent_flush(tip);
1606	if (error)
1607		goto out_unlock;
1608
1609	if (xfs_inode_has_cow_data(tip)) {
1610		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1611		if (error)
1612			goto out_unlock;
1613	}
1614
1615	/*
1616	 * Extent "swapping" with rmap requires a permanent reservation and
1617	 * a block reservation because it's really just a remap operation
1618	 * performed with log redo items!
1619	 */
1620	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1621		int		w = XFS_DATA_FORK;
1622		uint32_t	ipnext = ip->i_df.if_nextents;
1623		uint32_t	tipnext	= tip->i_df.if_nextents;
1624
1625		/*
1626		 * Conceptually this shouldn't affect the shape of either bmbt,
1627		 * but since we atomically move extents one by one, we reserve
1628		 * enough space to rebuild both trees.
1629		 */
1630		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1631		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1632
1633		/*
1634		 * If either inode straddles a bmapbt block allocation boundary,
1635		 * the rmapbt algorithm triggers repeated allocs and frees as
1636		 * extents are remapped. This can exhaust the block reservation
1637		 * prematurely and cause shutdown. Return freed blocks to the
1638		 * transaction reservation to counter this behavior.
1639		 */
1640		flags |= XFS_TRANS_RES_FDBLKS;
1641	}
1642	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1643				&tp);
1644	if (error)
1645		goto out_unlock;
1646
1647	/*
 1648	 * Lock and join the inodes to the transaction so that transaction commit
1649	 * or cancel will unlock the inodes from this point onwards.
1650	 */
1651	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1652	lock_flags |= XFS_ILOCK_EXCL;
1653	xfs_trans_ijoin(tp, ip, 0);
1654	xfs_trans_ijoin(tp, tip, 0);
1655
1656
1657	/* Verify all data are being swapped */
1658	if (sxp->sx_offset != 0 ||
1659	    sxp->sx_length != ip->i_d.di_size ||
1660	    sxp->sx_length != tip->i_d.di_size) {
1661		error = -EFAULT;
1662		goto out_trans_cancel;
1663	}
1664
1665	trace_xfs_swap_extent_before(ip, 0);
1666	trace_xfs_swap_extent_before(tip, 1);
1667
1668	/* check inode formats now that data is flushed */
1669	error = xfs_swap_extents_check_format(ip, tip);
1670	if (error) {
1671		xfs_notice(mp,
1672		    "%s: inode 0x%llx format is incompatible for exchanging.",
1673				__func__, ip->i_ino);
1674		goto out_trans_cancel;
1675	}
1676
1677	/*
1678	 * Compare the current change & modify times with that
1679	 * passed in.  If they differ, we abort this swap.
1680	 * This is the mechanism used to ensure the calling
1681	 * process that the file was not changed out from
1682	 * under it.
1683	 */
1684	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1685	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1686	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1687	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1688		error = -EBUSY;
1689		goto out_trans_cancel;
1690	}
1691
1692	/*
1693	 * Note the trickiness in setting the log flags - we set the owner log
1694	 * flag on the opposite inode (i.e. the inode we are setting the new
1695	 * owner to be) because once we swap the forks and log that, log
1696	 * recovery is going to see the fork as owned by the swapped inode,
1697	 * not the pre-swapped inodes.
1698	 */
1699	src_log_flags = XFS_ILOG_CORE;
1700	target_log_flags = XFS_ILOG_CORE;
1701
1702	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1703		error = xfs_swap_extent_rmap(&tp, ip, tip);
1704	else
1705		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1706				&target_log_flags);
1707	if (error)
1708		goto out_trans_cancel;
1709
1710	/* Do we have to swap reflink flags? */
1711	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1712	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1713		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1714		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1715		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1716		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1717		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1718	}
1719
1720	/* Swap the cow forks. */
1721	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1722		ASSERT(!ip->i_cowfp ||
1723		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1724		ASSERT(!tip->i_cowfp ||
1725		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1726
1727		swap(ip->i_cowfp, tip->i_cowfp);
1728
1729		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1730			xfs_inode_set_cowblocks_tag(ip);
1731		else
1732			xfs_inode_clear_cowblocks_tag(ip);
1733		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1734			xfs_inode_set_cowblocks_tag(tip);
1735		else
1736			xfs_inode_clear_cowblocks_tag(tip);
1737	}
1738
1739	xfs_trans_log_inode(tp, ip,  src_log_flags);
1740	xfs_trans_log_inode(tp, tip, target_log_flags);
1741
1742	/*
1743	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1744	 * have inode number owner values in the bmbt blocks that still refer to
1745	 * the old inode. Scan each bmbt to fix up the owner values with the
1746	 * inode number of the current inode.
1747	 */
1748	if (src_log_flags & XFS_ILOG_DOWNER) {
1749		error = xfs_swap_change_owner(&tp, ip, tip);
1750		if (error)
1751			goto out_trans_cancel;
1752	}
1753	if (target_log_flags & XFS_ILOG_DOWNER) {
1754		error = xfs_swap_change_owner(&tp, tip, ip);
1755		if (error)
1756			goto out_trans_cancel;
1757	}
1758
1759	/*
1760	 * If this is a synchronous mount, make sure that the
1761	 * transaction goes to disk before returning to the user.
1762	 */
1763	if (mp->m_flags & XFS_MOUNT_WSYNC)
1764		xfs_trans_set_sync(tp);
1765
1766	error = xfs_trans_commit(tp);
1767
1768	trace_xfs_swap_extent_after(ip, 0);
1769	trace_xfs_swap_extent_after(tip, 1);
1770
1771out_unlock:
1772	xfs_iunlock(ip, lock_flags);
1773	xfs_iunlock(tip, lock_flags);
1774	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1775	return error;
1776
1777out_trans_cancel:
1778	xfs_trans_cancel(tp);
1779	goto out_unlock;
1780}
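
xfs_swap_extents() services the XFS_IOC_SWAPEXT ioctl used by the xfs_fsr
defragmenter. The caller must pass the bulkstat of the target file so the
ctime/mtime comparison above can reject files modified since the caller
sampled them. A hedged userspace sketch, with field names assumed from the
xfsprogs xfs_fs.h:

#include <string.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* struct xfs_swapext, XFS_IOC_SWAPEXT (xfsprogs) */

/* swap the data fork of 'fd' with the defragmented temp file 'tfd' */
static int swap_extents(int fd, int tfd, const struct xfs_bstat *bstat)
{
	struct xfs_swapext sx;

	memset(&sx, 0, sizeof(sx));
	sx.sx_version = XFS_SX_VERSION;
	sx.sx_fdtarget = fd;
	sx.sx_fdtmp = tfd;
	sx.sx_offset = 0;		/* whole file, as checked above */
	sx.sx_length = bstat->bs_size;
	sx.sx_stat = *bstat;		/* bulkstat of the target file */

	return ioctl(fd, XFS_IOC_SWAPEXT, &sx);
}
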
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * Copyright (c) 2012 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_bit.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_btree.h"
  18#include "xfs_trans.h"
  19#include "xfs_alloc.h"
  20#include "xfs_bmap.h"
  21#include "xfs_bmap_util.h"
  22#include "xfs_bmap_btree.h"
  23#include "xfs_rtalloc.h"
  24#include "xfs_error.h"
  25#include "xfs_quota.h"
  26#include "xfs_trans_space.h"
  27#include "xfs_trace.h"
  28#include "xfs_icache.h"
  29#include "xfs_iomap.h"
  30#include "xfs_reflink.h"
  31#include "xfs_rtbitmap.h"
  32#include "xfs_rtgroup.h"
  33
  34/* Kernel only BMAP related definitions and functions */
  35
  36/*
  37 * Convert the given file system block to a disk block.  We have to treat it
  38 * differently based on whether the file is a real time file or not, because the
  39 * bmap code does.
  40 */
  41xfs_daddr_t
  42xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  43{
  44	if (XFS_IS_REALTIME_INODE(ip))
  45		return xfs_rtb_to_daddr(ip->i_mount, fsb);
  46	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
  47}
  48
  49/*
  50 * Routine to zero an extent on disk allocated to the specific inode.
  51 */
  52int
  53xfs_zero_extent(
  54	struct xfs_inode	*ip,
  55	xfs_fsblock_t		start_fsb,
  56	xfs_off_t		count_fsb)
  57{
  58	return blkdev_issue_zeroout(xfs_inode_buftarg(ip)->bt_bdev,
  59			xfs_fsb_to_db(ip, start_fsb),
  60			XFS_FSB_TO_BB(ip->i_mount, count_fsb),
  61			GFP_KERNEL, 0);
  62}
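
/*
 * Note on units: blkdev_issue_zeroout() works in 512-byte basic blocks, so
 * the daddr/BB conversions above hand the block layer sector counts; e.g.
 * on a 4096-byte-block filesystem, XFS_FSB_TO_BB(mp, 1) == 8.
 */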
  63
  64/*
  65 * Extent tree block counting routines.
  66 */
  67
  68/*
  69 * Count leaf blocks given a range of extent records.  Delayed allocation
  70 * extents are not counted towards the totals.
  71 */
  72xfs_extnum_t
  73xfs_bmap_count_leaves(
  74	struct xfs_ifork	*ifp,
  75	xfs_filblks_t		*count)
  76{
  77	struct xfs_iext_cursor	icur;
  78	struct xfs_bmbt_irec	got;
  79	xfs_extnum_t		numrecs = 0;
  80
  81	for_each_xfs_iext(ifp, &icur, &got) {
  82		if (!isnullstartblock(got.br_startblock)) {
  83			*count += got.br_blockcount;
  84			numrecs++;
  85		}
  86	}
  87
  88	return numrecs;
  89}
  90
  91/*
  92 * Count fsblocks of the given fork.  Delayed allocation extents are
  93 * not counted towards the totals.
  94 */
  95int
  96xfs_bmap_count_blocks(
  97	struct xfs_trans	*tp,
  98	struct xfs_inode	*ip,
  99	int			whichfork,
 100	xfs_extnum_t		*nextents,
 101	xfs_filblks_t		*count)
 102{
 103	struct xfs_mount	*mp = ip->i_mount;
 104	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
 105	struct xfs_btree_cur	*cur;
 106	xfs_filblks_t		btblocks = 0;
 107	int			error;
 108
 109	*nextents = 0;
 110	*count = 0;
 111
 112	if (!ifp)
 113		return 0;
 114
 115	switch (ifp->if_format) {
 116	case XFS_DINODE_FMT_BTREE:
 117		error = xfs_iread_extents(tp, ip, whichfork);
 118		if (error)
 119			return error;
 120
 121		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 122		error = xfs_btree_count_blocks(cur, &btblocks);
 123		xfs_btree_del_cursor(cur, error);
 124		if (error)
 125			return error;
 126
 127		/*
 128		 * xfs_btree_count_blocks includes the root block contained in
 129		 * the inode fork in @btblocks, so subtract one because we're
 130		 * only interested in allocated disk blocks.
 131		 */
 132		*count += btblocks - 1;
 133
 134		fallthrough;
 135	case XFS_DINODE_FMT_EXTENTS:
 136		*nextents = xfs_bmap_count_leaves(ifp, count);
 137		break;
 138	}
 139
 140	return 0;
 141}
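
/*
 * Illustrative sketch, not part of the original source: a reader-side
 * caller could total up the data fork with xfs_bmap_count_blocks() as
 * below.  example_report_data_fork() and its xfs_notice() reporting are
 * hypothetical.
 */
#if 0
static void
example_report_data_fork(
	struct xfs_inode	*ip)
{
	xfs_extnum_t		nextents;
	xfs_filblks_t		blocks;
	int			error;

	/* A NULL transaction is fine for a read-only count. */
	error = xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK, &nextents,
			&blocks);
	if (!error)
		xfs_notice(ip->i_mount, "%llu blocks in %llu extents",
				(unsigned long long)blocks,
				(unsigned long long)nextents);
}
#endif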
 142
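/*
 * Format a single mapping into the next free kgetbmap slot, trimming it
 * against any shared extents first so that shared and unshared ranges are
 * reported as separate entries.  Delalloc extents are only reported when
 * the caller asked for them with BMV_IF_DELALLOC.
 */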
 143static int
 144xfs_getbmap_report_one(
 145	struct xfs_inode	*ip,
 146	struct getbmapx		*bmv,
 147	struct kgetbmap		*out,
 148	int64_t			bmv_end,
 149	struct xfs_bmbt_irec	*got)
 150{
 151	struct kgetbmap		*p = out + bmv->bmv_entries;
 152	bool			shared = false;
 153	int			error;
 154
 155	error = xfs_reflink_trim_around_shared(ip, got, &shared);
 156	if (error)
 157		return error;
 158
 159	if (isnullstartblock(got->br_startblock) ||
 160	    got->br_startblock == DELAYSTARTBLOCK) {
 161		/*
 162		 * Take the flush completion as being a point-in-time snapshot
 163		 * where there are no delalloc extents, and if any new ones
 164		 * have been created racily, just skip them as being 'after'
 165		 * the flush, and so they don't get reported.
 166		 */
 167		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
 168			return 0;
 169
 170		p->bmv_oflags |= BMV_OF_DELALLOC;
 171		p->bmv_block = -2;
 172	} else {
 173		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
 174	}
 175
 176	if (got->br_state == XFS_EXT_UNWRITTEN &&
 177	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
 178		p->bmv_oflags |= BMV_OF_PREALLOC;
 179
 180	if (shared)
 181		p->bmv_oflags |= BMV_OF_SHARED;
 182
 183	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
 184	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
 185
 186	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 187	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 188	bmv->bmv_entries++;
 189	return 0;
 190}
 191
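/*
 * Emit a hole entry (bmv_block == -1) covering [bno, end), unless the
 * caller suppressed hole reporting with BMV_IF_NO_HOLES.
 */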
 192static void
 193xfs_getbmap_report_hole(
 194	struct xfs_inode	*ip,
 195	struct getbmapx		*bmv,
 196	struct kgetbmap		*out,
 197	int64_t			bmv_end,
 198	xfs_fileoff_t		bno,
 199	xfs_fileoff_t		end)
 200{
 201	struct kgetbmap		*p = out + bmv->bmv_entries;
 202
 203	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
 204		return;
 205
 206	p->bmv_block = -1;
 207	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
 208	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
 209
 210	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 211	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 212	bmv->bmv_entries++;
 213}
 214
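/*
 * The output array counts as full when the requested range is exhausted or
 * when only one slot remains; that last slot is held back so the final
 * entry can be flagged BMV_OF_LAST.
 */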
 215static inline bool
 216xfs_getbmap_full(
 217	struct getbmapx		*bmv)
 218{
 219	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
 220}
 221
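/*
 * Advance @rec past the blocks that have already been reported.  Returns
 * false once the record has been consumed up to @total_end.
 */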
 222static bool
 223xfs_getbmap_next_rec(
 224	struct xfs_bmbt_irec	*rec,
 225	xfs_fileoff_t		total_end)
 226{
 227	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
 228
 229	if (end == total_end)
 230		return false;
 231
 232	rec->br_startoff += rec->br_blockcount;
 233	if (!isnullstartblock(rec->br_startblock) &&
 234	    rec->br_startblock != DELAYSTARTBLOCK)
 235		rec->br_startblock += rec->br_blockcount;
 236	rec->br_blockcount = total_end - end;
 237	return true;
 238}
 239
 240/*
 241 * Get inode's extents as described in bmv, and format for output.
 242 * Calls formatter to fill the user's buffer until all extents
 243 * are mapped, until the passed-in bmv->bmv_count slots have
 244 * been filled, or until the formatter short-circuits the loop,
 245 * if it is tracking filled-in extents on its own.
 246 */
 247int						/* error code */
 248xfs_getbmap(
 249	struct xfs_inode	*ip,
 250	struct getbmapx		*bmv,		/* user bmap structure */
 251	struct kgetbmap		*out)
 252{
 253	struct xfs_mount	*mp = ip->i_mount;
 254	int			iflags = bmv->bmv_iflags;
 255	int			whichfork, lock, error = 0;
 256	int64_t			bmv_end, max_len;
 257	xfs_fileoff_t		bno, first_bno;
 258	struct xfs_ifork	*ifp;
 259	struct xfs_bmbt_irec	got, rec;
 260	xfs_filblks_t		len;
 261	struct xfs_iext_cursor	icur;
 262
 263	if (bmv->bmv_iflags & ~BMV_IF_VALID)
 264		return -EINVAL;
 265#ifndef DEBUG
 266	/* Only allow CoW fork queries if we're debugging. */
 267	if (iflags & BMV_IF_COWFORK)
 268		return -EINVAL;
 269#endif
 270	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
 271		return -EINVAL;
 272
 273	if (bmv->bmv_length < -1)
 274		return -EINVAL;
 275	bmv->bmv_entries = 0;
 276	if (bmv->bmv_length == 0)
 277		return 0;
 278
 279	if (iflags & BMV_IF_ATTRFORK)
 280		whichfork = XFS_ATTR_FORK;
 281	else if (iflags & BMV_IF_COWFORK)
 282		whichfork = XFS_COW_FORK;
 283	else
 284		whichfork = XFS_DATA_FORK;
 285
 286	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 287	switch (whichfork) {
 288	case XFS_ATTR_FORK:
 289		lock = xfs_ilock_attr_map_shared(ip);
 290		if (!xfs_inode_has_attr_fork(ip))
 291			goto out_unlock_ilock;
 292
 293		max_len = 1LL << 32;
 294		break;
 295	case XFS_COW_FORK:
 296		lock = XFS_ILOCK_SHARED;
 297		xfs_ilock(ip, lock);
 298
 299		/* No CoW fork? Just return */
 300		if (!xfs_ifork_ptr(ip, whichfork))
 301			goto out_unlock_ilock;
 302
 303		if (xfs_get_cowextsz_hint(ip))
 304			max_len = mp->m_super->s_maxbytes;
 305		else
 306			max_len = XFS_ISIZE(ip);
 307		break;
 308	case XFS_DATA_FORK:
 309		if (!(iflags & BMV_IF_DELALLOC) &&
 310		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
 311			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 312			if (error)
 313				goto out_unlock_iolock;
 314
 315			/*
 316			 * Even after flushing the inode, there can still be
 317			 * delalloc blocks on the inode beyond EOF due to
 318			 * speculative preallocation.  These are not removed
 319			 * until the release function is called or the inode
 320			 * is inactivated.  Hence we cannot assert here that
 321			 * ip->i_delayed_blks == 0.
 322			 */
 323		}
 324
 325		if (xfs_get_extsz_hint(ip) ||
 326		    (ip->i_diflags & XFS_DIFLAG_PREALLOC))
 327			max_len = mp->m_super->s_maxbytes;
 328		else
 329			max_len = XFS_ISIZE(ip);
 330
 331		lock = xfs_ilock_data_map_shared(ip);
 332		break;
 333	}
 334
 335	ifp = xfs_ifork_ptr(ip, whichfork);
 336
 337	switch (ifp->if_format) {
 338	case XFS_DINODE_FMT_EXTENTS:
 339	case XFS_DINODE_FMT_BTREE:
 340		break;
 341	case XFS_DINODE_FMT_LOCAL:
 342		/* Local format inode forks report no extents. */
 343		goto out_unlock_ilock;
 344	default:
 345		error = -EINVAL;
 346		goto out_unlock_ilock;
 347	}
 348
 349	if (bmv->bmv_length == -1) {
 350		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
 351		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
 352	}
 353
 354	bmv_end = bmv->bmv_offset + bmv->bmv_length;
 355
 356	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
 357	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
 358
 359	error = xfs_iread_extents(NULL, ip, whichfork);
 360	if (error)
 361		goto out_unlock_ilock;
 362
 363	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
 364		/*
 365		 * Report a whole-file hole if the delalloc flag is set to
 366		 * stay compatible with the old implementation.
 367		 */
 368		if (iflags & BMV_IF_DELALLOC)
 369			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 370					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
 371		goto out_unlock_ilock;
 372	}
 373
 374	while (!xfs_getbmap_full(bmv)) {
 375		xfs_trim_extent(&got, first_bno, len);
 376
 377		/*
 378		 * Report an entry for a hole if this extent doesn't directly
 379		 * follow the previous one.
 380		 */
 381		if (got.br_startoff > bno) {
 382			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 383					got.br_startoff);
 384			if (xfs_getbmap_full(bmv))
 385				break;
 386		}
 387
 388		/*
 389		 * In order to report shared extents accurately, we report each
 390		 * distinct shared / unshared part of a single bmbt record with
 391		 * an individual getbmapx record.
 392		 */
 393		bno = got.br_startoff + got.br_blockcount;
 394		rec = got;
 395		do {
 396			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
 397					&rec);
 398			if (error || xfs_getbmap_full(bmv))
 399				goto out_unlock_ilock;
 400		} while (xfs_getbmap_next_rec(&rec, bno));
 401
 402		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
 403			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
 404
 405			if (bmv->bmv_entries > 0)
 406				out[bmv->bmv_entries - 1].bmv_oflags |=
 407								BMV_OF_LAST;
 408
 409			if (whichfork != XFS_ATTR_FORK && bno < end &&
 410			    !xfs_getbmap_full(bmv)) {
 411				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
 412						bno, end);
 413			}
 414			break;
 415		}
 416
 417		if (bno >= first_bno + len)
 418			break;
 419	}
 420
 421out_unlock_ilock:
 422	xfs_iunlock(ip, lock);
 423out_unlock_iolock:
 424	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 425	return error;
 426}
 427
 428/*
 429 * Dead simple method of punching delayed allocation blocks from a range in
 430 * the inode.  This will always punch out both the start and end blocks, even
 431 * if the range only partially overlaps them, so it is up to the caller to
 432 * ensure that partial blocks are not passed in.
 433 */
 434void
 435xfs_bmap_punch_delalloc_range(
 436	struct xfs_inode	*ip,
 437	int			whichfork,
 438	xfs_off_t		start_byte,
 439	xfs_off_t		end_byte)
 440{
 441	struct xfs_mount	*mp = ip->i_mount;
 442	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
 443	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
 444	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
 445	struct xfs_bmbt_irec	got, del;
 446	struct xfs_iext_cursor	icur;
 447
 448	ASSERT(!xfs_need_iread_extents(ifp));
 449
 450	xfs_ilock(ip, XFS_ILOCK_EXCL);
 451	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
 452		goto out_unlock;
 453
 454	while (got.br_startoff + got.br_blockcount > start_fsb) {
 455		del = got;
 456		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
 457
 458		/*
 459		 * A delete can push the cursor forward.  Step back to the
 460		 * previous extent for non-delalloc extents or extents outside
 461		 * the target range.
 462		 */
 463		if (!del.br_blockcount ||
 464		    !isnullstartblock(del.br_startblock)) {
 465			if (!xfs_iext_prev_extent(ifp, &icur, &got))
 466				break;
 467			continue;
 468		}
 469
 470		xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
 471		if (!xfs_iext_get_extent(ifp, &icur, &got))
 472			break;
 473	}
 474
 475	if (whichfork == XFS_COW_FORK && !ifp->if_bytes)
 476		xfs_inode_clear_cowblocks_tag(ip);
 477
 478out_unlock:
 479	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 480}
 481
 482/*
 483 * Test whether it is appropriate to check an inode for and free post EOF
 484 * blocks.
 485 */
 486bool
 487xfs_can_free_eofblocks(
 488	struct xfs_inode	*ip)
 489{
 490	struct xfs_mount	*mp = ip->i_mount;
 491	bool			found_blocks = false;
 492	xfs_fileoff_t		end_fsb;
 493	xfs_fileoff_t		last_fsb;
 494	struct xfs_bmbt_irec	imap;
 495	struct xfs_iext_cursor	icur;
 496
 497	/*
 498	 * The caller must either hold the exclusive I/O lock or be inactivating
 499	 * the inode, which guarantees there are no other users of the inode.
 500	 */
 501	if (!(VFS_I(ip)->i_state & I_FREEING))
 502		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
 503
 504	/* prealloc/delalloc exists only on regular files */
 505	if (!S_ISREG(VFS_I(ip)->i_mode))
 506		return false;
 507
 508	/*
 509	 * Zero sized files with no cached pages and no delalloc blocks will
 510	 * not have speculative prealloc/delalloc blocks to remove.
 511	 */
 512	if (VFS_I(ip)->i_size == 0 &&
 513	    VFS_I(ip)->i_mapping->nrpages == 0 &&
 514	    ip->i_delayed_blks == 0)
 515		return false;
 516
 517	/* If we haven't read in the extent list, then don't do it now. */
 518	if (xfs_need_iread_extents(&ip->i_df))
 519		return false;
 520
 521	/*
 522	 * Do not free real extents in preallocated files unless the file has
 523	 * delalloc blocks and we are forced to remove them.
 524	 */
 525	if ((ip->i_diflags & XFS_DIFLAG_PREALLOC) && !ip->i_delayed_blks)
 526		return false;
 527
 528	/*
 529	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
 530	 * range supported by the page cache, because the truncation will loop
 531	 * forever.
 532	 */
 533	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
 534	if (xfs_inode_has_bigrtalloc(ip))
 535		end_fsb = xfs_fileoff_roundup_rtx(mp, end_fsb);
 536	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 537	if (last_fsb <= end_fsb)
 538		return false;
 539
 540	/*
 541	 * Check if there is a post-EOF extent to free.  If there are any
 542	 * delalloc blocks attached to the inode (data fork delalloc
 543	 * reservations or CoW extents of any kind), we need to free them so
 544	 * that inactivation doesn't fail to erase them.
 545	 */
 546	xfs_ilock(ip, XFS_ILOCK_SHARED);
 547	if (ip->i_delayed_blks ||
 548	    xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
 549		found_blocks = true;
 550	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 551	return found_blocks;
 552}
 553
 554/*
 555 * This is called to free any blocks beyond eof. The caller must hold
 556 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 557 * reference to the inode.
 558 */
 559int
 560xfs_free_eofblocks(
 561	struct xfs_inode	*ip)
 562{
 563	struct xfs_trans	*tp;
 564	struct xfs_mount	*mp = ip->i_mount;
 565	int			error;
 566
 567	/* Attach the dquots to the inode up front. */
 568	error = xfs_qm_dqattach(ip);
 569	if (error)
 570		return error;
 571
 572	/* Wait on dio to ensure i_size has settled. */
 573	inode_dio_wait(VFS_I(ip));
 574
 575	/*
 576	 * For preallocated files, only free delayed allocations.
 577	 *
 578	 * Note that this means we also leave speculative preallocations in
 579	 * place for preallocated files.
 580	 */
 581	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
 582		if (ip->i_delayed_blks) {
 583			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
 584				round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
 585				LLONG_MAX);
 586		}
 587		xfs_inode_clear_eofblocks_tag(ip);
 588		return 0;
 589	}
 590
 591	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 592	if (error) {
 593		ASSERT(xfs_is_shutdown(mp));
 594		return error;
 595	}
 596
 597	xfs_ilock(ip, XFS_ILOCK_EXCL);
 598	xfs_trans_ijoin(tp, ip, 0);
 599
 600	/*
 601	 * Do not update the on-disk file size.  If we update the on-disk file
 602	 * size and then the system crashes before the contents of the file are
 603	 * flushed to disk then the files may be full of holes (i.e. the NULL
 604	 * files bug).
 605	 */
 606	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
 607				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
 608	if (error)
 609		goto err_cancel;
 610
 611	error = xfs_trans_commit(tp);
 612	if (error)
 613		goto out_unlock;
 614
 615	xfs_inode_clear_eofblocks_tag(ip);
 616	goto out_unlock;
 617
 618err_cancel:
 619	/*
 620	 * If we get an error at this point we simply don't
 621	 * bother truncating the file.
 622	 */
 623	xfs_trans_cancel(tp);
 624out_unlock:
 625	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 626	return error;
 627}
 628
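/*
 * Preallocate space for the byte range [offset, offset + len) of the file,
 * looping until the whole range has been mapped or an error occurs.
 */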
 629int
 630xfs_alloc_file_space(
 631	struct xfs_inode	*ip,
 632	xfs_off_t		offset,
 633	xfs_off_t		len)
 634{
 635	xfs_mount_t		*mp = ip->i_mount;
 636	xfs_off_t		count;
 637	xfs_filblks_t		allocatesize_fsb;
 638	xfs_extlen_t		extsz, temp;
 639	xfs_fileoff_t		startoffset_fsb;
 640	xfs_fileoff_t		endoffset_fsb;
 641	int			rt;
 642	xfs_trans_t		*tp;
 643	xfs_bmbt_irec_t		imaps[1], *imapp;
 644	int			error;
 645
 646	if (xfs_is_always_cow_inode(ip))
 647		return 0;
 648
 649	trace_xfs_alloc_file_space(ip);
 650
 651	if (xfs_is_shutdown(mp))
 652		return -EIO;
 653
 654	error = xfs_qm_dqattach(ip);
 655	if (error)
 656		return error;
 657
 658	if (len <= 0)
 659		return -EINVAL;
 660
 661	rt = XFS_IS_REALTIME_INODE(ip);
 662	extsz = xfs_get_extsz_hint(ip);
 663
 664	count = len;
 665	imapp = &imaps[0];
 666	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
 667	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
 668	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
 669
 670	/*
 671	 * Allocate file space until done or until there is an error
 672	 */
 673	while (allocatesize_fsb && !error) {
 674		xfs_fileoff_t	s, e;
 675		unsigned int	dblocks, rblocks, resblks;
 676		int		nimaps = 1;
 677
 678		/*
 679		 * Determine space reservations for data/realtime.
 680		 */
 681		if (unlikely(extsz)) {
 682			s = startoffset_fsb;
 683			do_div(s, extsz);
 684			s *= extsz;
 685			e = startoffset_fsb + allocatesize_fsb;
 686			div_u64_rem(startoffset_fsb, extsz, &temp);
 687			if (temp)
 688				e += temp;
 689			div_u64_rem(e, extsz, &temp);
 690			if (temp)
 691				e += extsz - temp;
 692		} else {
 693			s = 0;
 694			e = allocatesize_fsb;
 695		}
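		/*
		 * Worked example of the rounding above: with extsz = 16,
		 * startoffset_fsb = 10 and allocatesize_fsb = 30, s rounds
		 * down to 0, while e starts at 40, gains the start remainder
		 * (10) and is rounded up to the next extent boundary, 64.
		 */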
 696
 697		/*
 698		 * The transaction reservation is limited to a 32-bit block
 699		 * count, hence we need to limit the number of blocks we are
 700		 * trying to reserve to avoid an overflow. We can't allocate
 701		 * more than @nimaps extents, and an extent is limited on disk
 702		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
 703		 * limit.
 704		 */
 705		resblks = min_t(xfs_fileoff_t, (e - s),
 706				(XFS_MAX_BMBT_EXTLEN * nimaps));
 707		if (unlikely(rt)) {
 708			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 709			rblocks = resblks;
 710		} else {
 711			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
 712			rblocks = 0;
 713		}
 714
 715		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
 716				dblocks, rblocks, false, &tp);
 717		if (error)
 718			break;
 719
 720		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
 721				XFS_IEXT_ADD_NOSPLIT_CNT);
 722		if (error)
 723			goto error;
 724
 725		/*
 726		 * If the allocator cannot find a single free extent large
 727		 * enough to cover the start block of the requested range,
 728		 * xfs_bmapi_write will return -ENOSR.
 729		 *
 730		 * In that case we simply need to keep looping with the same
 731		 * startoffset_fsb so that one of the following allocations
 732		 * will eventually reach the requested range.
 733		 */
 734		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
 735				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
 736				&nimaps);
 737		if (error) {
 738			if (error != -ENOSR)
 739				goto error;
 740			error = 0;
 741		} else {
 742			startoffset_fsb += imapp->br_blockcount;
 743			allocatesize_fsb -= imapp->br_blockcount;
 744		}
 745
 746		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
 747		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 748
 749		error = xfs_trans_commit(tp);
 750		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 751	}
 752
 753	return error;
 754
 755error:
 756	xfs_trans_cancel(tp);
 757	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 758	return error;
 759}
 760
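/*
 * Unmap part of the given file range in a single transaction.  xfs_bunmapi
 * is limited to removing two extents per call here, so *done tells the
 * caller whether another pass over the remaining range is needed.
 */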
 761static int
 762xfs_unmap_extent(
 763	struct xfs_inode	*ip,
 764	xfs_fileoff_t		startoffset_fsb,
 765	xfs_filblks_t		len_fsb,
 766	int			*done)
 767{
 768	struct xfs_mount	*mp = ip->i_mount;
 769	struct xfs_trans	*tp;
 770	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 771	int			error;
 772
 773	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
 774			false, &tp);
 775	if (error)
 776		return error;
 777
 778	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
 779			XFS_IEXT_PUNCH_HOLE_CNT);
 780	if (error)
 781		goto out_trans_cancel;
 782
 783	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
 784	if (error)
 785		goto out_trans_cancel;
 786
 787	error = xfs_trans_commit(tp);
 788out_unlock:
 789	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 790	return error;
 791
 792out_trans_cancel:
 793	xfs_trans_cancel(tp);
 794	goto out_unlock;
 795}
 796
 797/* Caller must first wait for the completion of any pending DIOs if required. */
 798int
 799xfs_flush_unmap_range(
 800	struct xfs_inode	*ip,
 801	xfs_off_t		offset,
 802	xfs_off_t		len)
 803{
 804	struct inode		*inode = VFS_I(ip);
 805	xfs_off_t		rounding, start, end;
 806	int			error;
 807
 808	/*
 809	 * Make sure we extend the flush out to extent alignment
 810	 * boundaries so any extent range overlapping the start/end
 811	 * of the modification we are about to do is clean and idle.
 812	 */
 813	rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
 814	start = rounddown_64(offset, rounding);
 815	end = roundup_64(offset + len, rounding) - 1;
 816
 817	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 818	if (error)
 819		return error;
 820	truncate_pagecache_range(inode, start, end);
 821	return 0;
 822}
 823
 824int
 825xfs_free_file_space(
 826	struct xfs_inode	*ip,
 827	xfs_off_t		offset,
 828	xfs_off_t		len)
 829{
 830	struct xfs_mount	*mp = ip->i_mount;
 831	xfs_fileoff_t		startoffset_fsb;
 832	xfs_fileoff_t		endoffset_fsb;
 833	int			done = 0, error;
 834
 835	trace_xfs_free_file_space(ip);
 836
 837	error = xfs_qm_dqattach(ip);
 838	if (error)
 839		return error;
 840
 841	if (len <= 0)	/* nothing is being freed */
 842		return 0;
 843
 844	/*
 845	 * Now that AIO and DIO have drained, we flush and (if necessary)
 846	 * invalidate the cached range over the first operation we are about to run.
 847	 */
 848	error = xfs_flush_unmap_range(ip, offset, len);
 849	if (error)
 850		return error;
 851
 852	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
 853	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 854
 855	/* We can only free complete realtime extents. */
 856	if (xfs_inode_has_bigrtalloc(ip)) {
 857		startoffset_fsb = xfs_fileoff_roundup_rtx(mp, startoffset_fsb);
 858		endoffset_fsb = xfs_fileoff_rounddown_rtx(mp, endoffset_fsb);
 859	}
 860
 861	/*
 862	 * Need to zero the stuff we're not freeing, on disk.
 863	 */
 864	if (endoffset_fsb > startoffset_fsb) {
 865		while (!done) {
 866			error = xfs_unmap_extent(ip, startoffset_fsb,
 867					endoffset_fsb - startoffset_fsb, &done);
 868			if (error)
 869				return error;
 870		}
 871	}
 872
 873	/*
 874	 * Now that we've unmapped all full blocks we'll have to zero out any
 875	 * partial block at the beginning and/or end.  xfs_zero_range is smart
 876	 * enough to skip any holes, including those we just created, but we
 877	 * must take care not to zero beyond EOF and enlarge i_size.
 878	 */
 879	if (offset >= XFS_ISIZE(ip))
 880		return 0;
 881	if (offset + len > XFS_ISIZE(ip))
 882		len = XFS_ISIZE(ip) - offset;
 883	error = xfs_zero_range(ip, offset, len, NULL);
 884	if (error)
 885		return error;
 886
 887	/*
 888	 * If we zeroed right up to EOF and EOF straddles a page boundary we
 889	 * must make sure that the post-EOF area is also zeroed because the
 890	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
 891	 * Writeback of the eof page will do this, albeit clumsily.
 892	 */
 893	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
 894		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 895				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
 896	}
 897
 898	return error;
 899}
 900
 901static int
 902xfs_prepare_shift(
 903	struct xfs_inode	*ip,
 904	loff_t			offset)
 905{
 906	unsigned int		rounding;
 907	int			error;
 908
 909	/*
 910	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
 911	 * into the accessible region of the file.
 912	 */
 913	if (xfs_can_free_eofblocks(ip)) {
 914		error = xfs_free_eofblocks(ip);
 915		if (error)
 916			return error;
 917	}
 918
 919	/*
 920	 * Shift operations must stabilize the start block offset boundary along
 921	 * with the full range of the operation. If we don't, a COW writeback
 922	 * completion could race with an insert, front merge with the start
 923	 * extent (after split) during the shift and corrupt the file. Start
 924	 * with the allocation unit just prior to the start to stabilize the
 925	 * boundary.
 926	 */
 927	rounding = xfs_inode_alloc_unitsize(ip);
 928	offset = rounddown_64(offset, rounding);
 929	if (offset)
 930		offset -= rounding;
 931
 932	/*
 933	 * Writeback and invalidate cache for the remainder of the file as we're
 934	 * about to shift down every extent from offset to EOF.
 935	 */
 936	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
 937	if (error)
 938		return error;
 939
 940	/*
 941	 * Clean out anything hanging around in the cow fork now that
 942	 * we've flushed all the dirty data out to disk to avoid having
 943	 * CoW extents at the wrong offsets.
 944	 */
 945	if (xfs_inode_has_cow_data(ip)) {
 946		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
 947				true);
 948		if (error)
 949			return error;
 950	}
 951
 952	return 0;
 953}
 954
 955/*
 956 * xfs_collapse_file_space()
 957 *	This routine frees disk space and shifts extents for the given file.
 958 *	The first thing we do is free the data blocks in the specified range
 959 *	by calling xfs_free_file_space(), which also syncs dirty data and
 960 *	invalidates the page cache over the region on which the collapse
 961 *	range is working.  Then extent records are shifted left to cover the hole.
 962 * RETURNS:
 963 *	0 on success
 964 *	errno on error
 965 *
 966 */
 967int
 968xfs_collapse_file_space(
 969	struct xfs_inode	*ip,
 970	xfs_off_t		offset,
 971	xfs_off_t		len)
 972{
 973	struct xfs_mount	*mp = ip->i_mount;
 974	struct xfs_trans	*tp;
 975	int			error;
 976	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
 977	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
 978	bool			done = false;
 979
 980	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 981
 982	trace_xfs_collapse_file_space(ip);
 983
 984	error = xfs_free_file_space(ip, offset, len);
 985	if (error)
 986		return error;
 987
 988	error = xfs_prepare_shift(ip, offset);
 989	if (error)
 990		return error;
 991
 992	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 993	if (error)
 994		return error;
 995
 996	xfs_ilock(ip, XFS_ILOCK_EXCL);
 997	xfs_trans_ijoin(tp, ip, 0);
 998
 999	while (!done) {
1000		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1001				&done);
1002		if (error)
1003			goto out_trans_cancel;
1004		if (done)
1005			break;
1006
1007		/* finish any deferred frees and roll the transaction */
1008		error = xfs_defer_finish(&tp);
1009		if (error)
1010			goto out_trans_cancel;
1011	}
1012
1013	error = xfs_trans_commit(tp);
1014	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1015	return error;
1016
1017out_trans_cancel:
1018	xfs_trans_cancel(tp);
1019	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1020	return error;
1021}
1022
1023/*
1024 * xfs_insert_file_space()
1025 *	This routine creates hole space by shifting extents for the given file.
1026 *	The first thing we do is sync dirty data and invalidate the page cache
1027 *	over the region on which the insert range is working.  Then we split
1028 *	an extent in two at the given offset by calling xfs_bmap_split_extent,
1029 *	and shift all extent records lying between [offset, last allocated
1030 *	extent] to the right to make room for the hole.
1031 * RETURNS:
1032 *	0 on success
1033 *	errno on error
1034 */
1035int
1036xfs_insert_file_space(
1037	struct xfs_inode	*ip,
1038	loff_t			offset,
1039	loff_t			len)
1040{
1041	struct xfs_mount	*mp = ip->i_mount;
1042	struct xfs_trans	*tp;
1043	int			error;
1044	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1045	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1046	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1047	bool			done = false;
1048
1049	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
1050
1051	trace_xfs_insert_file_space(ip);
1052
1053	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1054	if (error)
1055		return error;
1056
1057	error = xfs_prepare_shift(ip, offset);
1058	if (error)
1059		return error;
1060
1061	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1062			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1063	if (error)
1064		return error;
1065
1066	xfs_ilock(ip, XFS_ILOCK_EXCL);
1067	xfs_trans_ijoin(tp, ip, 0);
1068
1069	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
1070			XFS_IEXT_PUNCH_HOLE_CNT);
1071	if (error)
1072		goto out_trans_cancel;
1073
1074	/*
1075	 * The extent shifting code works on extent granularity. So, if stop_fsb
1076	 * is not the starting block of extent, we need to split the extent at
1077	 * stop_fsb.
1078	 */
1079	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1080	if (error)
1081		goto out_trans_cancel;
1082
1083	do {
1084		error = xfs_defer_finish(&tp);
1085		if (error)
1086			goto out_trans_cancel;
1087
1088		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1089				&done, stop_fsb);
1090		if (error)
1091			goto out_trans_cancel;
1092	} while (!done);
1093
1094	error = xfs_trans_commit(tp);
1095	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1096	return error;
1097
1098out_trans_cancel:
1099	xfs_trans_cancel(tp);
1100	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1101	return error;
1102}
1103
1104/*
1105 * We need to check that the format of the data fork in the temporary inode is
1106 * valid for the target inode before doing the swap. This is not a problem with
1107 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1108 * data fork depending on the space the attribute fork is taking, so we can
1109 * get invalid formats on the target inode.
1110 *
1111 * E.g. target has space for 7 extents in extent format, temp inode only has
1112 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1113 * btree, but when swapped it needs to be in extent format. Hence we can't just
1114 * blindly swap data forks on attr2 filesystems.
1115 *
1116 * Note that we check the swap in both directions so that we don't end up with
1117 * a corrupt temporary inode, either.
1118 *
1119 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1120 * inode will prevent this situation from occurring, so all we do here is
1121 * reject and log the attempt.  Basically we are putting the responsibility on
1122 * userspace to get this right.
1123 */
1124static int
1125xfs_swap_extents_check_format(
1126	struct xfs_inode	*ip,	/* target inode */
1127	struct xfs_inode	*tip)	/* tmp inode */
1128{
1129	struct xfs_ifork	*ifp = &ip->i_df;
1130	struct xfs_ifork	*tifp = &tip->i_df;
1131
1132	/* User/group/project quota ids must match if quotas are enforced. */
1133	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1134	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1135	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1136	     ip->i_projid != tip->i_projid))
1137		return -EINVAL;
1138
1139	/* Should never get a local format */
1140	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1141	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1142		return -EINVAL;
1143
1144	/*
1145	 * If the target inode has fewer extents than the temporary inode,
1146	 * why did userspace call us?
1147	 */
1148	if (ifp->if_nextents < tifp->if_nextents)
1149		return -EINVAL;
1150
1151	/*
1152	 * If we have to use the (expensive) rmap swap method, we can
1153	 * handle any number of extents and any format.
1154	 */
1155	if (xfs_has_rmapbt(ip->i_mount))
1156		return 0;
1157
1158	/*
1159	 * If the target inode is in extent form and the temp inode is in btree
1160	 * form then we will end up with the target inode in the wrong format,
1161	 * as we already know there are fewer extents in the temp inode.
1162	 */
1163	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1164	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1165		return -EINVAL;
1166
1167	/* Check temp in extent form to max in target */
1168	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1169	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1170		return -EINVAL;
1171
1172	/* Check target in extent form to max in temp */
1173	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1174	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1175		return -EINVAL;
1176
1177	/*
1178	 * If we are in a btree format, check that the temp root block will fit
1179	 * in the target and that it has enough extents to be in btree format
1180	 * in the target.
1181	 *
1182	 * Note that we have to be careful to allow btree->extent conversions
1183	 * (a common defrag case) which will occur when the temp inode is in
1184	 * extent format...
1185	 */
1186	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1187		if (xfs_inode_has_attr_fork(ip) &&
1188		    xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
1189			return -EINVAL;
1190		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1191			return -EINVAL;
1192	}
1193
1194	/* Reciprocal target->temp btree format checks */
1195	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1196		if (xfs_inode_has_attr_fork(tip) &&
1197		    xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
1198			return -EINVAL;
1199		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1200			return -EINVAL;
1201	}
1202
1203	return 0;
1204}
1205
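/*
 * Flush all dirty data and invalidate the page cache of an inode taking
 * part in an extent swap.  Any pages left in the mapping afterwards imply
 * non-O_DIRECT users, which the swap cannot tolerate.
 */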
1206static int
1207xfs_swap_extent_flush(
1208	struct xfs_inode	*ip)
1209{
1210	int	error;
1211
1212	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1213	if (error)
1214		return error;
1215	truncate_pagecache_range(VFS_I(ip), 0, -1);
1216
1217	/* Verify O_DIRECT for ftmp */
1218	if (VFS_I(ip)->i_mapping->nrpages)
1219		return -EINVAL;
1220	return 0;
1221}
1222
1223/*
1224 * Move extents from one file to another, when rmap is enabled.
1225 */
1226STATIC int
1227xfs_swap_extent_rmap(
1228	struct xfs_trans		**tpp,
1229	struct xfs_inode		*ip,
1230	struct xfs_inode		*tip)
1231{
1232	struct xfs_trans		*tp = *tpp;
1233	struct xfs_bmbt_irec		irec;
1234	struct xfs_bmbt_irec		uirec;
1235	struct xfs_bmbt_irec		tirec;
1236	xfs_fileoff_t			offset_fsb;
1237	xfs_fileoff_t			end_fsb;
1238	xfs_filblks_t			count_fsb;
1239	int				error;
1240	xfs_filblks_t			ilen;
1241	xfs_filblks_t			rlen;
1242	int				nimaps;
1243	uint64_t			tip_flags2;
1244
1245	/*
1246	 * If the source file has shared blocks, we must flag the donor
1247	 * file as having shared blocks so that we get the shared-block
1248	 * rmap functions when we go to fix up the rmaps.  The flags
1249	 * will be switched for real later.
1250	 */
1251	tip_flags2 = tip->i_diflags2;
1252	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1253		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1254
1255	offset_fsb = 0;
1256	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1257	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1258
1259	while (count_fsb) {
1260		/* Read extent from the donor file */
1261		nimaps = 1;
1262		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1263				&nimaps, 0);
1264		if (error)
1265			goto out;
1266		ASSERT(nimaps == 1);
1267		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1268
1269		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1270		ilen = tirec.br_blockcount;
1271
1272		/* Unmap the old blocks in the source file. */
1273		while (tirec.br_blockcount) {
1274			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
1275			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1276
1277			/* Read extent from the source file */
1278			nimaps = 1;
1279			error = xfs_bmapi_read(ip, tirec.br_startoff,
1280					tirec.br_blockcount, &irec,
1281					&nimaps, 0);
1282			if (error)
1283				goto out;
1284			ASSERT(nimaps == 1);
1285			ASSERT(tirec.br_startoff == irec.br_startoff);
1286			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1287
1288			/* Trim the extent. */
1289			uirec = tirec;
1290			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1291					tirec.br_blockcount,
1292					irec.br_blockcount);
1293			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1294
1295			if (xfs_bmap_is_real_extent(&uirec)) {
1296				error = xfs_iext_count_extend(tp, ip,
1297						XFS_DATA_FORK,
1298						XFS_IEXT_SWAP_RMAP_CNT);
1299				if (error)
1300					goto out;
1301			}
1302
1303			if (xfs_bmap_is_real_extent(&irec)) {
1304				error = xfs_iext_count_extend(tp, tip,
1305						XFS_DATA_FORK,
1306						XFS_IEXT_SWAP_RMAP_CNT);
1307				if (error)
1308					goto out;
1309			}
1310
1311			/* Remove the mapping from the donor file. */
1312			xfs_bmap_unmap_extent(tp, tip, XFS_DATA_FORK, &uirec);
1313
1314			/* Remove the mapping from the source file. */
1315			xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
1316
1317			/* Map the donor file's blocks into the source file. */
1318			xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &uirec);
1319
1320			/* Map the source file's blocks into the donor file. */
1321			xfs_bmap_map_extent(tp, tip, XFS_DATA_FORK, &irec);
1322
1323			error = xfs_defer_finish(tpp);
1324			tp = *tpp;
1325			if (error)
1326				goto out;
1327
1328			tirec.br_startoff += rlen;
1329			if (tirec.br_startblock != HOLESTARTBLOCK &&
1330			    tirec.br_startblock != DELAYSTARTBLOCK)
1331				tirec.br_startblock += rlen;
1332			tirec.br_blockcount -= rlen;
1333		}
1334
1335		/* Roll on... */
1336		count_fsb -= ilen;
1337		offset_fsb += ilen;
1338	}
1339
1340	tip->i_diflags2 = tip_flags2;
1341	return 0;
1342
1343out:
1344	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1345	tip->i_diflags2 = tip_flags2;
1346	return error;
1347}
1348
1349/* Swap the extents of two files by swapping data forks. */
1350STATIC int
1351xfs_swap_extent_forks(
1352	struct xfs_trans	*tp,
1353	struct xfs_inode	*ip,
1354	struct xfs_inode	*tip,
1355	int			*src_log_flags,
1356	int			*target_log_flags)
1357{
1358	xfs_filblks_t		aforkblks = 0;
1359	xfs_filblks_t		taforkblks = 0;
1360	xfs_extnum_t		junk;
1361	uint64_t		tmp;
1362	int			error;
1363
1364	/*
1365	 * Count the number of extended attribute blocks
1366	 */
1367	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
1368	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1369		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1370				&aforkblks);
1371		if (error)
1372			return error;
1373	}
1374	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
1375	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1376		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1377				&taforkblks);
1378		if (error)
1379			return error;
1380	}
1381
1382	/*
1383	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1384	 * block headers. We can't start changing the bmbt blocks until the
1385	 * inode owner change is logged so recovery does the right thing in the
1386	 * event of a crash. Set the owner change log flags now and leave the
1387	 * bmbt scan as the last step.
1388	 */
1389	if (xfs_has_v3inodes(ip->i_mount)) {
1390		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1391			(*target_log_flags) |= XFS_ILOG_DOWNER;
1392		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1393			(*src_log_flags) |= XFS_ILOG_DOWNER;
1394	}
1395
1396	/*
1397	 * Swap the data forks of the inodes
1398	 */
1399	swap(ip->i_df, tip->i_df);
1400
1401	/*
1402	 * Fix the on-disk inode values
1403	 */
1404	tmp = (uint64_t)ip->i_nblocks;
1405	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1406	tip->i_nblocks = tmp + taforkblks - aforkblks;
1407
1408	/*
1409	 * The extents in the source inode could still contain speculative
1410	 * preallocation beyond EOF (e.g. the file is open but not modified
1411	 * while defrag is in progress). In that case, we need to copy over the
1412	 * number of delalloc blocks the data fork in the source inode is
1413	 * tracking beyond EOF so that when the fork is truncated away when the
1414	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1415	 * counter on that inode.
1416	 */
1417	ASSERT(tip->i_delayed_blks == 0);
1418	tip->i_delayed_blks = ip->i_delayed_blks;
1419	ip->i_delayed_blks = 0;
1420
1421	switch (ip->i_df.if_format) {
1422	case XFS_DINODE_FMT_EXTENTS:
1423		(*src_log_flags) |= XFS_ILOG_DEXT;
1424		break;
1425	case XFS_DINODE_FMT_BTREE:
1426		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1427		       (*src_log_flags & XFS_ILOG_DOWNER));
1428		(*src_log_flags) |= XFS_ILOG_DBROOT;
1429		break;
1430	}
1431
1432	switch (tip->i_df.if_format) {
1433	case XFS_DINODE_FMT_EXTENTS:
1434		(*target_log_flags) |= XFS_ILOG_DEXT;
1435		break;
1436	case XFS_DINODE_FMT_BTREE:
1437		(*target_log_flags) |= XFS_ILOG_DBROOT;
1438		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1439		       (*target_log_flags & XFS_ILOG_DOWNER));
1440		break;
1441	}
1442
1443	return 0;
1444}
1445
1446/*
1447 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1448 * change owner scan attempts to order all modified buffers in the current
1449 * transaction. In the event of ordered buffer failure, the offending buffer is
1450 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1451 * the transaction in this case to replenish the fallback log reservation and
1452 * restart the scan. This process repeats until the scan completes.
1453 */
1454static int
1455xfs_swap_change_owner(
1456	struct xfs_trans	**tpp,
1457	struct xfs_inode	*ip,
1458	struct xfs_inode	*tmpip)
1459{
1460	int			error;
1461	struct xfs_trans	*tp = *tpp;
1462
1463	do {
1464		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1465					      NULL);
1466		/* success or fatal error */
1467		if (error != -EAGAIN)
1468			break;
1469
1470		error = xfs_trans_roll(tpp);
1471		if (error)
1472			break;
1473		tp = *tpp;
1474
1475		/*
1476		 * Redirty both inodes so they can relog and keep the log tail
1477		 * moving forward.
1478		 */
1479		xfs_trans_ijoin(tp, ip, 0);
1480		xfs_trans_ijoin(tp, tmpip, 0);
1481		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1482		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1483	} while (true);
1484
1485	return error;
1486}
1487
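/*
 * Swap the data forks (and CoW forks, if present) of two inodes.  This is
 * the backend of the deprecated XFS_IOC_SWAPEXT ioctl that xfs_fsr uses
 * for defragmentation.
 */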
1488int
1489xfs_swap_extents(
1490	struct xfs_inode	*ip,	/* target inode */
1491	struct xfs_inode	*tip,	/* tmp inode */
1492	struct xfs_swapext	*sxp)
1493{
1494	struct xfs_mount	*mp = ip->i_mount;
1495	struct xfs_trans	*tp;
1496	struct xfs_bstat	*sbp = &sxp->sx_stat;
1497	int			src_log_flags, target_log_flags;
1498	int			error = 0;
1499	uint64_t		f;
1500	int			resblks = 0;
1501	unsigned int		flags = 0;
1502	struct timespec64	ctime, mtime;
1503
1504	/*
1505	 * Lock the inodes against other IO, page faults and truncate to
1506	 * begin with.  Then we can ensure the inodes are flushed and have no
1507	 * page cache safely. Once we have done this we can take the ilocks and
1508	 * do the rest of the checks.
1509	 */
1510	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1511	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1512				    VFS_I(tip)->i_mapping);
1513
1514	/* Verify that both files have the same format */
1515	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1516		error = -EINVAL;
1517		goto out_unlock;
1518	}
1519
1520	/* Verify both files are either real-time or non-realtime */
1521	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1522		error = -EINVAL;
1523		goto out_unlock;
1524	}
1525
1526	/*
1527	 * The rmapbt implementation is unable to resume a swapext operation
1528	 * after a crash if the allocation unit size is larger than a block.
1529	 * This (deprecated) interface will not be upgraded to handle this
1530	 * situation.  Defragmentation must be performed with the commit range
1531	 * ioctl.
1532	 */
1533	if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(ip->i_mount)) {
1534		error = -EOPNOTSUPP;
1535		goto out_unlock;
1536	}
1537
1538	error = xfs_qm_dqattach(ip);
1539	if (error)
1540		goto out_unlock;
1541
1542	error = xfs_qm_dqattach(tip);
1543	if (error)
1544		goto out_unlock;
1545
1546	error = xfs_swap_extent_flush(ip);
1547	if (error)
1548		goto out_unlock;
1549	error = xfs_swap_extent_flush(tip);
1550	if (error)
1551		goto out_unlock;
1552
1553	if (xfs_inode_has_cow_data(tip)) {
1554		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1555		if (error)
1556			goto out_unlock;
1557	}
1558
1559	/*
1560	 * Extent "swapping" with rmap requires a permanent reservation and
1561	 * a block reservation because it's really just a remap operation
1562	 * performed with log redo items!
1563	 */
1564	if (xfs_has_rmapbt(mp)) {
1565		int		w = XFS_DATA_FORK;
1566		uint32_t	ipnext = ip->i_df.if_nextents;
1567		uint32_t	tipnext	= tip->i_df.if_nextents;
1568
1569		/*
1570		 * Conceptually this shouldn't affect the shape of either bmbt,
1571		 * but since we atomically move extents one by one, we reserve
1572		 * enough space to rebuild both trees.
1573		 */
1574		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1575		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1576
1577		/*
1578		 * If either inode straddles a bmapbt block allocation boundary,
1579		 * the rmapbt algorithm triggers repeated allocs and frees as
1580		 * extents are remapped. This can exhaust the block reservation
1581		 * prematurely and cause shutdown. Return freed blocks to the
1582		 * transaction reservation to counter this behavior.
1583		 */
1584		flags |= XFS_TRANS_RES_FDBLKS;
1585	}
1586	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1587				&tp);
1588	if (error)
1589		goto out_unlock;
1590
1591	/*
1592	 * Lock and join the inodes to the transaction so that transaction commit
1593	 * or cancel will unlock the inodes from this point onwards.
1594	 */
1595	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1596	xfs_trans_ijoin(tp, ip, 0);
1597	xfs_trans_ijoin(tp, tip, 0);
1598
1599
1600	/* Verify all data are being swapped */
1601	if (sxp->sx_offset != 0 ||
1602	    sxp->sx_length != ip->i_disk_size ||
1603	    sxp->sx_length != tip->i_disk_size) {
1604		error = -EFAULT;
1605		goto out_trans_cancel;
1606	}
1607
1608	trace_xfs_swap_extent_before(ip, 0);
1609	trace_xfs_swap_extent_before(tip, 1);
1610
1611	/* check inode formats now that data is flushed */
1612	error = xfs_swap_extents_check_format(ip, tip);
1613	if (error) {
1614		xfs_notice(mp,
1615		    "%s: inode 0x%llx format is incompatible for exchanging.",
1616				__func__, ip->i_ino);
1617		goto out_trans_cancel;
1618	}
1619
1620	/*
1621	 * Compare the current change & modify times with those
1622	 * passed in.  If they differ, we abort this swap.
1623	 * This is the mechanism used to assure the calling
1624	 * process that the file was not changed out from
1625	 * under it.
1626	 */
1627	ctime = inode_get_ctime(VFS_I(ip));
1628	mtime = inode_get_mtime(VFS_I(ip));
1629	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
1630	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
1631	    (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
1632	    (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
1633		error = -EBUSY;
1634		goto out_trans_cancel;
1635	}
1636
1637	/*
1638	 * Note the trickiness in setting the log flags - we set the owner log
1639	 * flag on the opposite inode (i.e. the inode we are setting the new
1640	 * owner to be) because once we swap the forks and log that, log
1641	 * recovery is going to see the fork as owned by the swapped inode,
1642	 * not the pre-swapped inodes.
1643	 */
1644	src_log_flags = XFS_ILOG_CORE;
1645	target_log_flags = XFS_ILOG_CORE;
1646
1647	if (xfs_has_rmapbt(mp))
1648		error = xfs_swap_extent_rmap(&tp, ip, tip);
1649	else
1650		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1651				&target_log_flags);
1652	if (error)
1653		goto out_trans_cancel;
1654
1655	/* Do we have to swap reflink flags? */
1656	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1657	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1658		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1659		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1660		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1661		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1662		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1663	}
1664
1665	/* Swap the cow forks. */
1666	if (xfs_has_reflink(mp)) {
1667		ASSERT(!ip->i_cowfp ||
1668		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1669		ASSERT(!tip->i_cowfp ||
1670		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1671
1672		swap(ip->i_cowfp, tip->i_cowfp);
1673
1674		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1675			xfs_inode_set_cowblocks_tag(ip);
1676		else
1677			xfs_inode_clear_cowblocks_tag(ip);
1678		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1679			xfs_inode_set_cowblocks_tag(tip);
1680		else
1681			xfs_inode_clear_cowblocks_tag(tip);
1682	}
1683
1684	xfs_trans_log_inode(tp, ip,  src_log_flags);
1685	xfs_trans_log_inode(tp, tip, target_log_flags);
1686
1687	/*
1688	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1689	 * have inode number owner values in the bmbt blocks that still refer to
1690	 * the old inode. Scan each bmbt to fix up the owner values with the
1691	 * inode number of the current inode.
1692	 */
1693	if (src_log_flags & XFS_ILOG_DOWNER) {
1694		error = xfs_swap_change_owner(&tp, ip, tip);
1695		if (error)
1696			goto out_trans_cancel;
1697	}
1698	if (target_log_flags & XFS_ILOG_DOWNER) {
1699		error = xfs_swap_change_owner(&tp, tip, ip);
1700		if (error)
1701			goto out_trans_cancel;
1702	}
1703
1704	/*
1705	 * If this is a synchronous mount, make sure that the
1706	 * transaction goes to disk before returning to the user.
1707	 */
1708	if (xfs_has_wsync(mp))
1709		xfs_trans_set_sync(tp);
1710
1711	error = xfs_trans_commit(tp);
1712
1713	trace_xfs_swap_extent_after(ip, 0);
1714	trace_xfs_swap_extent_after(tip, 1);
1715
1716out_unlock_ilock:
1717	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1718	xfs_iunlock(tip, XFS_ILOCK_EXCL);
1719out_unlock:
1720	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1721				      VFS_I(tip)->i_mapping);
1722	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1723	return error;
1724
1725out_trans_cancel:
1726	xfs_trans_cancel(tp);
1727	goto out_unlock_ilock;
1728}