   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * Copyright (c) 2012 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_bit.h"
  26#include "xfs_sb.h"
  27#include "xfs_ag.h"
  28#include "xfs_mount.h"
  29#include "xfs_da_format.h"
  30#include "xfs_inode.h"
  31#include "xfs_btree.h"
  32#include "xfs_trans.h"
  33#include "xfs_extfree_item.h"
  34#include "xfs_alloc.h"
  35#include "xfs_bmap.h"
  36#include "xfs_bmap_util.h"
  37#include "xfs_bmap_btree.h"
  38#include "xfs_rtalloc.h"
  39#include "xfs_error.h"
  40#include "xfs_quota.h"
  41#include "xfs_trans_space.h"
  42#include "xfs_trace.h"
  43#include "xfs_icache.h"
  44#include "xfs_log.h"
  45#include "xfs_dinode.h"
  46
  47/* Kernel only BMAP related definitions and functions */
  48
  49/*
  50 * Convert the given file system block to a disk block.  We have to treat it
  51 * differently based on whether the file is a real time file or not, because the
  52 * bmap code does.
  53 */
  54xfs_daddr_t
  55xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  56{
   57	return (XFS_IS_REALTIME_INODE(ip) ?
   58		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
  59		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
  60}
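
/*
 * Illustrative note (assumed numbers): with 4096-byte filesystem blocks,
 * XFS_FSB_TO_BB() multiplies by 8 (512-byte basic blocks), so for a
 * realtime file fsb 100 maps straight to daddr 800; a realtime fsb is a
 * plain linear offset on the rt device.  A data-device fsb instead
 * encodes an (AG number, AG block) pair, which XFS_FSB_TO_DADDR()
 * decodes before scaling.
 */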
  61
  62/*
  63 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
  64 * caller.  Frees all the extents that need freeing, which must be done
  65 * last due to locking considerations.  We never free any extents in
  66 * the first transaction.
  67 *
   68 * Returns 1 in the committed parameter if the given transaction was
   69 * committed and a new one started, and 0 otherwise.
  70 */
  71int						/* error */
  72xfs_bmap_finish(
  73	xfs_trans_t		**tp,		/* transaction pointer addr */
  74	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
  75	int			*committed)	/* xact committed or not */
  76{
  77	xfs_efd_log_item_t	*efd;		/* extent free data */
  78	xfs_efi_log_item_t	*efi;		/* extent free intention */
  79	int			error;		/* error return value */
  80	xfs_bmap_free_item_t	*free;		/* free extent item */
  81	struct xfs_trans_res	tres;		/* new log reservation */
  82	xfs_mount_t		*mp;		/* filesystem mount structure */
  83	xfs_bmap_free_item_t	*next;		/* next item on free list */
  84	xfs_trans_t		*ntp;		/* new transaction pointer */
  85
  86	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
  87	if (flist->xbf_count == 0) {
  88		*committed = 0;
  89		return 0;
  90	}
  91	ntp = *tp;
  92	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
  93	for (free = flist->xbf_first; free; free = free->xbfi_next)
  94		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
  95			free->xbfi_blockcount);
  96
  97	tres.tr_logres = ntp->t_log_res;
  98	tres.tr_logcount = ntp->t_log_count;
  99	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 100	ntp = xfs_trans_dup(*tp);
 101	error = xfs_trans_commit(*tp, 0);
 102	*tp = ntp;
 103	*committed = 1;
 104	/*
 105	 * We have a new transaction, so we should return committed=1,
 106	 * even though we're returning an error.
 107	 */
 108	if (error)
 109		return error;
 110
 111	/*
 112	 * transaction commit worked ok so we can drop the extra ticket
 113	 * reference that we gained in xfs_trans_dup()
 114	 */
 115	xfs_log_ticket_put(ntp->t_ticket);
 116
 117	error = xfs_trans_reserve(ntp, &tres, 0, 0);
 118	if (error)
 119		return error;
 120	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
 121	for (free = flist->xbf_first; free != NULL; free = next) {
 122		next = free->xbfi_next;
 123		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
 124				free->xbfi_blockcount))) {
 125			/*
 126			 * The bmap free list will be cleaned up at a
 127			 * higher level.  The EFI will be canceled when
 128			 * this transaction is aborted.
 129			 * Need to force shutdown here to make sure it
 130			 * happens, since this transaction may not be
 131			 * dirty yet.
 132			 */
 133			mp = ntp->t_mountp;
 134			if (!XFS_FORCED_SHUTDOWN(mp))
 135				xfs_force_shutdown(mp,
 136						   (error == EFSCORRUPTED) ?
 137						   SHUTDOWN_CORRUPT_INCORE :
 138						   SHUTDOWN_META_IO_ERROR);
 139			return error;
 140		}
 141		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
 142			free->xbfi_blockcount);
 143		xfs_bmap_del_free(flist, NULL, free);
 144	}
 145	return 0;
 146}
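
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * caller pattern xfs_bmap_finish() is built for.  Extents to free are
 * accumulated on an xfs_bmap_free_t list while unmapping, then freed in
 * a follow-on transaction.  Guarded out so the listing stays buildable.
 */
#if 0
STATIC int
xfs_example_unmap(
	xfs_trans_t		**tpp,	/* may be committed and replaced */
	xfs_inode_t		*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		firstfsb;
	int			committed;
	int			done;
	int			error;

	xfs_bmap_init(&free_list, &firstfsb);
	error = xfs_bunmapi(*tpp, ip, bno, len, 0, 2, &firstfsb,
			    &free_list, &done);
	if (error)
		goto out_cancel;

	/* free the queued extents; may commit *tpp and start a new one */
	error = xfs_bmap_finish(tpp, &free_list, &committed);
	if (error)
		goto out_cancel;

	return xfs_trans_commit(*tpp, XFS_TRANS_RELEASE_LOG_RES);

out_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(*tpp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	return error;
}
#endif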
 147
 148int
 149xfs_bmap_rtalloc(
 150	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
 151{
 152	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
 153	int		error;		/* error return value */
 154	xfs_mount_t	*mp;		/* mount point structure */
 155	xfs_extlen_t	prod = 0;	/* product factor for allocators */
 156	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
 157	xfs_extlen_t	align;		/* minimum allocation alignment */
 158	xfs_rtblock_t	rtb;
 159
 160	mp = ap->ip->i_mount;
 161	align = xfs_get_extsz_hint(ap->ip);
 162	prod = align / mp->m_sb.sb_rextsize;
 163	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
 164					align, 1, ap->eof, 0,
 165					ap->conv, &ap->offset, &ap->length);
 166	if (error)
 167		return error;
 168	ASSERT(ap->length);
 169	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
 170
 171	/*
 172	 * If the offset & length are not perfectly aligned
 173	 * then kill prod, it will just get us in trouble.
 174	 */
 175	if (do_mod(ap->offset, align) || ap->length % align)
 176		prod = 1;
 177	/*
 178	 * Set ralen to be the actual requested length in rtextents.
 179	 */
 180	ralen = ap->length / mp->m_sb.sb_rextsize;
 181	/*
 182	 * If the old value was close enough to MAXEXTLEN that
 183	 * we rounded up to it, cut it back so it's valid again.
 184	 * Note that if it's a really large request (bigger than
 185	 * MAXEXTLEN), we don't hear about that number, and can't
 186	 * adjust the starting point to match it.
 187	 */
 188	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
 189		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
 190
 191	/*
 192	 * Lock out other modifications to the RT bitmap inode.
 193	 */
 194	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
 195	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
 196
 197	/*
 198	 * If it's an allocation to an empty file at offset 0,
 199	 * pick an extent that will space things out in the rt area.
 200	 */
 201	if (ap->eof && ap->offset == 0) {
 202		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
 203
 204		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
 205		if (error)
 206			return error;
 207		ap->blkno = rtx * mp->m_sb.sb_rextsize;
 208	} else {
 209		ap->blkno = 0;
 210	}
 211
 212	xfs_bmap_adjacent(ap);
 213
 214	/*
 215	 * Realtime allocation, done through xfs_rtallocate_extent.
 216	 */
 217	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
 218	do_div(ap->blkno, mp->m_sb.sb_rextsize);
 219	rtb = ap->blkno;
 220	ap->length = ralen;
 221	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
 222				&ralen, atype, ap->wasdel, prod, &rtb)))
 223		return error;
 224	if (rtb == NULLFSBLOCK && prod > 1 &&
 225	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
 226					   ap->length, &ralen, atype,
 227					   ap->wasdel, 1, &rtb)))
 228		return error;
 229	ap->blkno = rtb;
 230	if (ap->blkno != NULLFSBLOCK) {
 231		ap->blkno *= mp->m_sb.sb_rextsize;
 232		ralen *= mp->m_sb.sb_rextsize;
 233		ap->length = ralen;
 234		ap->ip->i_d.di_nblocks += ralen;
 235		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 236		if (ap->wasdel)
 237			ap->ip->i_delayed_blks -= ralen;
 238		/*
 239		 * Adjust the disk quota also. This was reserved
 240		 * earlier.
 241		 */
 242		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
 243			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
 244					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
 245	} else {
 246		ap->length = 0;
 247	}
 248	return 0;
 249}
 250
 251/*
 252 * Stack switching interfaces for allocation
 253 */
 254static void
 255xfs_bmapi_allocate_worker(
 256	struct work_struct	*work)
 257{
 258	struct xfs_bmalloca	*args = container_of(work,
 259						struct xfs_bmalloca, work);
 260	unsigned long		pflags;
 261
 262	/* we are in a transaction context here */
 263	current_set_flags_nested(&pflags, PF_FSTRANS);
 264
 265	args->result = __xfs_bmapi_allocate(args);
 266	complete(args->done);
 267
 268	current_restore_flags_nested(&pflags, PF_FSTRANS);
 269}
 270
 271/*
  272 * Allocation requests often come in with little stack to work on. Push
 273 * them off to a worker thread so there is lots of stack to use. Otherwise just
 274 * call directly to avoid the context switch overhead here.
 275 */
 276int
 277xfs_bmapi_allocate(
 278	struct xfs_bmalloca	*args)
 279{
 280	DECLARE_COMPLETION_ONSTACK(done);
 281
 282	if (!args->stack_switch)
 283		return __xfs_bmapi_allocate(args);
 284
 285
 286	args->done = &done;
 287	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
 288	queue_work(xfs_alloc_wq, &args->work);
 289	wait_for_completion(&done);
 290	destroy_work_on_stack(&args->work);
 291	return args->result;
 292}
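
/*
 * Illustrative usage (hypothetical caller, not in this file): a path that
 * is already deep on the kernel stack sets stack_switch before calling in:
 *
 *	struct xfs_bmalloca	bma = { .tp = tp, .ip = ip, ... };
 *
 *	bma.stack_switch = 1;
 *	error = xfs_bmapi_allocate(&bma);
 *
 * With stack_switch clear, __xfs_bmapi_allocate() runs directly in the
 * caller's context and the completion/workqueue machinery is skipped.
 */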
 293
 294/*
 295 * Check if the endoff is outside the last extent. If so the caller will grow
 296 * the allocation to a stripe unit boundary.  All offsets are considered outside
 297 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 298 */
 299int
 300xfs_bmap_eof(
 301	struct xfs_inode	*ip,
 302	xfs_fileoff_t		endoff,
 303	int			whichfork,
 304	int			*eof)
 305{
 306	struct xfs_bmbt_irec	rec;
 307	int			error;
 308
 309	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
 310	if (error || *eof)
 311		return error;
 312
 313	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
 314	return 0;
 315}
 316
 317/*
 318 * Extent tree block counting routines.
 319 */
 320
 321/*
 322 * Count leaf blocks given a range of extent records.
 323 */
 324STATIC void
 325xfs_bmap_count_leaves(
 326	xfs_ifork_t		*ifp,
 327	xfs_extnum_t		idx,
 328	int			numrecs,
 329	int			*count)
 330{
 331	int		b;
 332
 333	for (b = 0; b < numrecs; b++) {
 334		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
 335		*count += xfs_bmbt_get_blockcount(frp);
 336	}
 337}
 338
 339/*
 340 * Count leaf blocks given a range of extent records originally
 341 * in btree format.
 342 */
 343STATIC void
 344xfs_bmap_disk_count_leaves(
 345	struct xfs_mount	*mp,
 346	struct xfs_btree_block	*block,
 347	int			numrecs,
 348	int			*count)
 349{
 350	int		b;
 351	xfs_bmbt_rec_t	*frp;
 352
 353	for (b = 1; b <= numrecs; b++) {
 354		frp = XFS_BMBT_REC_ADDR(mp, block, b);
 355		*count += xfs_bmbt_disk_get_blockcount(frp);
 356	}
 357}
 358
 359/*
 360 * Recursively walks each level of a btree
 361 * to count total fsblocks in use.
 362 */
 363STATIC int                                     /* error */
 364xfs_bmap_count_tree(
 365	xfs_mount_t     *mp,            /* file system mount point */
 366	xfs_trans_t     *tp,            /* transaction pointer */
 367	xfs_ifork_t	*ifp,		/* inode fork pointer */
 368	xfs_fsblock_t   blockno,	/* file system block number */
 369	int             levelin,	/* level in btree */
 370	int		*count)		/* Count of blocks */
 371{
 372	int			error;
 373	xfs_buf_t		*bp, *nbp;
 374	int			level = levelin;
 375	__be64			*pp;
 376	xfs_fsblock_t           bno = blockno;
 377	xfs_fsblock_t		nextbno;
 378	struct xfs_btree_block	*block, *nextblock;
 379	int			numrecs;
 380
 381	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
 382						&xfs_bmbt_buf_ops);
 383	if (error)
 384		return error;
 385	*count += 1;
 386	block = XFS_BUF_TO_BLOCK(bp);
 387
 388	if (--level) {
 389		/* Not at node above leaves, count this level of nodes */
 390		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
 391		while (nextbno != NULLFSBLOCK) {
 392			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
 393						XFS_BMAP_BTREE_REF,
 394						&xfs_bmbt_buf_ops);
 395			if (error)
 396				return error;
 397			*count += 1;
 398			nextblock = XFS_BUF_TO_BLOCK(nbp);
 399			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
 400			xfs_trans_brelse(tp, nbp);
 401		}
 402
 403		/* Dive to the next level */
 404		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
 405		bno = be64_to_cpu(*pp);
 406		if (unlikely((error =
 407		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
 408			xfs_trans_brelse(tp, bp);
 409			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
 410					 XFS_ERRLEVEL_LOW, mp);
 411			return XFS_ERROR(EFSCORRUPTED);
 412		}
 413		xfs_trans_brelse(tp, bp);
 414	} else {
 415		/* count all level 1 nodes and their leaves */
 416		for (;;) {
 417			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
 418			numrecs = be16_to_cpu(block->bb_numrecs);
 419			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
 420			xfs_trans_brelse(tp, bp);
 421			if (nextbno == NULLFSBLOCK)
 422				break;
 423			bno = nextbno;
 424			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
 425						XFS_BMAP_BTREE_REF,
 426						&xfs_bmbt_buf_ops);
 427			if (error)
 428				return error;
 429			*count += 1;
 430			block = XFS_BUF_TO_BLOCK(bp);
 431		}
 432	}
 433	return 0;
 434}
 435
 436/*
 437 * Count fsblocks of the given fork.
 438 */
 439int						/* error */
 440xfs_bmap_count_blocks(
 441	xfs_trans_t		*tp,		/* transaction pointer */
 442	xfs_inode_t		*ip,		/* incore inode */
 443	int			whichfork,	/* data or attr fork */
 444	int			*count)		/* out: count of blocks */
 445{
 446	struct xfs_btree_block	*block;	/* current btree block */
 447	xfs_fsblock_t		bno;	/* block # of "block" */
 448	xfs_ifork_t		*ifp;	/* fork structure */
 449	int			level;	/* btree level, for checking */
 450	xfs_mount_t		*mp;	/* file system mount structure */
 451	__be64			*pp;	/* pointer to block address */
 452
 453	bno = NULLFSBLOCK;
 454	mp = ip->i_mount;
 455	ifp = XFS_IFORK_PTR(ip, whichfork);
  456	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
 457		xfs_bmap_count_leaves(ifp, 0,
 458			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
 459			count);
 460		return 0;
 461	}
 462
 463	/*
 464	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 465	 */
 466	block = ifp->if_broot;
 467	level = be16_to_cpu(block->bb_level);
 468	ASSERT(level > 0);
 469	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 470	bno = be64_to_cpu(*pp);
 471	ASSERT(bno != NULLDFSBNO);
 472	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
 473	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
 474
 475	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
 476		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
 477				 mp);
 478		return XFS_ERROR(EFSCORRUPTED);
 479	}
 480
 481	return 0;
 482}
 483
 484/*
 485 * returns 1 for success, 0 if we failed to map the extent.
 486 */
 487STATIC int
 488xfs_getbmapx_fix_eof_hole(
 489	xfs_inode_t		*ip,		/* xfs incore inode pointer */
 490	struct getbmapx		*out,		/* output structure */
 491	int			prealloced,	/* this is a file with
 492						 * preallocated data space */
 493	__int64_t		end,		/* last block requested */
 494	xfs_fsblock_t		startblock)
 495{
 496	__int64_t		fixlen;
 497	xfs_mount_t		*mp;		/* file system mount point */
 498	xfs_ifork_t		*ifp;		/* inode fork pointer */
 499	xfs_extnum_t		lastx;		/* last extent pointer */
 500	xfs_fileoff_t		fileblock;
 501
 502	if (startblock == HOLESTARTBLOCK) {
 503		mp = ip->i_mount;
 504		out->bmv_block = -1;
 505		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
 506		fixlen -= out->bmv_offset;
 507		if (prealloced && out->bmv_offset + out->bmv_length == end) {
 508			/* Came to hole at EOF. Trim it. */
 509			if (fixlen <= 0)
 510				return 0;
 511			out->bmv_length = fixlen;
 512		}
 513	} else {
 514		if (startblock == DELAYSTARTBLOCK)
 515			out->bmv_block = -2;
 516		else
 517			out->bmv_block = xfs_fsb_to_db(ip, startblock);
 518		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
 519		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
 520		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
 521		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
 522			out->bmv_oflags |= BMV_OF_LAST;
 523	}
 524
 525	return 1;
 526}
 527
 528/*
 529 * Get inode's extents as described in bmv, and format for output.
 530 * Calls formatter to fill the user's buffer until all extents
 531 * are mapped, until the passed-in bmv->bmv_count slots have
 532 * been filled, or until the formatter short-circuits the loop,
 533 * if it is tracking filled-in extents on its own.
 534 */
 535int						/* error code */
 536xfs_getbmap(
 537	xfs_inode_t		*ip,
 538	struct getbmapx		*bmv,		/* user bmap structure */
 539	xfs_bmap_format_t	formatter,	/* format to user */
 540	void			*arg)		/* formatter arg */
 541{
 542	__int64_t		bmvend;		/* last block requested */
 543	int			error = 0;	/* return value */
 544	__int64_t		fixlen;		/* length for -1 case */
 545	int			i;		/* extent number */
 546	int			lock;		/* lock state */
 547	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
 548	xfs_mount_t		*mp;		/* file system mount point */
 549	int			nex;		/* # of user extents can do */
 550	int			nexleft;	/* # of user extents left */
 551	int			subnex;		/* # of bmapi's can do */
 552	int			nmap;		/* number of map entries */
 553	struct getbmapx		*out;		/* output structure */
 554	int			whichfork;	/* data or attr fork */
 555	int			prealloced;	/* this is a file with
 556						 * preallocated data space */
 557	int			iflags;		/* interface flags */
 558	int			bmapi_flags;	/* flags for xfs_bmapi */
 559	int			cur_ext = 0;
 560
 561	mp = ip->i_mount;
 562	iflags = bmv->bmv_iflags;
 563	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
 564
 565	if (whichfork == XFS_ATTR_FORK) {
 566		if (XFS_IFORK_Q(ip)) {
 567			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
 568			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
 569			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
 570				return XFS_ERROR(EINVAL);
 571		} else if (unlikely(
 572			   ip->i_d.di_aformat != 0 &&
 573			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
 574			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
 575					 ip->i_mount);
 576			return XFS_ERROR(EFSCORRUPTED);
 577		}
 578
 579		prealloced = 0;
 580		fixlen = 1LL << 32;
 581	} else {
 582		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
 583		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
 584		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
 585			return XFS_ERROR(EINVAL);
 586
 587		if (xfs_get_extsz_hint(ip) ||
 588		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
 589			prealloced = 1;
 590			fixlen = mp->m_super->s_maxbytes;
 591		} else {
 592			prealloced = 0;
 593			fixlen = XFS_ISIZE(ip);
 594		}
 595	}
 596
 597	if (bmv->bmv_length == -1) {
 598		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
 599		bmv->bmv_length =
 600			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
 601	} else if (bmv->bmv_length == 0) {
 602		bmv->bmv_entries = 0;
 603		return 0;
 604	} else if (bmv->bmv_length < 0) {
 605		return XFS_ERROR(EINVAL);
 606	}
 607
 608	nex = bmv->bmv_count - 1;
 609	if (nex <= 0)
 610		return XFS_ERROR(EINVAL);
 611	bmvend = bmv->bmv_offset + bmv->bmv_length;
 612
 613
 614	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
 615		return XFS_ERROR(ENOMEM);
 616	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
 617	if (!out)
 618		return XFS_ERROR(ENOMEM);
 619
 620	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 621	if (whichfork == XFS_DATA_FORK) {
 622		if (!(iflags & BMV_IF_DELALLOC) &&
 623		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 624			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
 625			if (error)
 626				goto out_unlock_iolock;
 627
 628			/*
 629			 * Even after flushing the inode, there can still be
 630			 * delalloc blocks on the inode beyond EOF due to
 631			 * speculative preallocation.  These are not removed
 632			 * until the release function is called or the inode
 633			 * is inactivated.  Hence we cannot assert here that
 634			 * ip->i_delayed_blks == 0.
 635			 */
 636		}
 637
 638		lock = xfs_ilock_data_map_shared(ip);
 639	} else {
 640		lock = xfs_ilock_attr_map_shared(ip);
 641	}
 642
 643	/*
 644	 * Don't let nex be bigger than the number of extents
 645	 * we can have assuming alternating holes and real extents.
 646	 */
 647	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
 648		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
 649
 650	bmapi_flags = xfs_bmapi_aflag(whichfork);
 651	if (!(iflags & BMV_IF_PREALLOC))
 652		bmapi_flags |= XFS_BMAPI_IGSTATE;
 653
 654	/*
 655	 * Allocate enough space to handle "subnex" maps at a time.
 656	 */
 657	error = ENOMEM;
 658	subnex = 16;
 659	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
 660	if (!map)
 661		goto out_unlock_ilock;
 662
 663	bmv->bmv_entries = 0;
 664
 665	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
 666	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
 667		error = 0;
 668		goto out_free_map;
 669	}
 670
 671	nexleft = nex;
 672
 673	do {
 674		nmap = (nexleft > subnex) ? subnex : nexleft;
 675		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
 676				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
 677				       map, &nmap, bmapi_flags);
 678		if (error)
 679			goto out_free_map;
 680		ASSERT(nmap <= subnex);
 681
 682		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
 683			out[cur_ext].bmv_oflags = 0;
 684			if (map[i].br_state == XFS_EXT_UNWRITTEN)
 685				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
 686			else if (map[i].br_startblock == DELAYSTARTBLOCK)
 687				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
 688			out[cur_ext].bmv_offset =
 689				XFS_FSB_TO_BB(mp, map[i].br_startoff);
 690			out[cur_ext].bmv_length =
 691				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
 692			out[cur_ext].bmv_unused1 = 0;
 693			out[cur_ext].bmv_unused2 = 0;
 694
 695			/*
 696			 * delayed allocation extents that start beyond EOF can
 697			 * occur due to speculative EOF allocation when the
 698			 * delalloc extent is larger than the largest freespace
 699			 * extent at conversion time. These extents cannot be
 700			 * converted by data writeback, so can exist here even
 701			 * if we are not supposed to be finding delalloc
 702			 * extents.
 703			 */
 704			if (map[i].br_startblock == DELAYSTARTBLOCK &&
 705			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
 706				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 707
  708			if (map[i].br_startblock == HOLESTARTBLOCK &&
 709			    whichfork == XFS_ATTR_FORK) {
 710				/* came to the end of attribute fork */
 711				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
 712				goto out_free_map;
 713			}
 714
 715			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
 716					prealloced, bmvend,
 717					map[i].br_startblock))
 718				goto out_free_map;
 719
 720			bmv->bmv_offset =
 721				out[cur_ext].bmv_offset +
 722				out[cur_ext].bmv_length;
 723			bmv->bmv_length =
 724				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
 725
 726			/*
 727			 * In case we don't want to return the hole,
 728			 * don't increase cur_ext so that we can reuse
 729			 * it in the next loop.
 730			 */
 731			if ((iflags & BMV_IF_NO_HOLES) &&
 732			    map[i].br_startblock == HOLESTARTBLOCK) {
 733				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
 734				continue;
 735			}
 736
 737			nexleft--;
 738			bmv->bmv_entries++;
 739			cur_ext++;
 740		}
 741	} while (nmap && nexleft && bmv->bmv_length);
 742
 743 out_free_map:
 744	kmem_free(map);
 745 out_unlock_ilock:
 746	xfs_iunlock(ip, lock);
 747 out_unlock_iolock:
 748	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 749
 750	for (i = 0; i < cur_ext; i++) {
 751		int full = 0;	/* user array is full */
 752
 753		/* format results & advance arg */
 754		error = formatter(&arg, &out[i], &full);
 755		if (error || full)
 756			break;
 757	}
 758
 759	kmem_free(out);
 760	return error;
 761}
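
/*
 * Illustrative sketch (hypothetical formatter, not part of this file):
 * the contract xfs_getbmap() expects from its xfs_bmap_format_t callback.
 * It is called once per mapped record; returning an error or setting
 * *full stops the output loop above.  Guarded out so the listing stays
 * buildable.
 */
#if 0
STATIC int
xfs_example_format(
	void			**ap,	/* cursor into the user buffer */
	struct getbmapx		*bmv,	/* one filled-in output record */
	int			*full)	/* set when the buffer is exhausted */
{
	struct getbmapx __user	*base = *ap;

	if (copy_to_user(base, bmv, sizeof(*bmv)))
		return XFS_ERROR(EFAULT);
	*ap += sizeof(struct getbmapx);	/* advance cursor for the next call */
	return 0;
}
#endif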
 762
 763/*
  764 * dead simple method of punching delayed allocation blocks from a range in
 765 * the inode. Walks a block at a time so will be slow, but is only executed in
 766 * rare error cases so the overhead is not critical. This will always punch out
 767 * both the start and end blocks, even if the ranges only partially overlap
 768 * them, so it is up to the caller to ensure that partial blocks are not
 769 * passed in.
 770 */
 771int
 772xfs_bmap_punch_delalloc_range(
 773	struct xfs_inode	*ip,
 774	xfs_fileoff_t		start_fsb,
 775	xfs_fileoff_t		length)
 776{
 777	xfs_fileoff_t		remaining = length;
 778	int			error = 0;
 779
 780	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 781
 782	do {
 783		int		done;
 784		xfs_bmbt_irec_t	imap;
 785		int		nimaps = 1;
 786		xfs_fsblock_t	firstblock;
 787		xfs_bmap_free_t flist;
 788
 789		/*
 790		 * Map the range first and check that it is a delalloc extent
 791		 * before trying to unmap the range. Otherwise we will be
 792		 * trying to remove a real extent (which requires a
 793		 * transaction) or a hole, which is probably a bad idea...
 794		 */
 795		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
 796				       XFS_BMAPI_ENTIRE);
 797
 798		if (error) {
 799			/* something screwed, just bail */
 800			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 801				xfs_alert(ip->i_mount,
 802			"Failed delalloc mapping lookup ino %lld fsb %lld.",
 803						ip->i_ino, start_fsb);
 804			}
 805			break;
 806		}
 807		if (!nimaps) {
 808			/* nothing there */
 809			goto next_block;
 810		}
 811		if (imap.br_startblock != DELAYSTARTBLOCK) {
 812			/* been converted, ignore */
 813			goto next_block;
 814		}
 815		WARN_ON(imap.br_blockcount == 0);
 816
 817		/*
 818		 * Note: while we initialise the firstblock/flist pair, they
 819		 * should never be used because blocks should never be
  820		 * allocated or freed for a delalloc extent, and hence we don't
  821		 * need to cancel or finish them after the xfs_bunmapi() call.
 822		 */
 823		xfs_bmap_init(&flist, &firstblock);
 824		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
 825					&flist, &done);
 826		if (error)
 827			break;
 828
 829		ASSERT(!flist.xbf_count && !flist.xbf_first);
 830next_block:
 831		start_fsb++;
 832		remaining--;
  833	} while (remaining > 0);
 834
 835	return error;
 836}
 837
 838/*
 839 * Test whether it is appropriate to check an inode for and free post EOF
 840 * blocks. The 'force' parameter determines whether we should also consider
 841 * regular files that are marked preallocated or append-only.
 842 */
 843bool
 844xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
 845{
 846	/* prealloc/delalloc exists only on regular files */
 847	if (!S_ISREG(ip->i_d.di_mode))
 848		return false;
 849
 850	/*
 851	 * Zero sized files with no cached pages and delalloc blocks will not
 852	 * have speculative prealloc/delalloc blocks to remove.
 853	 */
 854	if (VFS_I(ip)->i_size == 0 &&
 855	    VN_CACHED(VFS_I(ip)) == 0 &&
 856	    ip->i_delayed_blks == 0)
 857		return false;
 858
 859	/* If we haven't read in the extent list, then don't do it now. */
 860	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
 861		return false;
 862
 863	/*
 864	 * Do not free real preallocated or append-only files unless the file
 865	 * has delalloc blocks and we are forced to remove them.
 866	 */
 867	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
 868		if (!force || ip->i_delayed_blks == 0)
 869			return false;
 870
 871	return true;
 872}
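
/*
 * Illustrative usage (hypothetical, based on the helpers in this file):
 * callers pair this check with xfs_free_eofblocks() below, roughly:
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(mp, ip, true);
 *
 * passing force as true only when delalloc blocks must be removed even
 * from preallocated or append-only files.
 */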
 873
 874/*
 875 * This is called by xfs_inactive to free any blocks beyond eof
 876 * when the link count isn't zero and by xfs_dm_punch_hole() when
 877 * punching a hole to EOF.
 878 */
 879int
 880xfs_free_eofblocks(
 881	xfs_mount_t	*mp,
 882	xfs_inode_t	*ip,
 883	bool		need_iolock)
 884{
 885	xfs_trans_t	*tp;
 886	int		error;
 887	xfs_fileoff_t	end_fsb;
 888	xfs_fileoff_t	last_fsb;
 889	xfs_filblks_t	map_len;
 890	int		nimaps;
 891	xfs_bmbt_irec_t	imap;
 892
 893	/*
 894	 * Figure out if there are any blocks beyond the end
 895	 * of the file.  If not, then there is nothing to do.
 896	 */
 897	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
 898	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 899	if (last_fsb <= end_fsb)
 900		return 0;
 901	map_len = last_fsb - end_fsb;
 902
 903	nimaps = 1;
 904	xfs_ilock(ip, XFS_ILOCK_SHARED);
 905	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
 906	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 907
 908	if (!error && (nimaps != 0) &&
 909	    (imap.br_startblock != HOLESTARTBLOCK ||
 910	     ip->i_delayed_blks)) {
 911		/*
 912		 * Attach the dquots to the inode up front.
 913		 */
 914		error = xfs_qm_dqattach(ip, 0);
 915		if (error)
 916			return error;
 917
 918		/*
 919		 * There are blocks after the end of file.
 920		 * Free them up now by truncating the file to
 921		 * its current size.
 922		 */
 923		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
 924
 925		if (need_iolock) {
 926			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
 927				xfs_trans_cancel(tp, 0);
 928				return EAGAIN;
 929			}
 930		}
 931
 932		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 933		if (error) {
 934			ASSERT(XFS_FORCED_SHUTDOWN(mp));
 935			xfs_trans_cancel(tp, 0);
 936			if (need_iolock)
 937				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 938			return error;
 939		}
 940
 941		xfs_ilock(ip, XFS_ILOCK_EXCL);
 942		xfs_trans_ijoin(tp, ip, 0);
 943
 944		/*
 945		 * Do not update the on-disk file size.  If we update the
 946		 * on-disk file size and then the system crashes before the
 947		 * contents of the file are flushed to disk then the files
  948	 * may be full of holes (i.e. the NULL files bug).
 949		 */
 950		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
 951					      XFS_ISIZE(ip));
 952		if (error) {
 953			/*
 954			 * If we get an error at this point we simply don't
 955			 * bother truncating the file.
 956			 */
 957			xfs_trans_cancel(tp,
 958					 (XFS_TRANS_RELEASE_LOG_RES |
 959					  XFS_TRANS_ABORT));
 960		} else {
 961			error = xfs_trans_commit(tp,
 962						XFS_TRANS_RELEASE_LOG_RES);
 963			if (!error)
 964				xfs_inode_clear_eofblocks_tag(ip);
 965		}
 966
 967		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 968		if (need_iolock)
 969			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 970	}
 971	return error;
 972}
 973
 974int
 975xfs_alloc_file_space(
 976	struct xfs_inode	*ip,
 977	xfs_off_t		offset,
 978	xfs_off_t		len,
 979	int			alloc_type)
 980{
 981	xfs_mount_t		*mp = ip->i_mount;
 982	xfs_off_t		count;
 983	xfs_filblks_t		allocated_fsb;
 984	xfs_filblks_t		allocatesize_fsb;
 985	xfs_extlen_t		extsz, temp;
 986	xfs_fileoff_t		startoffset_fsb;
 987	xfs_fsblock_t		firstfsb;
 988	int			nimaps;
 989	int			quota_flag;
 990	int			rt;
 991	xfs_trans_t		*tp;
 992	xfs_bmbt_irec_t		imaps[1], *imapp;
 993	xfs_bmap_free_t		free_list;
 994	uint			qblocks, resblks, resrtextents;
 995	int			committed;
 996	int			error;
 997
 998	trace_xfs_alloc_file_space(ip);
 999
1000	if (XFS_FORCED_SHUTDOWN(mp))
1001		return XFS_ERROR(EIO);
1002
1003	error = xfs_qm_dqattach(ip, 0);
1004	if (error)
1005		return error;
1006
1007	if (len <= 0)
1008		return XFS_ERROR(EINVAL);
1009
1010	rt = XFS_IS_REALTIME_INODE(ip);
1011	extsz = xfs_get_extsz_hint(ip);
1012
1013	count = len;
1014	imapp = &imaps[0];
1015	nimaps = 1;
1016	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
1017	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1018
1019	/*
1020	 * Allocate file space until done or until there is an error
1021	 */
1022	while (allocatesize_fsb && !error) {
1023		xfs_fileoff_t	s, e;
1024
1025		/*
1026		 * Determine space reservations for data/realtime.
1027		 */
1028		if (unlikely(extsz)) {
1029			s = startoffset_fsb;
1030			do_div(s, extsz);
1031			s *= extsz;
1032			e = startoffset_fsb + allocatesize_fsb;
1033			if ((temp = do_mod(startoffset_fsb, extsz)))
1034				e += temp;
1035			if ((temp = do_mod(e, extsz)))
1036				e += extsz - temp;
1037		} else {
1038			s = 0;
1039			e = allocatesize_fsb;
1040		}
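
		/*
		 * Worked example (assumed numbers): with extsz = 16,
		 * startoffset_fsb = 10 and allocatesize_fsb = 20, we get
		 * s = (10 / 16) * 16 = 0 and e = 30 + (10 % 16) = 40;
		 * then 40 % 16 = 8, so e = 40 + (16 - 8) = 48.  The
		 * reservation below is sized for the aligned span [0, 48),
		 * a deliberate overestimate of the unaligned request.
		 */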
1041
1042		/*
1043		 * The transaction reservation is limited to a 32-bit block
1044		 * count, hence we need to limit the number of blocks we are
1045		 * trying to reserve to avoid an overflow. We can't allocate
1046		 * more than @nimaps extents, and an extent is limited on disk
1047		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1048		 */
1049		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1050		if (unlikely(rt)) {
1051			resrtextents = qblocks = resblks;
1052			resrtextents /= mp->m_sb.sb_rextsize;
1053			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1054			quota_flag = XFS_QMOPT_RES_RTBLKS;
1055		} else {
1056			resrtextents = 0;
1057			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1058			quota_flag = XFS_QMOPT_RES_REGBLKS;
1059		}
1060
1061		/*
1062		 * Allocate and setup the transaction.
1063		 */
1064		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1065		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1066					  resblks, resrtextents);
1067		/*
1068		 * Check for running out of space
1069		 */
1070		if (error) {
1071			/*
1072			 * Free the transaction structure.
1073			 */
1074			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1075			xfs_trans_cancel(tp, 0);
1076			break;
1077		}
1078		xfs_ilock(ip, XFS_ILOCK_EXCL);
1079		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1080						      0, quota_flag);
1081		if (error)
1082			goto error1;
1083
1084		xfs_trans_ijoin(tp, ip, 0);
1085
1086		xfs_bmap_init(&free_list, &firstfsb);
1087		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1088					allocatesize_fsb, alloc_type, &firstfsb,
1089					0, imapp, &nimaps, &free_list);
1090		if (error) {
1091			goto error0;
1092		}
1093
1094		/*
1095		 * Complete the transaction
1096		 */
1097		error = xfs_bmap_finish(&tp, &free_list, &committed);
1098		if (error) {
1099			goto error0;
1100		}
1101
1102		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1103		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1104		if (error) {
1105			break;
1106		}
1107
1108		allocated_fsb = imapp->br_blockcount;
1109
1110		if (nimaps == 0) {
1111			error = XFS_ERROR(ENOSPC);
1112			break;
1113		}
1114
1115		startoffset_fsb += allocated_fsb;
1116		allocatesize_fsb -= allocated_fsb;
1117	}
1118
1119	return error;
1120
1121error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1122	xfs_bmap_cancel(&free_list);
1123	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1124
1125error1:	/* Just cancel transaction */
1126	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1127	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1128	return error;
1129}
1130
1131/*
1132 * Zero file bytes between startoff and endoff inclusive.
1133 * The iolock is held exclusive and no blocks are buffered.
1134 *
1135 * This function is used by xfs_free_file_space() to zero
1136 * partial blocks when the range to free is not block aligned.
1137 * When unreserving space with boundaries that are not block
1138 * aligned we round up the start and round down the end
1139 * boundaries and then use this function to zero the parts of
1140 * the blocks that got dropped during the rounding.
1141 */
1142STATIC int
1143xfs_zero_remaining_bytes(
1144	xfs_inode_t		*ip,
1145	xfs_off_t		startoff,
1146	xfs_off_t		endoff)
1147{
1148	xfs_bmbt_irec_t		imap;
1149	xfs_fileoff_t		offset_fsb;
1150	xfs_off_t		lastoffset;
1151	xfs_off_t		offset;
1152	xfs_buf_t		*bp;
1153	xfs_mount_t		*mp = ip->i_mount;
1154	int			nimap;
1155	int			error = 0;
1156
1157	/*
1158	 * Avoid doing I/O beyond eof - it's not necessary
1159	 * since nothing can read beyond eof.  The space will
1160	 * be zeroed when the file is extended anyway.
1161	 */
1162	if (startoff >= XFS_ISIZE(ip))
1163		return 0;
1164
1165	if (endoff > XFS_ISIZE(ip))
1166		endoff = XFS_ISIZE(ip);
1167
1168	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
1169					mp->m_rtdev_targp : mp->m_ddev_targp,
1170				  BTOBB(mp->m_sb.sb_blocksize), 0);
1171	if (!bp)
1172		return XFS_ERROR(ENOMEM);
1173
1174	xfs_buf_unlock(bp);
1175
1176	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1177		uint lock_mode;
1178
1179		offset_fsb = XFS_B_TO_FSBT(mp, offset);
1180		nimap = 1;
1181
1182		lock_mode = xfs_ilock_data_map_shared(ip);
1183		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1184		xfs_iunlock(ip, lock_mode);
1185
1186		if (error || nimap < 1)
1187			break;
1188		ASSERT(imap.br_blockcount >= 1);
1189		ASSERT(imap.br_startoff == offset_fsb);
1190		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1191		if (lastoffset > endoff)
1192			lastoffset = endoff;
1193		if (imap.br_startblock == HOLESTARTBLOCK)
1194			continue;
1195		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1196		if (imap.br_state == XFS_EXT_UNWRITTEN)
1197			continue;
1198		XFS_BUF_UNDONE(bp);
1199		XFS_BUF_UNWRITE(bp);
1200		XFS_BUF_READ(bp);
1201		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1202
1203		if (XFS_FORCED_SHUTDOWN(mp)) {
1204			error = XFS_ERROR(EIO);
1205			break;
1206		}
1207		xfs_buf_iorequest(bp);
1208		error = xfs_buf_iowait(bp);
1209		if (error) {
1210			xfs_buf_ioerror_alert(bp,
1211					"xfs_zero_remaining_bytes(read)");
1212			break;
1213		}
1214		memset(bp->b_addr +
1215			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1216		      0, lastoffset - offset + 1);
1217		XFS_BUF_UNDONE(bp);
1218		XFS_BUF_UNREAD(bp);
1219		XFS_BUF_WRITE(bp);
1220
1221		if (XFS_FORCED_SHUTDOWN(mp)) {
1222			error = XFS_ERROR(EIO);
1223			break;
1224		}
1225		xfs_buf_iorequest(bp);
1226		error = xfs_buf_iowait(bp);
1227		if (error) {
1228			xfs_buf_ioerror_alert(bp,
1229					"xfs_zero_remaining_bytes(write)");
1230			break;
1231		}
1232	}
1233	xfs_buf_free(bp);
1234	return error;
1235}
1236
1237int
1238xfs_free_file_space(
1239	struct xfs_inode	*ip,
1240	xfs_off_t		offset,
1241	xfs_off_t		len)
1242{
1243	int			committed;
1244	int			done;
1245	xfs_fileoff_t		endoffset_fsb;
1246	int			error;
1247	xfs_fsblock_t		firstfsb;
1248	xfs_bmap_free_t		free_list;
1249	xfs_bmbt_irec_t		imap;
1250	xfs_off_t		ioffset;
1251	xfs_extlen_t		mod=0;
1252	xfs_mount_t		*mp;
1253	int			nimap;
1254	uint			resblks;
1255	xfs_off_t		rounding;
1256	int			rt;
1257	xfs_fileoff_t		startoffset_fsb;
1258	xfs_trans_t		*tp;
1259
1260	mp = ip->i_mount;
1261
1262	trace_xfs_free_file_space(ip);
1263
1264	error = xfs_qm_dqattach(ip, 0);
1265	if (error)
1266		return error;
1267
1268	error = 0;
1269	if (len <= 0)	/* if nothing being freed */
1270		return error;
1271	rt = XFS_IS_REALTIME_INODE(ip);
1272	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
1273	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1274
1275	/* wait for the completion of any pending DIOs */
1276	inode_dio_wait(VFS_I(ip));
1277
1278	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1279	ioffset = offset & ~(rounding - 1);
1280	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1281					      ioffset, -1);
1282	if (error)
1283		goto out;
1284	truncate_pagecache_range(VFS_I(ip), ioffset, -1);
1285
1286	/*
1287	 * Need to zero the stuff we're not freeing, on disk.
1288	 * If it's a realtime file & can't use unwritten extents then we
1289	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
1290	 * will take care of it for us.
1291	 */
1292	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1293		nimap = 1;
1294		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1295					&imap, &nimap, 0);
1296		if (error)
1297			goto out;
1298		ASSERT(nimap == 0 || nimap == 1);
1299		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1300			xfs_daddr_t	block;
1301
1302			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1303			block = imap.br_startblock;
1304			mod = do_div(block, mp->m_sb.sb_rextsize);
1305			if (mod)
1306				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1307		}
1308		nimap = 1;
1309		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1310					&imap, &nimap, 0);
1311		if (error)
1312			goto out;
1313		ASSERT(nimap == 0 || nimap == 1);
1314		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1315			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1316			mod++;
1317			if (mod && (mod != mp->m_sb.sb_rextsize))
1318				endoffset_fsb -= mod;
1319		}
1320	}
1321	if ((done = (endoffset_fsb <= startoffset_fsb)))
1322		/*
1323		 * One contiguous piece to clear
1324		 */
1325		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1326	else {
1327		/*
1328		 * Some full blocks, possibly two pieces to clear
1329		 */
1330		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1331			error = xfs_zero_remaining_bytes(ip, offset,
1332				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1333		if (!error &&
1334		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1335			error = xfs_zero_remaining_bytes(ip,
1336				XFS_FSB_TO_B(mp, endoffset_fsb),
1337				offset + len - 1);
1338	}
1339
1340	/*
1341	 * free file space until done or until there is an error
1342	 */
1343	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1344	while (!error && !done) {
1345
1346		/*
1347		 * allocate and setup the transaction. Allow this
1348		 * transaction to dip into the reserve blocks to ensure
1349		 * the freeing of the space succeeds at ENOSPC.
1350		 */
1351		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1352		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
1353
1354		/*
1355		 * check for running out of space
1356		 */
1357		if (error) {
1358			/*
1359			 * Free the transaction structure.
1360			 */
1361			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1362			xfs_trans_cancel(tp, 0);
1363			break;
1364		}
1365		xfs_ilock(ip, XFS_ILOCK_EXCL);
1366		error = xfs_trans_reserve_quota(tp, mp,
1367				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1368				resblks, 0, XFS_QMOPT_RES_REGBLKS);
1369		if (error)
1370			goto error1;
1371
1372		xfs_trans_ijoin(tp, ip, 0);
1373
1374		/*
1375		 * issue the bunmapi() call to free the blocks
1376		 */
1377		xfs_bmap_init(&free_list, &firstfsb);
1378		error = xfs_bunmapi(tp, ip, startoffset_fsb,
1379				  endoffset_fsb - startoffset_fsb,
1380				  0, 2, &firstfsb, &free_list, &done);
1381		if (error) {
1382			goto error0;
1383		}
1384
1385		/*
1386		 * complete the transaction
1387		 */
1388		error = xfs_bmap_finish(&tp, &free_list, &committed);
1389		if (error) {
1390			goto error0;
1391		}
1392
1393		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1394		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1395	}
1396
1397 out:
1398	return error;
1399
1400 error0:
1401	xfs_bmap_cancel(&free_list);
1402 error1:
1403	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1404	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1405	goto out;
1406}
1407
1408
1409int
1410xfs_zero_file_space(
1411	struct xfs_inode	*ip,
1412	xfs_off_t		offset,
1413	xfs_off_t		len)
1414{
1415	struct xfs_mount	*mp = ip->i_mount;
1416	uint			granularity;
1417	xfs_off_t		start_boundary;
1418	xfs_off_t		end_boundary;
1419	int			error;
1420
1421	trace_xfs_zero_file_space(ip);
1422
1423	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1424
1425	/*
1426	 * Round the range of extents we are going to convert inwards.  If the
1427	 * offset is aligned, then it doesn't get changed so we zero from the
1428	 * start of the block offset points to.
1429	 */
1430	start_boundary = round_up(offset, granularity);
1431	end_boundary = round_down(offset + len, granularity);
1432
1433	ASSERT(start_boundary >= offset);
1434	ASSERT(end_boundary <= offset + len);
1435
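	/*
	 * Worked example (assumed numbers): offset = 100, len = 20000,
	 * granularity = 4096.  Then start_boundary = round_up(100, 4096)
	 * = 4096 and end_boundary = round_down(20100, 4096) = 16384;
	 * [4096, 16384) is punched and converted below, while the edges
	 * [100, 4096) and [16384, 20100) are zeroed with xfs_iozero().
	 */
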
1436	if (start_boundary < end_boundary - 1) {
1437		/*
1438		 * punch out delayed allocation blocks and the page cache over
1439		 * the conversion range
1440		 */
1441		xfs_ilock(ip, XFS_ILOCK_EXCL);
1442		error = xfs_bmap_punch_delalloc_range(ip,
1443				XFS_B_TO_FSBT(mp, start_boundary),
1444				XFS_B_TO_FSB(mp, end_boundary - start_boundary));
1445		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1446		truncate_pagecache_range(VFS_I(ip), start_boundary,
1447					 end_boundary - 1);
1448
1449		/* convert the blocks */
1450		error = xfs_alloc_file_space(ip, start_boundary,
1451					end_boundary - start_boundary - 1,
1452					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
1453		if (error)
1454			goto out;
1455
1456		/* We've handled the interior of the range, now for the edges */
1457		if (start_boundary != offset) {
1458			error = xfs_iozero(ip, offset, start_boundary - offset);
1459			if (error)
1460				goto out;
1461		}
1462
1463		if (end_boundary != offset + len)
1464			error = xfs_iozero(ip, end_boundary,
1465					   offset + len - end_boundary);
1466
1467	} else {
1468		/*
1469		 * It's either a sub-granularity range, or the range spans
1470		 * parts of two adjacent blocks.
1471		 */
1472		error = xfs_iozero(ip, offset, len);
1473	}
1474
1475out:
1476	return error;
1477
1478}
1479
1480/*
1481 * xfs_collapse_file_space()
1482 *	This routine frees disk space and shifts extents for the given file.
1483 *	The first thing we do is free the data blocks in the specified range
1484 *	by calling xfs_free_file_space(), which also syncs dirty data
1485 *	and invalidates the page cache over the region on which the collapse
1486 *	range is working, and then shifts extent records left to cover the hole.
1487 * RETURNS:
1488 *	0 on success
1489 *	errno on error
1490 *
1491 */
1492int
1493xfs_collapse_file_space(
1494	struct xfs_inode	*ip,
1495	xfs_off_t		offset,
1496	xfs_off_t		len)
1497{
1498	int			done = 0;
1499	struct xfs_mount	*mp = ip->i_mount;
1500	struct xfs_trans	*tp;
1501	int			error;
1502	xfs_extnum_t		current_ext = 0;
1503	struct xfs_bmap_free	free_list;
1504	xfs_fsblock_t		first_block;
1505	int			committed;
1506	xfs_fileoff_t		start_fsb;
1507	xfs_fileoff_t		shift_fsb;
1508
1509	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1510
1511	trace_xfs_collapse_file_space(ip);
1512
1513	start_fsb = XFS_B_TO_FSB(mp, offset + len);
1514	shift_fsb = XFS_B_TO_FSB(mp, len);
1515
1516	error = xfs_free_file_space(ip, offset, len);
1517	if (error)
1518		return error;
1519
1520	while (!error && !done) {
1521		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1522		tp->t_flags |= XFS_TRANS_RESERVE;
1523		/*
1524		 * We need to reserve a permanent block for the transaction.
1525		 * This comes into play when, after shifting an extent into the
1526		 * hole, we find that adjacent extents can be merged, which
1527		 * may lead to freeing of a block during the record update.
1528		 */
1529		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1530				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
1531		if (error) {
1532			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1533			xfs_trans_cancel(tp, 0);
1534			break;
1535		}
1536
1537		xfs_ilock(ip, XFS_ILOCK_EXCL);
1538		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1539				ip->i_gdquot, ip->i_pdquot,
1540				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
1541				XFS_QMOPT_RES_REGBLKS);
1542		if (error)
1543			goto out;
1544
1545		xfs_trans_ijoin(tp, ip, 0);
1546
1547		xfs_bmap_init(&free_list, &first_block);
1548
1549		/*
1550		 * We are using the write transaction in which max 2 bmbt
1551		 * updates are allowed
1552		 */
1553		error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
1554					       shift_fsb, &current_ext,
1555					       &first_block, &free_list,
1556					       XFS_BMAP_MAX_SHIFT_EXTENTS);
1557		if (error)
1558			goto out;
1559
1560		error = xfs_bmap_finish(&tp, &free_list, &committed);
1561		if (error)
1562			goto out;
1563
1564		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1565		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1566	}
1567
1568	return error;
1569
1570out:
1571	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1572	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1573	return error;
1574}
1575
1576/*
1577 * We need to check that the format of the data fork in the temporary inode is
1578 * valid for the target inode before doing the swap. This is not a problem with
1579 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1580 * data fork depending on the space the attribute fork is taking so we can get
1581 * invalid formats on the target inode.
1582 *
1583 * E.g. target has space for 7 extents in extent format, temp inode only has
1584 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1585 * btree, but when swapped it needs to be in extent format. Hence we can't just
1586 * blindly swap data forks on attr2 filesystems.
1587 *
1588 * Note that we check the swap in both directions so that we don't end up with
1589 * a corrupt temporary inode, either.
1590 *
1591 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1592 * inode will prevent this situation from occurring, so all we do here is
1593 * reject and log the attempt. Basically, we are putting the responsibility on
1594 * userspace to get this right.
1595 */
1596static int
1597xfs_swap_extents_check_format(
1598	xfs_inode_t	*ip,	/* target inode */
1599	xfs_inode_t	*tip)	/* tmp inode */
1600{
1601
1602	/* Should never get a local format */
1603	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1604	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1605		return EINVAL;
1606
1607	/*
1608	 * If the target inode has fewer extents than the temporary inode, then
1609	 * why did userspace call us?
1610	 */
1611	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1612		return EINVAL;
1613
1614	/*
1615	 * if the target inode is in extent form and the temp inode is in btree
1616	 * form then we will end up with the target inode in the wrong format
1617	 * as we already know there are fewer extents in the temp inode.
1618	 */
1619	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1620	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1621		return EINVAL;
1622
1623	/* Check temp in extent form to max in target */
1624	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1625	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1626			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1627		return EINVAL;
1628
1629	/* Check target in extent form to max in temp */
1630	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1631	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1632			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1633		return EINVAL;
1634
1635	/*
1636	 * If we are in a btree format, check that the temp root block will fit
1637	 * in the target and that it has enough extents to be in btree format
1638	 * in the target.
1639	 *
1640	 * Note that we have to be careful to allow btree->extent conversions
1641	 * (a common defrag case) which will occur when the temp inode is in
1642	 * extent format...
1643	 */
1644	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1645		if (XFS_IFORK_BOFF(ip) &&
1646		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1647			return EINVAL;
1648		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1649		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1650			return EINVAL;
1651	}
1652
1653	/* Reciprocal target->temp btree format checks */
1654	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1655		if (XFS_IFORK_BOFF(tip) &&
1656		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1657			return EINVAL;
1658		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1659		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1660			return EINVAL;
1661	}
1662
1663	return 0;
1664}
1665
1666int
1667xfs_swap_extents(
1668	xfs_inode_t	*ip,	/* target inode */
1669	xfs_inode_t	*tip,	/* tmp inode */
1670	xfs_swapext_t	*sxp)
1671{
1672	xfs_mount_t	*mp = ip->i_mount;
1673	xfs_trans_t	*tp;
1674	xfs_bstat_t	*sbp = &sxp->sx_stat;
1675	xfs_ifork_t	*tempifp, *ifp, *tifp;
1676	int		src_log_flags, target_log_flags;
1677	int		error = 0;
1678	int		aforkblks = 0;
1679	int		taforkblks = 0;
1680	__uint64_t	tmp;
1681
1682	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1683	if (!tempifp) {
1684		error = XFS_ERROR(ENOMEM);
1685		goto out;
1686	}
1687
1688	/*
1689	 * we have to do two separate lock calls here to keep lockdep
1690	 * happy. If we try to get all the locks in one call, lockdep will
1691	 * report false positives when we drop the ILOCK and regain them
1692	 * below.
1693	 */
1694	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1695	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1696
1697	/* Verify that both files have the same format */
1698	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1699		error = XFS_ERROR(EINVAL);
1700		goto out_unlock;
1701	}
1702
1703	/* Verify both files are either real-time or non-realtime */
1704	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1705		error = XFS_ERROR(EINVAL);
1706		goto out_unlock;
1707	}
1708
1709	error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
1710	if (error)
1711		goto out_unlock;
1712	truncate_pagecache_range(VFS_I(tip), 0, -1);
1713
1714	/* Verify O_DIRECT for ftmp */
1715	if (VN_CACHED(VFS_I(tip)) != 0) {
1716		error = XFS_ERROR(EINVAL);
1717		goto out_unlock;
1718	}
1719
1720	/* Verify all data are being swapped */
1721	if (sxp->sx_offset != 0 ||
1722	    sxp->sx_length != ip->i_d.di_size ||
1723	    sxp->sx_length != tip->i_d.di_size) {
1724		error = XFS_ERROR(EFAULT);
1725		goto out_unlock;
1726	}
1727
1728	trace_xfs_swap_extent_before(ip, 0);
1729	trace_xfs_swap_extent_before(tip, 1);
1730
1731	/* check inode formats now that data is flushed */
1732	error = xfs_swap_extents_check_format(ip, tip);
1733	if (error) {
1734		xfs_notice(mp,
1735		    "%s: inode 0x%llx format is incompatible for exchanging.",
1736				__func__, ip->i_ino);
1737		goto out_unlock;
1738	}
1739
1740	/*
1741	 * Compare the current change & modify times with that
1742	 * passed in.  If they differ, we abort this swap.
1743	 * This is the mechanism used to ensure the calling
1744	 * process that the file was not changed out from
1745	 * under it.
1746	 */
1747	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1748	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1749	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1750	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1751		error = XFS_ERROR(EBUSY);
1752		goto out_unlock;
1753	}
1754
1755	/* We need to fail if the file is memory mapped.  Once we have tossed
1756	 * all existing pages, the page fault will have no option
1757	 * but to go to the filesystem for pages. By making the page fault call
1758	 * vop_read (or write in the case of autogrow) it blocks on the iolock
1759	 * until we have switched the extents.
1760	 */
1761	if (VN_MAPPED(VFS_I(ip))) {
1762		error = XFS_ERROR(EBUSY);
1763		goto out_unlock;
1764	}
1765
1766	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1767	xfs_iunlock(tip, XFS_ILOCK_EXCL);
1768
1769	/*
1770	 * There is a race condition here since we gave up the
1771	 * ilock.  However, the data fork will not change since
1772	 * we have the iolock (locked for truncation too) so we
1773	 * are safe.  We don't really care if non-io related
1774	 * fields change.
1775	 */
1776	truncate_pagecache_range(VFS_I(ip), 0, -1);
1777
1778	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1779	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1780	if (error) {
1781		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
1782		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
1783		xfs_trans_cancel(tp, 0);
1784		goto out;
1785	}
1786	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1787
1788	/*
1789	 * Count the number of extended attribute blocks
1790	 */
1791	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1792	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1793		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1794		if (error)
1795			goto out_trans_cancel;
1796	}
1797	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1798	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1799		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1800			&taforkblks);
1801		if (error)
1802			goto out_trans_cancel;
1803	}
1804
1805	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1806	xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1807
1808	/*
1809	 * Before we've swapped the forks, let's set the owners of the forks
1810	 * appropriately. We have to do this as we are demand paging the btree
1811	 * buffers, and so the validation done on read will expect the owner
1812	 * field to be correctly set. Once we change the owners, we can swap the
1813	 * inode forks.
1814	 *
1815	 * Note the trickiness in setting the log flags - we set the owner log
1816	 * flag on the opposite inode (i.e. the inode we are setting the new
1817	 * owner to be) because once we swap the forks and log that, log
1818	 * recovery is going to see the fork as owned by the swapped inode,
1819	 * not the pre-swapped inodes.
1820	 */
1821	src_log_flags = XFS_ILOG_CORE;
1822	target_log_flags = XFS_ILOG_CORE;
1823	if (ip->i_d.di_version == 3 &&
1824	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1825		target_log_flags |= XFS_ILOG_DOWNER;
1826		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1827					      tip->i_ino, NULL);
1828		if (error)
1829			goto out_trans_cancel;
1830	}
1831
1832	if (tip->i_d.di_version == 3 &&
1833	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1834		src_log_flags |= XFS_ILOG_DOWNER;
1835		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1836					      ip->i_ino, NULL);
1837		if (error)
1838			goto out_trans_cancel;
1839	}
1840
1841	/*
1842	 * Swap the data forks of the inodes
1843	 */
1844	ifp = &ip->i_df;
1845	tifp = &tip->i_df;
1846	*tempifp = *ifp;	/* struct copy */
1847	*ifp = *tifp;		/* struct copy */
1848	*tifp = *tempifp;	/* struct copy */
1849
1850	/*
1851	 * Fix the on-disk inode values
1852	 */
1853	tmp = (__uint64_t)ip->i_d.di_nblocks;
1854	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1855	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1856
1857	tmp = (__uint64_t) ip->i_d.di_nextents;
1858	ip->i_d.di_nextents = tip->i_d.di_nextents;
1859	tip->i_d.di_nextents = tmp;
1860
1861	tmp = (__uint64_t) ip->i_d.di_format;
1862	ip->i_d.di_format = tip->i_d.di_format;
1863	tip->i_d.di_format = tmp;
1864
1865	/*
1866	 * The extents in the source inode could still contain speculative
1867	 * preallocation beyond EOF (e.g. the file is open but not modified
1868	 * while defrag is in progress). In that case, we need to copy over the
1869	 * number of delalloc blocks the data fork in the source inode is
1870	 * tracking beyond EOF so that when the fork is truncated away when the
1871	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1872	 * counter on that inode.
1873	 */
1874	ASSERT(tip->i_delayed_blks == 0);
1875	tip->i_delayed_blks = ip->i_delayed_blks;
1876	ip->i_delayed_blks = 0;
1877
1878	switch (ip->i_d.di_format) {
1879	case XFS_DINODE_FMT_EXTENTS:
1880		/* If the extents fit in the inode, fix the
1881		 * pointer.  Otherwise it's already NULL or
1882		 * pointing to the extent.
1883		 */
1884		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1885			ifp->if_u1.if_extents =
1886				ifp->if_u2.if_inline_ext;
1887		}
1888		src_log_flags |= XFS_ILOG_DEXT;
1889		break;
1890	case XFS_DINODE_FMT_BTREE:
1891		ASSERT(ip->i_d.di_version < 3 ||
1892		       (src_log_flags & XFS_ILOG_DOWNER));
1893		src_log_flags |= XFS_ILOG_DBROOT;
1894		break;
1895	}
1896
1897	switch (tip->i_d.di_format) {
1898	case XFS_DINODE_FMT_EXTENTS:
1899		/* If the extents fit in the inode, fix the
1900		 * pointer.  Otherwise it's already NULL or
1901		 * pointing to the extent.
1902		 */
1903		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1904			tifp->if_u1.if_extents =
1905				tifp->if_u2.if_inline_ext;
1906		}
1907		target_log_flags |= XFS_ILOG_DEXT;
1908		break;
1909	case XFS_DINODE_FMT_BTREE:
1910		target_log_flags |= XFS_ILOG_DBROOT;
1911		ASSERT(tip->i_d.di_version < 3 ||
1912		       (target_log_flags & XFS_ILOG_DOWNER));
1913		break;
1914	}
1915
1916	xfs_trans_log_inode(tp, ip,  src_log_flags);
1917	xfs_trans_log_inode(tp, tip, target_log_flags);
1918
1919	/*
1920	 * If this is a synchronous mount, make sure that the
1921	 * transaction goes to disk before returning to the user.
1922	 */
1923	if (mp->m_flags & XFS_MOUNT_WSYNC)
1924		xfs_trans_set_sync(tp);
1925
1926	error = xfs_trans_commit(tp, 0);
1927
1928	trace_xfs_swap_extent_after(ip, 0);
1929	trace_xfs_swap_extent_after(tip, 1);
1930out:
1931	kmem_free(tempifp);
1932	return error;
1933
1934out_unlock:
1935	xfs_iunlock(ip,  XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1936	xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1937	goto out;
1938
1939out_trans_cancel:
1940	xfs_trans_cancel(tp, 0);
1941	goto out_unlock;
1942}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * Copyright (c) 2012 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_bit.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_btree.h"
  18#include "xfs_trans.h"
  19#include "xfs_alloc.h"
  20#include "xfs_bmap.h"
  21#include "xfs_bmap_util.h"
  22#include "xfs_bmap_btree.h"
  23#include "xfs_rtalloc.h"
  24#include "xfs_error.h"
  25#include "xfs_quota.h"
  26#include "xfs_trans_space.h"
  27#include "xfs_trace.h"
  28#include "xfs_icache.h"
  29#include "xfs_iomap.h"
  30#include "xfs_reflink.h"
  31
  32/* Kernel only BMAP related definitions and functions */
  33
  34/*
  35 * Convert the given file system block to a disk block.  We have to treat it
  36 * differently based on whether the file is a real time file or not, because the
  37 * bmap code does.
  38 */
  39xfs_daddr_t
  40xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  41{
  42	if (XFS_IS_REALTIME_INODE(ip))
  43		return XFS_FSB_TO_BB(ip->i_mount, fsb);
  44	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
  45}
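/*
 * Example of the distinction above (illustrative note, assuming the
 * standard fsb encodings): the realtime device is addressed linearly,
 * so a realtime fsb converts with a plain block-to-basic-block shift,
 * while a data device fsb encodes an AG number plus a block offset
 * within that AG, which XFS_FSB_TO_DADDR() decomposes first.
 */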
  46
  47/*
  48 * Routine to zero an extent on disk allocated to the specific inode.
  49 *
  50 * The VFS functions take a linearised filesystem block offset, so we have to
  51 * convert the sparse xfs fsb to the right format first.
  52 * VFS types are real funky, too.
  53 */
  54int
  55xfs_zero_extent(
  56	struct xfs_inode	*ip,
  57	xfs_fsblock_t		start_fsb,
  58	xfs_off_t		count_fsb)
  59{
  60	struct xfs_mount	*mp = ip->i_mount;
  61	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
  62	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
  63	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
  64
  65	return blkdev_issue_zeroout(target->bt_bdev,
  66		block << (mp->m_super->s_blocksize_bits - 9),
  67		count_fsb << (mp->m_super->s_blocksize_bits - 9),
  68		GFP_NOFS, 0);
  69}
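/*
 * Worked example of the sector conversion above (illustrative, assuming
 * 4096-byte filesystem blocks): s_blocksize_bits is 12, so each block is
 * 1 << (12 - 9) = 8 512-byte sectors.  Zeroing count_fsb = 4 blocks
 * starting at block 100 therefore issues a zeroout for sectors 800..831.
 */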
  70
  71#ifdef CONFIG_XFS_RT
  72int
  73xfs_bmap_rtalloc(
  74	struct xfs_bmalloca	*ap)
  75{
  76	struct xfs_mount	*mp = ap->ip->i_mount;
  77	xfs_fileoff_t		orig_offset = ap->offset;
  78	xfs_rtblock_t		rtb;
  79	xfs_extlen_t		prod = 0;  /* product factor for allocators */
  80	xfs_extlen_t		mod = 0;   /* offset remainder vs. alignment */
  81	xfs_extlen_t		ralen = 0; /* realtime allocation length */
  82	xfs_extlen_t		align;     /* minimum allocation alignment */
  83	xfs_extlen_t		orig_length = ap->length;
  84	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
  85	xfs_extlen_t		raminlen;
  86	bool			rtlocked = false;
  87	bool			ignore_locality = false;
  88	int			error;
  89
  90	align = xfs_get_extsz_hint(ap->ip);
  91retry:
  92	prod = align / mp->m_sb.sb_rextsize;
  93	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
  94					align, 1, ap->eof, 0,
  95					ap->conv, &ap->offset, &ap->length);
  96	if (error)
  97		return error;
  98	ASSERT(ap->length);
  99	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
 100
 101	/*
 102	 * If we shifted the file offset downward to satisfy an extent size
 103	 * hint, increase minlen by that amount so that the allocator won't
 104	 * give us an allocation that's too short to cover at least one of the
 105	 * blocks that the caller asked for.
 106	 */
 107	if (ap->offset != orig_offset)
 108		minlen += orig_offset - ap->offset;
 109
 110	/*
 111	 * If the offset & length are not perfectly aligned
 112	 * then kill prod, it will just get us in trouble.
 113	 */
 114	div_u64_rem(ap->offset, align, &mod);
 115	if (mod || ap->length % align)
 116		prod = 1;
 117	/*
 118	 * Set ralen to be the actual requested length in rtextents.
 119	 */
 120	ralen = ap->length / mp->m_sb.sb_rextsize;
 121	/*
 122	 * If the old value was close enough to XFS_MAX_BMBT_EXTLEN that
 123	 * we rounded up to it, cut it back so it's valid again.
 124	 * Note that if it's a really large request (bigger than
 125	 * XFS_MAX_BMBT_EXTLEN), we don't hear about that number, and can't
 126	 * adjust the starting point to match it.
 127	 */
 128	if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
 129		ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;
 130
 131	/*
 132	 * Lock out modifications to both the RT bitmap and summary inodes
 133	 */
 134	if (!rtlocked) {
 135		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
 136		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
 137		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
 138		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
 139		rtlocked = true;
 140	}
 141
 142	/*
 143	 * If it's an allocation to an empty file at offset 0,
 144	 * pick an extent that will space things out in the rt area.
 145	 */
 146	if (ap->eof && ap->offset == 0) {
 147		xfs_rtblock_t rtx; /* realtime extent no */
 148
 149		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
 150		if (error)
 151			return error;
 152		ap->blkno = rtx * mp->m_sb.sb_rextsize;
 153	} else {
 154		ap->blkno = 0;
 155	}
 156
 157	xfs_bmap_adjacent(ap);
 158
 159	/*
 160	 * Realtime allocation, done through xfs_rtallocate_extent.
 161	 */
 162	if (ignore_locality)
 163		ap->blkno = 0;
 164	else
 165		do_div(ap->blkno, mp->m_sb.sb_rextsize);
 166	rtb = ap->blkno;
 167	ap->length = ralen;
 168	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
 169	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
 170			&ralen, ap->wasdel, prod, &rtb);
 171	if (error)
 172		return error;
 173
 174	if (rtb != NULLRTBLOCK) {
 175		ap->blkno = rtb * mp->m_sb.sb_rextsize;
 176		ap->length = ralen * mp->m_sb.sb_rextsize;
 177		ap->ip->i_nblocks += ap->length;
 178		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 179		if (ap->wasdel)
 180			ap->ip->i_delayed_blks -= ap->length;
 181		/*
 182		 * Adjust the disk quota also. This was reserved
 183		 * earlier.
 184		 */
 185		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
 186			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
 187					XFS_TRANS_DQ_RTBCOUNT, ap->length);
 188		return 0;
 189	}
 190
 191	if (align > mp->m_sb.sb_rextsize) {
 192		/*
 193		 * We previously enlarged the request length to try to satisfy
 194		 * an extent size hint.  The allocator didn't return anything,
 195		 * so reset the parameters to the original values and try again
 196		 * without alignment criteria.
 197		 */
 198		ap->offset = orig_offset;
 199		ap->length = orig_length;
 200		minlen = align = mp->m_sb.sb_rextsize;
 201		goto retry;
 202	}
 203
 204	if (!ignore_locality && ap->blkno != 0) {
 205		/*
 206		 * If we can't allocate near a specific rt extent, try again
 207		 * without locality criteria.
 208		 */
 209		ignore_locality = true;
 210		goto retry;
 211	}
 212
 213	ap->blkno = NULLFSBLOCK;
 214	ap->length = 0;
 215	return 0;
 216}
 217#endif /* CONFIG_XFS_RT */
 218
 219/*
 220 * Extent tree block counting routines.
 221 */
 222
 223/*
 224 * Count leaf blocks given a range of extent records.  Delayed allocation
 225 * extents are not counted towards the totals.
 226 */
 227xfs_extnum_t
 228xfs_bmap_count_leaves(
 229	struct xfs_ifork	*ifp,
 230	xfs_filblks_t		*count)
 231{
 232	struct xfs_iext_cursor	icur;
 233	struct xfs_bmbt_irec	got;
 234	xfs_extnum_t		numrecs = 0;
 235
 236	for_each_xfs_iext(ifp, &icur, &got) {
 237		if (!isnullstartblock(got.br_startblock)) {
 238			*count += got.br_blockcount;
 239			numrecs++;
 240		}
 241	}
 242
 243	return numrecs;
 244}
 245
 246/*
 247 * Count fsblocks of the given fork.  Delayed allocation extents are
 248 * not counted towards the totals.
 249 */
 250int
 251xfs_bmap_count_blocks(
 252	struct xfs_trans	*tp,
 253	struct xfs_inode	*ip,
 254	int			whichfork,
 255	xfs_extnum_t		*nextents,
 256	xfs_filblks_t		*count)
 257{
 258	struct xfs_mount	*mp = ip->i_mount;
 259	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
 260	struct xfs_btree_cur	*cur;
 261	xfs_extlen_t		btblocks = 0;
 262	int			error;
 263
 264	*nextents = 0;
 265	*count = 0;
 266
 267	if (!ifp)
 268		return 0;
 269
 270	switch (ifp->if_format) {
 271	case XFS_DINODE_FMT_BTREE:
 272		error = xfs_iread_extents(tp, ip, whichfork);
 273		if (error)
 274			return error;
 275
 276		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 277		error = xfs_btree_count_blocks(cur, &btblocks);
 278		xfs_btree_del_cursor(cur, error);
 279		if (error)
 280			return error;
 281
 282		/*
 283		 * xfs_btree_count_blocks includes the root block contained in
 284		 * the inode fork in @btblocks, so subtract one because we're
 285		 * only interested in allocated disk blocks.
 286		 */
 287		*count += btblocks - 1;
 288
 289		fallthrough;
 290	case XFS_DINODE_FMT_EXTENTS:
 291		*nextents = xfs_bmap_count_leaves(ifp, count);
 292		break;
 293	}
 294
 295	return 0;
 296}
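/*
 * Minimal usage sketch for the counting helper above (hypothetical
 * caller, not part of the original source).  Assumes the caller already
 * holds ip's ILOCK (shared is enough for a query); a NULL transaction is
 * acceptable for a read-only query, mirroring how xfs_getbmap() below
 * calls xfs_iread_extents() without a transaction.
 */
#if 0
static void example_count_data_fork(struct xfs_inode *ip)
{
	xfs_extnum_t	nextents;
	xfs_filblks_t	count;

	if (!xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK, &nextents,
			&count))
		xfs_notice(ip->i_mount, "%llu blocks in %llu extents",
				(unsigned long long)count,
				(unsigned long long)nextents);
}
#endif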
 297
 298static int
 299xfs_getbmap_report_one(
 300	struct xfs_inode	*ip,
 301	struct getbmapx		*bmv,
 302	struct kgetbmap		*out,
 303	int64_t			bmv_end,
 304	struct xfs_bmbt_irec	*got)
 305{
 306	struct kgetbmap		*p = out + bmv->bmv_entries;
 307	bool			shared = false;
 308	int			error;
 309
 310	error = xfs_reflink_trim_around_shared(ip, got, &shared);
 311	if (error)
 312		return error;
 313
 314	if (isnullstartblock(got->br_startblock) ||
 315	    got->br_startblock == DELAYSTARTBLOCK) {
 316		/*
 317		 * Delalloc extents that start beyond EOF can occur due to
 318		 * speculative EOF allocation when the delalloc extent is larger
 319		 * than the largest freespace extent at conversion time.  These
 320		 * extents cannot be converted by data writeback, so can exist
 321		 * here even if we are not supposed to be finding delalloc
 322		 * extents.
 323		 */
 324		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
 325			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
 326
 327		p->bmv_oflags |= BMV_OF_DELALLOC;
 328		p->bmv_block = -2;
 329	} else {
 330		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
 331	}
 332
 333	if (got->br_state == XFS_EXT_UNWRITTEN &&
 334	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
 335		p->bmv_oflags |= BMV_OF_PREALLOC;
 336
 337	if (shared)
 338		p->bmv_oflags |= BMV_OF_SHARED;
 339
 340	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
 341	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
 342
 343	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 344	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 345	bmv->bmv_entries++;
 346	return 0;
 347}
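/*
 * Note on the bmv_block values used above and in the hole reporting
 * below: in the getbmapx ABI, -1 marks a hole and -2 marks a delalloc
 * (not yet allocated) range, while real mappings carry the disk address
 * from xfs_fsb_to_db().
 */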
 348
 349static void
 350xfs_getbmap_report_hole(
 351	struct xfs_inode	*ip,
 352	struct getbmapx		*bmv,
 353	struct kgetbmap		*out,
 354	int64_t			bmv_end,
 355	xfs_fileoff_t		bno,
 356	xfs_fileoff_t		end)
 357{
 358	struct kgetbmap		*p = out + bmv->bmv_entries;
 359
 360	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
 361		return;
 362
 363	p->bmv_block = -1;
 364	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
 365	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
 366
 367	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
 368	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
 369	bmv->bmv_entries++;
 370}
 371
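/*
 * The first slot of the caller's getbmapx buffer holds the header, so
 * only bmv_count - 1 slots are available for extent records; hence the
 * "- 1" in the fullness check below.
 */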
 372static inline bool
 373xfs_getbmap_full(
 374	struct getbmapx		*bmv)
 375{
 376	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
 377}
 378
 379static bool
 380xfs_getbmap_next_rec(
 381	struct xfs_bmbt_irec	*rec,
 382	xfs_fileoff_t		total_end)
 383{
 384	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
 385
 386	if (end == total_end)
 387		return false;
 388
 389	rec->br_startoff += rec->br_blockcount;
 390	if (!isnullstartblock(rec->br_startblock) &&
 391	    rec->br_startblock != DELAYSTARTBLOCK)
 392		rec->br_startblock += rec->br_blockcount;
 393	rec->br_blockcount = total_end - end;
 394	return true;
 395}
 396
 397/*
 398 * Get inode's extents as described in bmv, and format for output.
 399 * Calls formatter to fill the user's buffer until all extents
 400 * are mapped, until the passed-in bmv->bmv_count slots have
 401 * been filled, or until the formatter short-circuits the loop,
 402 * if it is tracking filled-in extents on its own.
 403 */
 404int						/* error code */
 405xfs_getbmap(
 406	struct xfs_inode	*ip,
 407	struct getbmapx		*bmv,		/* user bmap structure */
 408	struct kgetbmap		*out)
 409{
 410	struct xfs_mount	*mp = ip->i_mount;
 411	int			iflags = bmv->bmv_iflags;
 412	int			whichfork, lock, error = 0;
 413	int64_t			bmv_end, max_len;
 414	xfs_fileoff_t		bno, first_bno;
 415	struct xfs_ifork	*ifp;
 416	struct xfs_bmbt_irec	got, rec;
 417	xfs_filblks_t		len;
 418	struct xfs_iext_cursor	icur;
 419
 420	if (bmv->bmv_iflags & ~BMV_IF_VALID)
 421		return -EINVAL;
 422#ifndef DEBUG
 423	/* Only allow CoW fork queries if we're debugging. */
 424	if (iflags & BMV_IF_COWFORK)
 425		return -EINVAL;
 426#endif
 427	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
 428		return -EINVAL;
 429
 430	if (bmv->bmv_length < -1)
 431		return -EINVAL;
 432	bmv->bmv_entries = 0;
 433	if (bmv->bmv_length == 0)
 434		return 0;
 435
 436	if (iflags & BMV_IF_ATTRFORK)
 437		whichfork = XFS_ATTR_FORK;
 438	else if (iflags & BMV_IF_COWFORK)
 439		whichfork = XFS_COW_FORK;
 440	else
 441		whichfork = XFS_DATA_FORK;
 442
 443	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 444	switch (whichfork) {
 445	case XFS_ATTR_FORK:
 446		lock = xfs_ilock_attr_map_shared(ip);
 447		if (!xfs_inode_has_attr_fork(ip))
 448			goto out_unlock_ilock;
 449
 450		max_len = 1LL << 32;
 451		break;
 452	case XFS_COW_FORK:
 453		lock = XFS_ILOCK_SHARED;
 454		xfs_ilock(ip, lock);
 455
 456		/* No CoW fork? Just return */
 457		if (!xfs_ifork_ptr(ip, whichfork))
 458			goto out_unlock_ilock;
 459
 460		if (xfs_get_cowextsz_hint(ip))
 461			max_len = mp->m_super->s_maxbytes;
 462		else
 463			max_len = XFS_ISIZE(ip);
 464		break;
 465	case XFS_DATA_FORK:
 466		if (!(iflags & BMV_IF_DELALLOC) &&
 467		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
 468			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 469			if (error)
 470				goto out_unlock_iolock;
 471
 472			/*
 473			 * Even after flushing the inode, there can still be
 474			 * delalloc blocks on the inode beyond EOF due to
 475			 * speculative preallocation.  These are not removed
 476			 * until the release function is called or the inode
 477			 * is inactivated.  Hence we cannot assert here that
 478			 * ip->i_delayed_blks == 0.
 479			 */
 480		}
 481
 482		if (xfs_get_extsz_hint(ip) ||
 483		    (ip->i_diflags &
 484		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
 485			max_len = mp->m_super->s_maxbytes;
 486		else
 487			max_len = XFS_ISIZE(ip);
 488
 489		lock = xfs_ilock_data_map_shared(ip);
 490		break;
 491	}
 492
 493	ifp = xfs_ifork_ptr(ip, whichfork);
 494
 495	switch (ifp->if_format) {
 496	case XFS_DINODE_FMT_EXTENTS:
 497	case XFS_DINODE_FMT_BTREE:
 498		break;
 499	case XFS_DINODE_FMT_LOCAL:
 500		/* Local format inode forks report no extents. */
 501		goto out_unlock_ilock;
 502	default:
 503		error = -EINVAL;
 504		goto out_unlock_ilock;
 505	}
 506
 507	if (bmv->bmv_length == -1) {
 508		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
 509		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
 510	}
 511
 512	bmv_end = bmv->bmv_offset + bmv->bmv_length;
 513
 514	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
 515	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
 516
 517	error = xfs_iread_extents(NULL, ip, whichfork);
 518	if (error)
 519		goto out_unlock_ilock;
 520
 521	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
 522		/*
 523		 * Report a whole-file hole if the delalloc flag is set to
 524		 * stay compatible with the old implementation.
 525		 */
 526		if (iflags & BMV_IF_DELALLOC)
 527			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 528					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
 529		goto out_unlock_ilock;
 530	}
 531
 532	while (!xfs_getbmap_full(bmv)) {
 533		xfs_trim_extent(&got, first_bno, len);
 534
 535		/*
 536		 * Report an entry for a hole if this extent doesn't directly
 537		 * follow the previous one.
 538		 */
 539		if (got.br_startoff > bno) {
 540			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
 541					got.br_startoff);
 542			if (xfs_getbmap_full(bmv))
 543				break;
 544		}
 545
 546		/*
 547		 * In order to report shared extents accurately, we report each
 548		 * distinct shared / unshared part of a single bmbt record with
 549		 * an individual getbmapx record.
 550		 */
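		/*
		 * For example (illustrative): a 100-block extent whose
		 * first 40 blocks are shared is returned as two records,
		 * one 40-block record with BMV_OF_SHARED set and one
		 * 60-block record without it.
		 */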
 551		bno = got.br_startoff + got.br_blockcount;
 552		rec = got;
 553		do {
 554			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
 555					&rec);
 556			if (error || xfs_getbmap_full(bmv))
 557				goto out_unlock_ilock;
 558		} while (xfs_getbmap_next_rec(&rec, bno));
 559
 560		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
 561			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
 562
 563			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
 564
 565			if (whichfork != XFS_ATTR_FORK && bno < end &&
 566			    !xfs_getbmap_full(bmv)) {
 567				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
 568						bno, end);
 569			}
 570			break;
 571		}
 572
 573		if (bno >= first_bno + len)
 574			break;
 575	}
 576
 577out_unlock_ilock:
 578	xfs_iunlock(ip, lock);
 579out_unlock_iolock:
 580	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 581	return error;
 582}
 583
 584/*
 585 * Dead simple method of punching delayed allocation blocks from a range in
 586 * the inode.  This will always punch out both the start and end blocks, even
 587 * if the ranges only partially overlap them, so it is up to the caller to
 588 * ensure that partial blocks are not passed in.
 589 */
 590int
 591xfs_bmap_punch_delalloc_range(
 592	struct xfs_inode	*ip,
 593	xfs_off_t		start_byte,
 594	xfs_off_t		end_byte)
 595{
 596	struct xfs_mount	*mp = ip->i_mount;
 597	struct xfs_ifork	*ifp = &ip->i_df;
 598	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
 599	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
 600	struct xfs_bmbt_irec	got, del;
 601	struct xfs_iext_cursor	icur;
 602	int			error = 0;
 603
 604	ASSERT(!xfs_need_iread_extents(ifp));
 605
 606	xfs_ilock(ip, XFS_ILOCK_EXCL);
 607	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
 608		goto out_unlock;
 609
 610	while (got.br_startoff + got.br_blockcount > start_fsb) {
 611		del = got;
 612		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
 613
 614		/*
 615		 * A delete can push the cursor forward. Step back to the
 616		 * previous extent on non-delalloc or extents outside the
 617		 * target range.
 618		 */
 619		if (!del.br_blockcount ||
 620		    !isnullstartblock(del.br_startblock)) {
 621			if (!xfs_iext_prev_extent(ifp, &icur, &got))
 622				break;
 623			continue;
 624		}
 625
 626		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
 627						  &got, &del);
 628		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
 629			break;
 630	}
 631
 632out_unlock:
 633	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 634	return error;
 635}
 636
 637/*
 638 * Test whether it is appropriate to check an inode for and free post EOF
 639 * blocks. The 'force' parameter determines whether we should also consider
 640 * regular files that are marked preallocated or append-only.
 641 */
 642bool
 643xfs_can_free_eofblocks(
 644	struct xfs_inode	*ip,
 645	bool			force)
 646{
 647	struct xfs_bmbt_irec	imap;
 648	struct xfs_mount	*mp = ip->i_mount;
 649	xfs_fileoff_t		end_fsb;
 650	xfs_fileoff_t		last_fsb;
 651	int			nimaps = 1;
 652	int			error;
 653
 654	/*
 655	 * Caller must either hold the exclusive io lock, or be inactivating
 656	 * the inode, which guarantees there are no other users of the inode.
 657	 */
 658	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
 659	       (VFS_I(ip)->i_state & I_FREEING));
 660
 661	/* prealloc/delalloc exists only on regular files */
 662	if (!S_ISREG(VFS_I(ip)->i_mode))
 663		return false;
 664
 665	/*
 666	 * Zero sized files with no cached pages and delalloc blocks will not
 667	 * have speculative prealloc/delalloc blocks to remove.
 668	 */
 669	if (VFS_I(ip)->i_size == 0 &&
 670	    VFS_I(ip)->i_mapping->nrpages == 0 &&
 671	    ip->i_delayed_blks == 0)
 672		return false;
 673
 674	/* If we haven't read in the extent list, then don't do it now. */
 675	if (xfs_need_iread_extents(&ip->i_df))
 676		return false;
 677
 678	/*
 679	 * Do not free real preallocated or append-only files unless the file
 680	 * has delalloc blocks and we are forced to remove them.
 681	 */
 682	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
 683		if (!force || ip->i_delayed_blks == 0)
 684			return false;
 685
 686	/*
 687	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
 688	 * range supported by the page cache, because the truncation will loop
 689	 * forever.
 690	 */
 691	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
 692	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
 693		end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
 694	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 695	if (last_fsb <= end_fsb)
 696		return false;
 697
 698	/*
 699	 * Look up the mapping for the first block past EOF.  If we can't find
 700	 * it, there's nothing to free.
 701	 */
 702	xfs_ilock(ip, XFS_ILOCK_SHARED);
 703	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
 704			0);
 705	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 706	if (error || nimaps == 0)
 707		return false;
 708
 709	/*
 710	 * If there's a real mapping there or there are delayed allocation
 711	 * reservations, then we have post-EOF blocks to try to free.
 712	 */
 713	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
 714}
 715
 716/*
 717 * This is called to free any blocks beyond eof. The caller must hold
 718 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 719 * reference to the inode.
 720 */
 721int
 722xfs_free_eofblocks(
 723	struct xfs_inode	*ip)
 724{
 725	struct xfs_trans	*tp;
 726	struct xfs_mount	*mp = ip->i_mount;
 727	int			error;
 728
 729	/* Attach the dquots to the inode up front. */
 730	error = xfs_qm_dqattach(ip);
 731	if (error)
 732		return error;
 733
 734	/* Wait on dio to ensure i_size has settled. */
 735	inode_dio_wait(VFS_I(ip));
 736
 737	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 738	if (error) {
 739		ASSERT(xfs_is_shutdown(mp));
 740		return error;
 741	}
 742
 743	xfs_ilock(ip, XFS_ILOCK_EXCL);
 744	xfs_trans_ijoin(tp, ip, 0);
 745
 746	/*
 747	 * Do not update the on-disk file size.  If we update the on-disk file
 748	 * size and then the system crashes before the contents of the file are
 749	 * flushed to disk then the files may be full of holes (ie NULL files
 750	 * bug).
 751	 */
 752	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
 753				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
 754	if (error)
 755		goto err_cancel;
 756
 757	error = xfs_trans_commit(tp);
 758	if (error)
 759		goto out_unlock;
 760
 761	xfs_inode_clear_eofblocks_tag(ip);
 762	goto out_unlock;
 763
 764err_cancel:
 765	/*
 766	 * If we get an error at this point we simply don't
 767	 * bother truncating the file.
 768	 */
 769	xfs_trans_cancel(tp);
 770out_unlock:
 771	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 772	return error;
 773}
 774
 775int
 776xfs_alloc_file_space(
 777	struct xfs_inode	*ip,
 778	xfs_off_t		offset,
 779	xfs_off_t		len)
 780{
 781	xfs_mount_t		*mp = ip->i_mount;
 782	xfs_off_t		count;
 783	xfs_filblks_t		allocated_fsb;
 784	xfs_filblks_t		allocatesize_fsb;
 785	xfs_extlen_t		extsz, temp;
 786	xfs_fileoff_t		startoffset_fsb;
 787	xfs_fileoff_t		endoffset_fsb;
 788	int			nimaps;
 789	int			rt;
 790	xfs_trans_t		*tp;
 791	xfs_bmbt_irec_t		imaps[1], *imapp;
 792	int			error;
 793
 794	trace_xfs_alloc_file_space(ip);
 795
 796	if (xfs_is_shutdown(mp))
 797		return -EIO;
 798
 799	error = xfs_qm_dqattach(ip);
 800	if (error)
 801		return error;
 802
 803	if (len <= 0)
 804		return -EINVAL;
 805
 806	rt = XFS_IS_REALTIME_INODE(ip);
 807	extsz = xfs_get_extsz_hint(ip);
 808
 809	count = len;
 810	imapp = &imaps[0];
 811	nimaps = 1;
 812	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
 813	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
 814	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
 815
 816	/*
 817	 * Allocate file space until done or until there is an error
 818	 */
 819	while (allocatesize_fsb && !error) {
 820		xfs_fileoff_t	s, e;
 821		unsigned int	dblocks, rblocks, resblks;
 822
 823		/*
 824		 * Determine space reservations for data/realtime.
 825		 */
 826		if (unlikely(extsz)) {
 827			s = startoffset_fsb;
 828			do_div(s, extsz);
 829			s *= extsz;
 830			e = startoffset_fsb + allocatesize_fsb;
 831			div_u64_rem(startoffset_fsb, extsz, &temp);
 832			if (temp)
 833				e += temp;
 834			div_u64_rem(e, extsz, &temp);
 835			if (temp)
 836				e += extsz - temp;
 837		} else {
 838			s = 0;
 839			e = allocatesize_fsb;
 840		}
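		/*
		 * Worked example of the rounding above (illustrative):
		 * with extsz = 16, startoffset_fsb = 10 and
		 * allocatesize_fsb = 20, s rounds down to 0, while e
		 * grows from 30 to 40 (offset remainder) and then rounds
		 * up to 48, deliberately over-estimating the reservation.
		 */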
 841
 842		/*
 843		 * The transaction reservation is limited to a 32-bit block
 844		 * count, hence we need to limit the number of blocks we are
 845		 * trying to reserve to avoid an overflow. We can't allocate
 846		 * more than @nimaps extents, and an extent is limited on disk
 847		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
 848		 * limit.
 849		 */
 850		resblks = min_t(xfs_fileoff_t, (e - s),
 851				(XFS_MAX_BMBT_EXTLEN * nimaps));
 852		if (unlikely(rt)) {
 853			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 854			rblocks = resblks;
 855		} else {
 856			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
 857			rblocks = 0;
 858		}
 859
 860		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
 861				dblocks, rblocks, false, &tp);
 862		if (error)
 863			break;
 864
 865		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
 866				XFS_IEXT_ADD_NOSPLIT_CNT);
 867		if (error == -EFBIG)
 868			error = xfs_iext_count_upgrade(tp, ip,
 869					XFS_IEXT_ADD_NOSPLIT_CNT);
 870		if (error)
 871			goto error;
 872
 
 873		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
 874				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
 875				&nimaps);
 876		if (error)
 877			goto error;
 878
 879		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
 880		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 881
 882		error = xfs_trans_commit(tp);
 883		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 884		if (error)
 885			break;
 886
 887		allocated_fsb = imapp->br_blockcount;
 888
 889		if (nimaps == 0) {
 890			error = -ENOSPC;
 891			break;
 892		}
 893
 894		startoffset_fsb += allocated_fsb;
 895		allocatesize_fsb -= allocated_fsb;
 896	}
 897
 898	return error;
 899
 900error:
 901	xfs_trans_cancel(tp);
 902	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 903	return error;
 904}
 905
 906static int
 907xfs_unmap_extent(
 908	struct xfs_inode	*ip,
 909	xfs_fileoff_t		startoffset_fsb,
 910	xfs_filblks_t		len_fsb,
 911	int			*done)
 912{
 913	struct xfs_mount	*mp = ip->i_mount;
 914	struct xfs_trans	*tp;
 915	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 916	int			error;
 917
 918	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
 919			false, &tp);
 920	if (error)
 921		return error;
 922
 923	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
 924			XFS_IEXT_PUNCH_HOLE_CNT);
 925	if (error == -EFBIG)
 926		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
 927	if (error)
 928		goto out_trans_cancel;
 929
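	/*
	 * The literal 2 below is xfs_bunmapi()'s "nexts" argument: unmap
	 * at most two extents per transaction.  The caller loops on *done
	 * until the whole range has been punched out.
	 */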
 930	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
 931	if (error)
 932		goto out_trans_cancel;
 933
 934	error = xfs_trans_commit(tp);
 935out_unlock:
 936	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 937	return error;
 938
 939out_trans_cancel:
 940	xfs_trans_cancel(tp);
 941	goto out_unlock;
 942}
 943
 944/* Caller must first wait for the completion of any pending DIOs if required. */
 945int
 946xfs_flush_unmap_range(
 947	struct xfs_inode	*ip,
 948	xfs_off_t		offset,
 949	xfs_off_t		len)
 950{
 951	struct xfs_mount	*mp = ip->i_mount;
 952	struct inode		*inode = VFS_I(ip);
 953	xfs_off_t		rounding, start, end;
 954	int			error;
 955
 956	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
 957	start = round_down(offset, rounding);
 958	end = round_up(offset + len, rounding) - 1;
 959
 960	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 961	if (error)
 962		return error;
 963	truncate_pagecache_range(inode, start, end);
 964	return 0;
 965}
 966
 967int
 968xfs_free_file_space(
 969	struct xfs_inode	*ip,
 970	xfs_off_t		offset,
 971	xfs_off_t		len)
 972{
 973	struct xfs_mount	*mp = ip->i_mount;
 974	xfs_fileoff_t		startoffset_fsb;
 975	xfs_fileoff_t		endoffset_fsb;
 976	int			done = 0, error;
 977
 978	trace_xfs_free_file_space(ip);
 979
 980	error = xfs_qm_dqattach(ip);
 981	if (error)
 982		return error;
 983
 984	if (len <= 0)	/* if nothing being freed */
 985		return 0;
 986
 987	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
 988	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 989
 990	/* We can only free complete realtime extents. */
 991	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
 992		startoffset_fsb = roundup_64(startoffset_fsb,
 993					     mp->m_sb.sb_rextsize);
 994		endoffset_fsb = rounddown_64(endoffset_fsb,
 995					     mp->m_sb.sb_rextsize);
 996	}
 997
 998	/*
 999	 * Need to zero the stuff we're not freeing, on disk.
1000	 */
1001	if (endoffset_fsb > startoffset_fsb) {
1002		while (!done) {
1003			error = xfs_unmap_extent(ip, startoffset_fsb,
1004					endoffset_fsb - startoffset_fsb, &done);
1005			if (error)
1006				return error;
1007		}
1008	}
1009
1010	/*
1011	 * Now that we've unmapped all full blocks we'll have to zero out any
1012	 * partial block at the beginning and/or end.  xfs_zero_range is smart
1013	 * enough to skip any holes, including those we just created, but we
1014	 * must take care not to zero beyond EOF and enlarge i_size.
1015	 */
1016	if (offset >= XFS_ISIZE(ip))
1017		return 0;
1018	if (offset + len > XFS_ISIZE(ip))
1019		len = XFS_ISIZE(ip) - offset;
1020	error = xfs_zero_range(ip, offset, len, NULL);
1021	if (error)
1022		return error;
1023
1024	/*
1025	 * If we zeroed right up to EOF and EOF straddles a page boundary we
1026	 * must make sure that the post-EOF area is also zeroed because the
1027	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
1028	 * Writeback of the eof page will do this, albeit clumsily.
1029	 */
1030	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
1031		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1032				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
1033	}
1034
1035	return error;
1036}
1037
1038static int
1039xfs_prepare_shift(
1040	struct xfs_inode	*ip,
1041	loff_t			offset)
1042{
1043	struct xfs_mount	*mp = ip->i_mount;
1044	int			error;
1045
1046	/*
1047	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1048	 * into the accessible region of the file.
1049	 */
1050	if (xfs_can_free_eofblocks(ip, true)) {
1051		error = xfs_free_eofblocks(ip);
1052		if (error)
1053			return error;
1054	}
1055
1056	/*
1057	 * Shift operations must stabilize the start block offset boundary along
1058	 * with the full range of the operation. If we don't, a COW writeback
1059	 * completion could race with an insert, front merge with the start
1060	 * extent (after split) during the shift and corrupt the file. Start
1061	 * with the block just prior to the start to stabilize the boundary.
1062	 */
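	/*
	 * For example (illustrative, 4096-byte blocks): an offset of 6000
	 * rounds down to 4096 and then steps back one block to 0, so the
	 * flush below also covers the extent containing the boundary.
	 */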
1063	offset = round_down(offset, mp->m_sb.sb_blocksize);
1064	if (offset)
1065		offset -= mp->m_sb.sb_blocksize;
1066
1067	/*
1068	 * Writeback and invalidate cache for the remainder of the file as we're
1069	 * about to shift down every extent from offset to EOF.
1070	 */
1071	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1072	if (error)
1073		return error;
1074
1075	/*
1076	 * Clean out anything hanging around in the cow fork now that
1077	 * we've flushed all the dirty data out to disk to avoid having
1078	 * CoW extents at the wrong offsets.
1079	 */
1080	if (xfs_inode_has_cow_data(ip)) {
1081		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1082				true);
1083		if (error)
1084			return error;
1085	}
1086
1087	return 0;
1088}
1089
1090/*
1091 * xfs_collapse_file_space()
1092 *	This routine frees disk space and shifts extents for the given file.
1093 *	The first thing we do is free the data blocks in the specified range
1094 *	by calling xfs_free_file_space(), which also syncs dirty data and
1095 *	invalidates the page cache over the region on which the collapse
1096 *	range is working. Then we shift extent records left to cover the hole.
1097 * RETURNS:
1098 *	0 on success
1099 *	errno on error
1100 *
1101 */
1102int
1103xfs_collapse_file_space(
1104	struct xfs_inode	*ip,
1105	xfs_off_t		offset,
1106	xfs_off_t		len)
1107{
1108	struct xfs_mount	*mp = ip->i_mount;
1109	struct xfs_trans	*tp;
1110	int			error;
1111	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1112	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1113	bool			done = false;
1114
1115	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1116	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1117
1118	trace_xfs_collapse_file_space(ip);
1119
1120	error = xfs_free_file_space(ip, offset, len);
1121	if (error)
1122		return error;
1123
1124	error = xfs_prepare_shift(ip, offset);
1125	if (error)
1126		return error;
1127
1128	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1129	if (error)
1130		return error;
1131
1132	xfs_ilock(ip, XFS_ILOCK_EXCL);
1133	xfs_trans_ijoin(tp, ip, 0);
1134
1135	while (!done) {
1136		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1137				&done);
1138		if (error)
1139			goto out_trans_cancel;
1140		if (done)
1141			break;
1142
1143		/* finish any deferred frees and roll the transaction */
1144		error = xfs_defer_finish(&tp);
1145		if (error)
1146			goto out_trans_cancel;
1147	}
1148
1149	error = xfs_trans_commit(tp);
1150	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1151	return error;
1152
1153out_trans_cancel:
1154	xfs_trans_cancel(tp);
1155	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1156	return error;
1157}
1158
1159/*
1160 * xfs_insert_file_space()
1161 *	This routine creates hole space by shifting extents for the given file.
1162 *	The first thing we do is sync dirty data and invalidate the page cache
1163 *	over the region on which the insert range is working. We then split an
1164 *	extent in two at the given offset by calling xfs_bmap_split_extent,
1165 *	and shift all extent records lying between [offset, last allocated
1166 *	extent] to the right to reserve the hole range.
1167 * RETURNS:
1168 *	0 on success
1169 *	errno on error
1170 */
1171int
1172xfs_insert_file_space(
1173	struct xfs_inode	*ip,
1174	loff_t			offset,
1175	loff_t			len)
1176{
1177	struct xfs_mount	*mp = ip->i_mount;
1178	struct xfs_trans	*tp;
1179	int			error;
1180	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1181	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1182	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1183	bool			done = false;
1184
1185	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1186	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1187
1188	trace_xfs_insert_file_space(ip);
1189
1190	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1191	if (error)
1192		return error;
1193
1194	error = xfs_prepare_shift(ip, offset);
1195	if (error)
1196		return error;
1197
1198	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1199			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1200	if (error)
1201		return error;
1202
1203	xfs_ilock(ip, XFS_ILOCK_EXCL);
1204	xfs_trans_ijoin(tp, ip, 0);
1205
1206	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
1207			XFS_IEXT_PUNCH_HOLE_CNT);
1208	if (error == -EFBIG)
1209		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
1210	if (error)
1211		goto out_trans_cancel;
1212
1213	/*
1214	 * The extent shifting code works on extent granularity. So, if stop_fsb
1215	 * is not the starting block of extent, we need to split the extent at
1216	 * stop_fsb.
1217	 */
1218	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1219	if (error)
1220		goto out_trans_cancel;
1221
1222	do {
1223		error = xfs_defer_finish(&tp);
1224		if (error)
1225			goto out_trans_cancel;
1226
1227		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1228				&done, stop_fsb);
1229		if (error)
1230			goto out_trans_cancel;
1231	} while (!done);
1232
1233	error = xfs_trans_commit(tp);
1234	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1235	return error;
1236
1237out_trans_cancel:
1238	xfs_trans_cancel(tp);
1239	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1240	return error;
1241}
1242
1243/*
1244 * We need to check that the format of the data fork in the temporary inode is
1245 * valid for the target inode before doing the swap. This is not a problem with
1246 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1247 * data fork depending on the space the attribute fork is taking so we can get
1248 * invalid formats on the target inode.
1249 *
1250 * E.g. target has space for 7 extents in extent format, temp inode only has
1251 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1252 * btree, but when swapped it needs to be in extent format. Hence we can't just
1253 * blindly swap data forks on attr2 filesystems.
1254 *
1255 * Note that we check the swap in both directions so that we don't end up with
1256 * a corrupt temporary inode, either.
1257 *
1258 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1259 * inode will prevent this situation from occurring, so all we do here is
1260 * reject and log the attempt. Basically we are putting the responsibility on
1261 * userspace to get this right.
1262 */
1263static int
1264xfs_swap_extents_check_format(
1265	struct xfs_inode	*ip,	/* target inode */
1266	struct xfs_inode	*tip)	/* tmp inode */
1267{
1268	struct xfs_ifork	*ifp = &ip->i_df;
1269	struct xfs_ifork	*tifp = &tip->i_df;
1270
1271	/* User/group/project quota ids must match if quotas are enforced. */
1272	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1273	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1274	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1275	     ip->i_projid != tip->i_projid))
1276		return -EINVAL;
1277
1278	/* Should never get a local format */
1279	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1280	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1281		return -EINVAL;
1282
1283	/*
1284	 * If the target inode has fewer extents than the temporary inode, then
1285	 * why did userspace call us?
1286	 */
1287	if (ifp->if_nextents < tifp->if_nextents)
1288		return -EINVAL;
1289
1290	/*
1291	 * If we have to use the (expensive) rmap swap method, we can
1292	 * handle any number of extents and any format.
1293	 */
1294	if (xfs_has_rmapbt(ip->i_mount))
1295		return 0;
1296
1297	/*
1298	 * If the target inode is in extent form and the temp inode is in btree
1299	 * form then we will end up with the target inode in the wrong format,
1300	 * as we already know there are fewer extents in the temp inode.
1301	 */
1302	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1303	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1304		return -EINVAL;
1305
1306	/* Check temp in extent form to max in target */
1307	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1308	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1309		return -EINVAL;
1310
1311	/* Check target in extent form to max in temp */
1312	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1313	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1314		return -EINVAL;
1315
1316	/*
1317	 * If we are in a btree format, check that the temp root block will fit
1318	 * in the target and that it has enough extents to be in btree format
1319	 * in the target.
1320	 *
1321	 * Note that we have to be careful to allow btree->extent conversions
1322	 * (a common defrag case) which will occur when the temp inode is in
1323	 * extent format...
1324	 */
1325	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1326		if (xfs_inode_has_attr_fork(ip) &&
1327		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
1328			return -EINVAL;
1329		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1330			return -EINVAL;
1331	}
1332
1333	/* Reciprocal target->temp btree format checks */
1334	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1335		if (xfs_inode_has_attr_fork(tip) &&
1336		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
1337			return -EINVAL;
1338		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1339			return -EINVAL;
1340	}
1341
1342	return 0;
1343}
1344
1345static int
1346xfs_swap_extent_flush(
1347	struct xfs_inode	*ip)
1348{
1349	int	error;
1350
1351	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1352	if (error)
1353		return error;
1354	truncate_pagecache_range(VFS_I(ip), 0, -1);
1355
1356	/* Verify O_DIRECT for ftmp */
1357	if (VFS_I(ip)->i_mapping->nrpages)
1358		return -EINVAL;
1359	return 0;
1360}
1361
1362/*
1363 * Move extents from one file to another, when rmap is enabled.
1364 */
1365STATIC int
1366xfs_swap_extent_rmap(
1367	struct xfs_trans		**tpp,
1368	struct xfs_inode		*ip,
1369	struct xfs_inode		*tip)
1370{
1371	struct xfs_trans		*tp = *tpp;
1372	struct xfs_bmbt_irec		irec;
1373	struct xfs_bmbt_irec		uirec;
1374	struct xfs_bmbt_irec		tirec;
1375	xfs_fileoff_t			offset_fsb;
1376	xfs_fileoff_t			end_fsb;
1377	xfs_filblks_t			count_fsb;
1378	int				error;
1379	xfs_filblks_t			ilen;
1380	xfs_filblks_t			rlen;
1381	int				nimaps;
1382	uint64_t			tip_flags2;
1383
1384	/*
1385	 * If the source file has shared blocks, we must flag the donor
1386	 * file as having shared blocks so that we get the shared-block
1387	 * rmap functions when we go to fix up the rmaps.  The flags
1388	 * will be switched for real later.
1389	 */
1390	tip_flags2 = tip->i_diflags2;
1391	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1392		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1393
1394	offset_fsb = 0;
1395	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1396	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1397
1398	while (count_fsb) {
1399		/* Read extent from the donor file */
1400		nimaps = 1;
1401		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1402				&nimaps, 0);
1403		if (error)
1404			goto out;
1405		ASSERT(nimaps == 1);
1406		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1407
1408		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1409		ilen = tirec.br_blockcount;
1410
1411		/* Unmap the old blocks in the source file. */
1412		while (tirec.br_blockcount) {
1413			ASSERT(tp->t_firstblock == NULLFSBLOCK);
1414			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1415
1416			/* Read extent from the source file */
1417			nimaps = 1;
1418			error = xfs_bmapi_read(ip, tirec.br_startoff,
1419					tirec.br_blockcount, &irec,
1420					&nimaps, 0);
1421			if (error)
1422				goto out;
1423			ASSERT(nimaps == 1);
1424			ASSERT(tirec.br_startoff == irec.br_startoff);
1425			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1426
1427			/* Trim the extent. */
1428			uirec = tirec;
1429			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1430					tirec.br_blockcount,
1431					irec.br_blockcount);
1432			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1433
1434			if (xfs_bmap_is_real_extent(&uirec)) {
1435				error = xfs_iext_count_may_overflow(ip,
1436						XFS_DATA_FORK,
1437						XFS_IEXT_SWAP_RMAP_CNT);
1438				if (error == -EFBIG)
1439					error = xfs_iext_count_upgrade(tp, ip,
1440							XFS_IEXT_SWAP_RMAP_CNT);
1441				if (error)
1442					goto out;
1443			}
1444
1445			if (xfs_bmap_is_real_extent(&irec)) {
1446				error = xfs_iext_count_may_overflow(tip,
1447						XFS_DATA_FORK,
1448						XFS_IEXT_SWAP_RMAP_CNT);
1449				if (error == -EFBIG)
1450					error = xfs_iext_count_upgrade(tp, ip,
1451							XFS_IEXT_SWAP_RMAP_CNT);
1452				if (error)
1453					goto out;
1454			}
1455
1456			/* Remove the mapping from the donor file. */
1457			xfs_bmap_unmap_extent(tp, tip, &uirec);
1458
1459			/* Remove the mapping from the source file. */
1460			xfs_bmap_unmap_extent(tp, ip, &irec);
1461
1462			/* Map the donor file's blocks into the source file. */
1463			xfs_bmap_map_extent(tp, ip, &uirec);
1464
1465			/* Map the source file's blocks into the donor file. */
1466			xfs_bmap_map_extent(tp, tip, &irec);
1467
1468			error = xfs_defer_finish(tpp);
1469			tp = *tpp;
1470			if (error)
1471				goto out;
1472
1473			tirec.br_startoff += rlen;
1474			if (tirec.br_startblock != HOLESTARTBLOCK &&
1475			    tirec.br_startblock != DELAYSTARTBLOCK)
1476				tirec.br_startblock += rlen;
1477			tirec.br_blockcount -= rlen;
1478		}
1479
1480		/* Roll on... */
1481		count_fsb -= ilen;
1482		offset_fsb += ilen;
1483	}
1484
1485	tip->i_diflags2 = tip_flags2;
1486	return 0;
1487
1488out:
1489	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1490	tip->i_diflags2 = tip_flags2;
1491	return error;
1492}
1493
1494/* Swap the extents of two files by swapping data forks. */
1495STATIC int
1496xfs_swap_extent_forks(
1497	struct xfs_trans	*tp,
1498	struct xfs_inode	*ip,
1499	struct xfs_inode	*tip,
1500	int			*src_log_flags,
1501	int			*target_log_flags)
1502{
1503	xfs_filblks_t		aforkblks = 0;
1504	xfs_filblks_t		taforkblks = 0;
1505	xfs_extnum_t		junk;
1506	uint64_t		tmp;
1507	int			error;
1508
1509	/*
1510	 * Count the number of extended attribute blocks
1511	 */
1512	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
1513	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1514		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1515				&aforkblks);
1516		if (error)
1517			return error;
1518	}
1519	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
1520	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1521		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1522				&taforkblks);
1523		if (error)
1524			return error;
1525	}
1526
1527	/*
1528	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1529	 * block headers. We can't start changing the bmbt blocks until the
1530	 * inode owner change is logged so recovery does the right thing in the
1531	 * event of a crash. Set the owner change log flags now and leave the
1532	 * bmbt scan as the last step.
1533	 */
1534	if (xfs_has_v3inodes(ip->i_mount)) {
1535		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1536			(*target_log_flags) |= XFS_ILOG_DOWNER;
1537		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1538			(*src_log_flags) |= XFS_ILOG_DOWNER;
1539	}
1540
1541	/*
1542	 * Swap the data forks of the inodes
1543	 */
1544	swap(ip->i_df, tip->i_df);
1545
1546	/*
1547	 * Fix the on-disk inode values
1548	 */
1549	tmp = (uint64_t)ip->i_nblocks;
1550	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1551	tip->i_nblocks = tmp + taforkblks - aforkblks;
1552
1553	/*
1554	 * The extents in the source inode could still contain speculative
1555	 * preallocation beyond EOF (e.g. the file is open but not modified
1556	 * while defrag is in progress). In that case, we need to copy over the
1557	 * number of delalloc blocks the data fork in the source inode is
1558	 * tracking beyond EOF so that when the fork is truncated away when the
1559	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1560	 * counter on that inode.
1561	 */
1562	ASSERT(tip->i_delayed_blks == 0);
1563	tip->i_delayed_blks = ip->i_delayed_blks;
1564	ip->i_delayed_blks = 0;
1565
1566	switch (ip->i_df.if_format) {
1567	case XFS_DINODE_FMT_EXTENTS:
1568		(*src_log_flags) |= XFS_ILOG_DEXT;
1569		break;
1570	case XFS_DINODE_FMT_BTREE:
1571		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1572		       (*src_log_flags & XFS_ILOG_DOWNER));
1573		(*src_log_flags) |= XFS_ILOG_DBROOT;
1574		break;
1575	}
1576
1577	switch (tip->i_df.if_format) {
1578	case XFS_DINODE_FMT_EXTENTS:
1579		(*target_log_flags) |= XFS_ILOG_DEXT;
1580		break;
1581	case XFS_DINODE_FMT_BTREE:
1582		(*target_log_flags) |= XFS_ILOG_DBROOT;
1583		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1584		       (*target_log_flags & XFS_ILOG_DOWNER));
1585		break;
1586	}
1587
1588	return 0;
1589}
1590
1591/*
1592 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1593 * change owner scan attempts to order all modified buffers in the current
1594 * transaction. In the event of ordered buffer failure, the offending buffer is
1595 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1596 * the transaction in this case to replenish the fallback log reservation and
1597 * restart the scan. This process repeats until the scan completes.
1598 */
1599static int
1600xfs_swap_change_owner(
1601	struct xfs_trans	**tpp,
1602	struct xfs_inode	*ip,
1603	struct xfs_inode	*tmpip)
1604{
1605	int			error;
1606	struct xfs_trans	*tp = *tpp;
1607
1608	do {
1609		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1610					      NULL);
1611		/* success or fatal error */
1612		if (error != -EAGAIN)
1613			break;
1614
1615		error = xfs_trans_roll(tpp);
1616		if (error)
1617			break;
1618		tp = *tpp;
1619
1620		/*
1621		 * Redirty both inodes so they can relog and keep the log tail
1622		 * moving forward.
1623		 */
1624		xfs_trans_ijoin(tp, ip, 0);
1625		xfs_trans_ijoin(tp, tmpip, 0);
1626		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1627		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1628	} while (true);
1629
1630	return error;
1631}
1632
1633int
1634xfs_swap_extents(
1635	struct xfs_inode	*ip,	/* target inode */
1636	struct xfs_inode	*tip,	/* tmp inode */
1637	struct xfs_swapext	*sxp)
1638{
1639	struct xfs_mount	*mp = ip->i_mount;
1640	struct xfs_trans	*tp;
1641	struct xfs_bstat	*sbp = &sxp->sx_stat;
1642	int			src_log_flags, target_log_flags;
1643	int			error = 0;
1644	uint64_t		f;
1645	int			resblks = 0;
1646	unsigned int		flags = 0;
1647
1648	/*
1649	 * Lock the inodes against other IO, page faults and truncate to
1650	 * begin with.  Then we can safely ensure the inodes are flushed
1651	 * and have no page cache.  Once we have done this we can take the
1652	 * ilocks and do the rest of the checks.
1653	 */
1654	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1655	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1656				    VFS_I(tip)->i_mapping);
1657
1658	/* Verify that both files have the same format */
1659	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1660		error = -EINVAL;
1661		goto out_unlock;
1662	}
1663
1664	/* Verify both files are either real-time or non-realtime */
1665	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1666		error = -EINVAL;
1667		goto out_unlock;
1668	}
1669
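	/*
	 * Attach quota structures to both inodes up front so the block
	 * count changes made by the swap are properly quota-accounted.
	 */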
1670	error = xfs_qm_dqattach(ip);
1671	if (error)
1672		goto out_unlock;
1673
1674	error = xfs_qm_dqattach(tip);
1675	if (error)
1676		goto out_unlock;
1677
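	/*
	 * Flush dirty data and wait for in-flight direct IO on both files
	 * so the on-disk extent maps are stable before we inspect and swap
	 * them.
	 */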
1678	error = xfs_swap_extent_flush(ip);
1679	if (error)
1680		goto out_unlock;
1681	error = xfs_swap_extent_flush(tip);
1682	if (error)
1683		goto out_unlock;
1684
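	/*
	 * The temporary file may still carry COW fork extents (delalloc
	 * staging blocks), which cannot be swapped; cancel them first.
	 */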
1685	if (xfs_inode_has_cow_data(tip)) {
1686		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1687		if (error)
1688			goto out_unlock;
1689	}
1690
1691	/*
1692	 * Extent "swapping" with rmap requires a permanent reservation and
1693	 * a block reservation because it's really just a remap operation
1694	 * performed with log redo items!
1695	 */
1696	if (xfs_has_rmapbt(mp)) {
1697		int		w = XFS_DATA_FORK;
1698		uint32_t	ipnext = ip->i_df.if_nextents;
1699		uint32_t	tipnext	= tip->i_df.if_nextents;
1700
1701		/*
1702		 * Conceptually this shouldn't affect the shape of either bmbt,
1703		 * but since we atomically move extents one by one, we reserve
1704		 * enough space to rebuild both trees.
1705		 */
1706		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1707		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1708
1709		/*
1710		 * If either inode straddles a bmapbt block allocation boundary,
1711		 * the rmapbt algorithm triggers repeated allocs and frees as
1712		 * extents are remapped. This can exhaust the block reservation
1713		 * prematurely and cause shutdown. Return freed blocks to the
1714		 * transaction reservation to counter this behavior.
1715		 */
1716		flags |= XFS_TRANS_RES_FDBLKS;
1717	}
1718	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1719				&tp);
1720	if (error)
1721		goto out_unlock;
1722
1723	/*
1724	 * Lock and join the inodes to the transaction.  Joined with zero lock
1725	 * flags, they stay locked over commit/cancel; out_unlock_ilock drops them.
1726	 */
1727	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1728	xfs_trans_ijoin(tp, ip, 0);
1729	xfs_trans_ijoin(tp, tip, 0);
1730
1731
1732	/* Verify all data are being swapped - this interface only swaps whole files */
1733	if (sxp->sx_offset != 0 ||
1734	    sxp->sx_length != ip->i_disk_size ||
1735	    sxp->sx_length != tip->i_disk_size) {
1736		error = -EFAULT;
1737		goto out_trans_cancel;
1738	}
1739
1740	trace_xfs_swap_extent_before(ip, 0);
1741	trace_xfs_swap_extent_before(tip, 1);
1742
1743	/* check inode formats now that data is flushed */
1744	error = xfs_swap_extents_check_format(ip, tip);
1745	if (error) {
1746		xfs_notice(mp,
1747		    "%s: inode 0x%llx format is incompatible for exchanging.",
1748				__func__, ip->i_ino);
1749		goto out_trans_cancel;
1750	}
1751
1752	/*
1753	 * Compare the current change and modify times with those
1754	 * passed in.  If they differ, we abort this swap.
1755	 * This is the mechanism used to guarantee to the
1756	 * calling process that the file was not changed out
1757	 * from under it.
1758	 */
1759	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1760	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1761	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1762	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1763		error = -EBUSY;
1764		goto out_trans_cancel;
1765	}
1766
1767	/*
1768	 * Note the trickiness in setting the log flags - we set the owner log
1769	 * flag on the opposite inode (i.e. the inode we are setting the new
1770	 * owner to be) because once we swap the forks and log that, log
1771	 * recovery is going to see the fork as owned by the swapped inode,
1772	 * not its pre-swap owner.
1773	 */
1774	src_log_flags = XFS_ILOG_CORE;
1775	target_log_flags = XFS_ILOG_CORE;
1776
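	/*
	 * With rmapbt enabled the swap is done by unmapping and remapping
	 * extents one at a time with deferred operations, keeping the
	 * reverse mappings consistent; otherwise the data fork contents
	 * are exchanged wholesale.
	 */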
1777	if (xfs_has_rmapbt(mp))
1778		error = xfs_swap_extent_rmap(&tp, ip, tip);
1779	else
1780		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1781				&target_log_flags);
1782	if (error)
1783		goto out_trans_cancel;
1784
1785	/* Do we have to swap reflink flags? */
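	/*
	 * Classic three-step bit swap: stash ip's reflink bit in f, copy
	 * tip's bit into ip, then install the stashed bit in tip.  Only
	 * the reflink bit moves; the rest of i_diflags2 stays put.
	 */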
1786	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1787	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1788		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1789		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1790		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1791		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1792		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1793	}
1794
1795	/* Swap the cow forks. */
1796	if (xfs_has_reflink(mp)) {
1797		ASSERT(!ip->i_cowfp ||
1798		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1799		ASSERT(!tip->i_cowfp ||
1800		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1801
1802		swap(ip->i_cowfp, tip->i_cowfp);
1803
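		/*
		 * The cowblocks tag steers background reclaim of COW fork
		 * blocks, so it must follow the fork contents to whichever
		 * inode now holds them.
		 */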
1804		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1805			xfs_inode_set_cowblocks_tag(ip);
1806		else
1807			xfs_inode_clear_cowblocks_tag(ip);
1808		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1809			xfs_inode_set_cowblocks_tag(tip);
1810		else
1811			xfs_inode_clear_cowblocks_tag(tip);
1812	}
1813
1814	xfs_trans_log_inode(tp, ip,  src_log_flags);
1815	xfs_trans_log_inode(tp, tip, target_log_flags);
1816
1817	/*
1818	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1819	 * have inode number owner values in the bmbt blocks that still refer to
1820	 * the old inode. Scan each bmbt to fix up the owner values with the
1821	 * inode number of the current inode.
1822	 */
1823	if (src_log_flags & XFS_ILOG_DOWNER) {
1824		error = xfs_swap_change_owner(&tp, ip, tip);
1825		if (error)
1826			goto out_trans_cancel;
1827	}
1828	if (target_log_flags & XFS_ILOG_DOWNER) {
1829		error = xfs_swap_change_owner(&tp, tip, ip);
1830		if (error)
1831			goto out_trans_cancel;
1832	}
1833
1834	/*
1835	 * If this is a synchronous mount, make sure that the
1836	 * transaction goes to disk before returning to the user.
1837	 */
1838	if (xfs_has_wsync(mp))
1839		xfs_trans_set_sync(tp);
1840
1841	error = xfs_trans_commit(tp);
1842
1843	trace_xfs_swap_extent_after(ip, 0);
1844	trace_xfs_swap_extent_after(tip, 1);
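	/*
	 * Success and commit failure take the same exit: fall through to
	 * drop the ilocks, then the invalidate and VFS inode locks.
	 */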
1845
1846out_unlock_ilock:
1847	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1848	xfs_iunlock(tip, XFS_ILOCK_EXCL);
1849out_unlock:
1850	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1851				      VFS_I(tip)->i_mapping);
1852	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1853	return error;
1854
1855out_trans_cancel:
1856	xfs_trans_cancel(tp);
1857	goto out_unlock_ilock;
1858}