v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   4 * Copyright (C) 2010 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_mount.h"
  14#include "xfs_extent_busy.h"
  15#include "xfs_quota.h"
  16#include "xfs_trans.h"
  17#include "xfs_trans_priv.h"
  18#include "xfs_log.h"
  19#include "xfs_trace.h"
  20#include "xfs_error.h"
  21#include "xfs_defer.h"
  22
  23kmem_zone_t	*xfs_trans_zone;
  24
  25#if defined(CONFIG_TRACEPOINTS)
  26static void
  27xfs_trans_trace_reservations(
  28	struct xfs_mount	*mp)
  29{
  30	struct xfs_trans_res	resv;
  31	struct xfs_trans_res	*res;
  32	struct xfs_trans_res	*end_res;
  33	int			i;
  34
  35	res = (struct xfs_trans_res *)M_RES(mp);
  36	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
  37	for (i = 0; res < end_res; i++, res++)
  38		trace_xfs_trans_resv_calc(mp, i, res);
  39	xfs_log_get_max_trans_res(mp, &resv);
  40	trace_xfs_trans_resv_calc(mp, -1, &resv);
  41}
  42#else
  43# define xfs_trans_trace_reservations(mp)
  44#endif
  45
  46/*
  47 * Initialize the precomputed transaction reservation values
  48 * in the mount structure.
  49 */
  50void
  51xfs_trans_init(
  52	struct xfs_mount	*mp)
  53{
  54	xfs_trans_resv_calc(mp, M_RES(mp));
  55	xfs_trans_trace_reservations(mp);
  56}
  57
  58/*
  59 * Free the transaction structure.  If there is more clean up
  60 * to do when the structure is freed, add it here.
  61 */
  62STATIC void
  63xfs_trans_free(
  64	struct xfs_trans	*tp)
  65{
  66	xfs_extent_busy_sort(&tp->t_busy);
  67	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  68
  69	trace_xfs_trans_free(tp, _RET_IP_);
  70	atomic_dec(&tp->t_mountp->m_active_trans);
  71	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  72		sb_end_intwrite(tp->t_mountp->m_super);
  73	xfs_trans_free_dqinfo(tp);
  74	kmem_zone_free(xfs_trans_zone, tp);
  75}
  76
  77/*
  78 * This is called to create a new transaction which will share the
  79 * permanent log reservation of the given transaction.  The remaining
  80 * unused block and rt extent reservations are also inherited.  This
  81 * implies that the original transaction is no longer allowed to allocate
   82 * blocks.  Locks and log items, however, are not inherited.  They must
  83 * be added to the new transaction explicitly.
  84 */
  85STATIC struct xfs_trans *
  86xfs_trans_dup(
  87	struct xfs_trans	*tp)
  88{
  89	struct xfs_trans	*ntp;
  90
  91	trace_xfs_trans_dup(tp, _RET_IP_);
  92
  93	ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
  94
  95	/*
  96	 * Initialize the new transaction structure.
  97	 */
  98	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
  99	ntp->t_mountp = tp->t_mountp;
 100	INIT_LIST_HEAD(&ntp->t_items);
 101	INIT_LIST_HEAD(&ntp->t_busy);
 102	INIT_LIST_HEAD(&ntp->t_dfops);
 103	ntp->t_firstblock = NULLFSBLOCK;
 104
 105	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 106	ASSERT(tp->t_ticket != NULL);
 107
 108	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 109		       (tp->t_flags & XFS_TRANS_RESERVE) |
 110		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
 111	/* We gave our writer reference to the new transaction */
 112	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 113	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 114
 115	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
 116	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 117	tp->t_blk_res = tp->t_blk_res_used;
 118
 119	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 120	tp->t_rtx_res = tp->t_rtx_res_used;
 121	ntp->t_pflags = tp->t_pflags;
 122
 123	/* move deferred ops over to the new tp */
 124	xfs_defer_move(ntp, tp);
 125
 126	xfs_trans_dup_dqinfo(tp, ntp);
 127
 128	atomic_inc(&tp->t_mountp->m_active_trans);
 129	return ntp;
 130}
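
A worked example of the reservation handoff above, with assumed numbers (this helper is illustrative, not part of xfs_trans.c):

/*
 * If the old transaction reserved 10 blocks and has used 3, the
 * duplicate inherits 10 - 3 = 7 unused blocks while the original keeps
 * exactly the 3 it consumed, and may not allocate any more.
 */
static unsigned int
example_dup_split(
	unsigned int	blk_res,	/* e.g. tp->t_blk_res == 10 */
	unsigned int	blk_res_used)	/* e.g. tp->t_blk_res_used == 3 */
{
	return blk_res - blk_res_used;	/* 7 blocks move to ntp */
}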
 131
 132/*
 133 * This is called to reserve free disk blocks and log space for the
 134 * given transaction.  This must be done before allocating any resources
 135 * within the transaction.
 136 *
 137 * This will return ENOSPC if there are not enough blocks available.
 138 * It will sleep waiting for available log space.
 139 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 140 * is used by long running transactions.  If any one of the reservations
 141 * fails then they will all be backed out.
 142 *
 143 * This does not do quota reservations. That typically is done by the
 144 * caller afterwards.
 145 */
 146static int
 147xfs_trans_reserve(
 148	struct xfs_trans	*tp,
 149	struct xfs_trans_res	*resp,
 150	uint			blocks,
 151	uint			rtextents)
 152{
 153	int		error = 0;
 154	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 155
 156	/* Mark this thread as being in a transaction */
 157	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 158
 159	/*
 160	 * Attempt to reserve the needed disk blocks by decrementing
 161	 * the number needed from the number available.  This will
 162	 * fail if the count would go below zero.
 163	 */
 164	if (blocks > 0) {
 165		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 166		if (error != 0) {
 167			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 168			return -ENOSPC;
 169		}
 170		tp->t_blk_res += blocks;
 171	}
 172
 173	/*
 174	 * Reserve the log space needed for this transaction.
 175	 */
 176	if (resp->tr_logres > 0) {
 177		bool	permanent = false;
 178
 179		ASSERT(tp->t_log_res == 0 ||
 180		       tp->t_log_res == resp->tr_logres);
 181		ASSERT(tp->t_log_count == 0 ||
 182		       tp->t_log_count == resp->tr_logcount);
 183
 184		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 185			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 186			permanent = true;
 187		} else {
 188			ASSERT(tp->t_ticket == NULL);
 189			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 190		}
 191
 192		if (tp->t_ticket != NULL) {
 193			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 194			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 195		} else {
 196			error = xfs_log_reserve(tp->t_mountp,
 197						resp->tr_logres,
 198						resp->tr_logcount,
 199						&tp->t_ticket, XFS_TRANSACTION,
 200						permanent);
 201		}
 202
 203		if (error)
 204			goto undo_blocks;
 205
 206		tp->t_log_res = resp->tr_logres;
 207		tp->t_log_count = resp->tr_logcount;
 208	}
 209
 210	/*
 211	 * Attempt to reserve the needed realtime extents by decrementing
 212	 * the number needed from the number available.  This will
 213	 * fail if the count would go below zero.
 214	 */
 215	if (rtextents > 0) {
 216		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 217		if (error) {
 218			error = -ENOSPC;
 219			goto undo_log;
 220		}
 221		tp->t_rtx_res += rtextents;
 222	}
 223
 224	return 0;
 225
 226	/*
 227	 * Error cases jump to one of these labels to undo any
 228	 * reservations which have already been performed.
 229	 */
 230undo_log:
 231	if (resp->tr_logres > 0) {
 232		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
 233		tp->t_ticket = NULL;
 234		tp->t_log_res = 0;
 235		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 236	}
 237
 238undo_blocks:
 239	if (blocks > 0) {
 240		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
 241		tp->t_blk_res = 0;
 242	}
 243
 244	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 245
 246	return error;
 247}
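
The undo_log/undo_blocks labels above follow the stacked-unwind goto idiom: each failure jumps to the label that releases everything acquired before it. A minimal standalone sketch of the same shape, using hypothetical take_a()/take_b()/release_a() helpers:

static int take_a(void) { return 0; }	/* hypothetical acquisition */
static int take_b(void) { return -1; }	/* hypothetical, fails here */
static void release_a(void) { }

static int
example_stacked_unwind(void)
{
	int	error;

	error = take_a();
	if (error)
		return error;		/* nothing to undo yet */

	error = take_b();
	if (error)
		goto undo_a;		/* unwind in reverse order */

	return 0;

undo_a:
	release_a();
	return error;
}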
 248
 249int
 250xfs_trans_alloc(
 251	struct xfs_mount	*mp,
 252	struct xfs_trans_res	*resp,
 253	uint			blocks,
 254	uint			rtextents,
 255	uint			flags,
 256	struct xfs_trans	**tpp)
 257{
 258	struct xfs_trans	*tp;
 259	int			error;
 260
 261	/*
 262	 * Allocate the handle before we do our freeze accounting and setting up
 263	 * GFP_NOFS allocation context so that we avoid lockdep false positives
 264	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
 265	 */
 266	tp = kmem_zone_zalloc(xfs_trans_zone, 0);
 267	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 268		sb_start_intwrite(mp->m_super);
 269
 270	/*
 271	 * Zero-reservation ("empty") transactions can't modify anything, so
 272	 * they're allowed to run while we're frozen.
 273	 */
 274	WARN_ON(resp->tr_logres > 0 &&
 275		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 276	atomic_inc(&mp->m_active_trans);
 277
 278	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 279	tp->t_flags = flags;
 280	tp->t_mountp = mp;
 281	INIT_LIST_HEAD(&tp->t_items);
 282	INIT_LIST_HEAD(&tp->t_busy);
 283	INIT_LIST_HEAD(&tp->t_dfops);
 284	tp->t_firstblock = NULLFSBLOCK;
 285
 286	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 287	if (error) {
 288		xfs_trans_cancel(tp);
 289		return error;
 290	}
 291
 292	trace_xfs_trans_alloc(tp, _RET_IP_);
 293
 294	*tpp = tp;
 295	return 0;
 296}
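
A condensed sketch of the usual pattern around xfs_trans_alloc() (the helper and its use of tr_ichange are illustrative, modelled on simple inode-core updates; real callers add quota handling and fuller error paths):

static int
example_touch_inode(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;			/* nothing to cancel yet */

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* unlocked at commit */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);	/* dirty the inode core */

	return xfs_trans_commit(tp);	/* error paths use xfs_trans_cancel(tp) */
}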
 297
 298/*
 299 * Create an empty transaction with no reservation.  This is a defensive
 300 * mechanism for routines that query metadata without actually modifying
 301 * them -- if the metadata being queried is somehow cross-linked (think a
 302 * btree block pointer that points higher in the tree), we risk deadlock.
 303 * However, blocks grabbed as part of a transaction can be re-grabbed.
 304 * The verifiers will notice the corrupt block and the operation will fail
 305 * back to userspace without deadlocking.
 306 *
 307 * Note the zero-length reservation; this transaction MUST be cancelled
 308 * without any dirty data.
 309 */
 310int
 311xfs_trans_alloc_empty(
 312	struct xfs_mount		*mp,
 313	struct xfs_trans		**tpp)
 314{
 315	struct xfs_trans_res		resv = {0};
 316
 317	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
 318}
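
A short sketch of how such an empty transaction is driven (hypothetical helper): metadata is read through the transaction so already-grabbed blocks can be re-grabbed, and the transaction always ends in xfs_trans_cancel():

static int
example_query_metadata(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only metadata lookups via tp go here ... */

	xfs_trans_cancel(tp);		/* never committed: nothing is dirty */
	return 0;
}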
 319
 320/*
 321 * Record the indicated change to the given field for application
 322 * to the file system's superblock when the transaction commits.
 323 * For now, just store the change in the transaction structure.
 324 *
 325 * Mark the transaction structure to indicate that the superblock
 326 * needs to be updated before committing.
 327 *
 328 * Because we may not be keeping track of allocated/free inodes and
 329 * used filesystem blocks in the superblock, we do not mark the
 330 * superblock dirty in this transaction if we modify these fields.
 331 * We still need to update the transaction deltas so that they get
 332 * applied to the incore superblock, but we don't want them to
 333 * cause the superblock to get locked and logged if these are the
 334 * only fields in the superblock that the transaction modifies.
 335 */
 336void
 337xfs_trans_mod_sb(
 338	xfs_trans_t	*tp,
 339	uint		field,
 340	int64_t		delta)
 341{
 342	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 343	xfs_mount_t	*mp = tp->t_mountp;
 344
 345	switch (field) {
 346	case XFS_TRANS_SB_ICOUNT:
 347		tp->t_icount_delta += delta;
 348		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 349			flags &= ~XFS_TRANS_SB_DIRTY;
 350		break;
 351	case XFS_TRANS_SB_IFREE:
 352		tp->t_ifree_delta += delta;
 353		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 354			flags &= ~XFS_TRANS_SB_DIRTY;
 355		break;
 356	case XFS_TRANS_SB_FDBLOCKS:
 357		/*
 358		 * Track the number of blocks allocated in the transaction.
 359		 * Make sure it does not exceed the number reserved. If so,
 360		 * shutdown as this can lead to accounting inconsistency.
 361		 */
 362		if (delta < 0) {
 363			tp->t_blk_res_used += (uint)-delta;
 364			if (tp->t_blk_res_used > tp->t_blk_res)
 365				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 366		}
 367		tp->t_fdblocks_delta += delta;
 368		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 369			flags &= ~XFS_TRANS_SB_DIRTY;
 370		break;
 371	case XFS_TRANS_SB_RES_FDBLOCKS:
 372		/*
 373		 * The allocation has already been applied to the
 374		 * in-core superblock's counter.  This should only
 375		 * be applied to the on-disk superblock.
 376		 */
 377		tp->t_res_fdblocks_delta += delta;
 378		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 379			flags &= ~XFS_TRANS_SB_DIRTY;
 380		break;
 381	case XFS_TRANS_SB_FREXTENTS:
 382		/*
 383		 * Track the number of blocks allocated in the
 384		 * transaction.  Make sure it does not exceed the
 385		 * number reserved.
 386		 */
 387		if (delta < 0) {
 388			tp->t_rtx_res_used += (uint)-delta;
 389			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 390		}
 391		tp->t_frextents_delta += delta;
 392		break;
 393	case XFS_TRANS_SB_RES_FREXTENTS:
 394		/*
 395		 * The allocation has already been applied to the
 396		 * in-core superblock's counter.  This should only
 397		 * be applied to the on-disk superblock.
 398		 */
 399		ASSERT(delta < 0);
 400		tp->t_res_frextents_delta += delta;
 401		break;
 402	case XFS_TRANS_SB_DBLOCKS:
 403		ASSERT(delta > 0);
 404		tp->t_dblocks_delta += delta;
 405		break;
 406	case XFS_TRANS_SB_AGCOUNT:
 407		ASSERT(delta > 0);
 408		tp->t_agcount_delta += delta;
 409		break;
 410	case XFS_TRANS_SB_IMAXPCT:
 411		tp->t_imaxpct_delta += delta;
 412		break;
 413	case XFS_TRANS_SB_REXTSIZE:
 414		tp->t_rextsize_delta += delta;
 415		break;
 416	case XFS_TRANS_SB_RBMBLOCKS:
 417		tp->t_rbmblocks_delta += delta;
 418		break;
 419	case XFS_TRANS_SB_RBLOCKS:
 420		tp->t_rblocks_delta += delta;
 421		break;
 422	case XFS_TRANS_SB_REXTENTS:
 423		tp->t_rextents_delta += delta;
 424		break;
 425	case XFS_TRANS_SB_REXTSLOG:
 426		tp->t_rextslog_delta += delta;
 427		break;
 428	default:
 429		ASSERT(0);
 430		return;
 431	}
 432
 433	tp->t_flags |= flags;
 434}
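
A typical call site, sketched with a hypothetical helper: code that allocates len blocks inside the transaction reports them as a negative XFS_TRANS_SB_FDBLOCKS delta, which the switch above debits from tp->t_blk_res:

static void
example_charge_blocks(
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
}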
 435
 436/*
 437 * xfs_trans_apply_sb_deltas() is called from the commit code
 438 * to bring the superblock buffer into the current transaction
 439 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 440 *
 441 * For now we just look at each field allowed to change and change
 442 * it if necessary.
 443 */
 444STATIC void
 445xfs_trans_apply_sb_deltas(
 446	xfs_trans_t	*tp)
 447{
 448	xfs_dsb_t	*sbp;
 449	xfs_buf_t	*bp;
 450	int		whole = 0;
 451
 452	bp = xfs_trans_getsb(tp, tp->t_mountp);
 453	sbp = XFS_BUF_TO_SBP(bp);
 454
 455	/*
 456	 * Check that superblock mods match the mods made to AGF counters.
 457	 */
 458	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 459	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 460		tp->t_ag_btree_delta));
 461
 462	/*
 463	 * Only update the superblock counters if we are logging them
 464	 */
 465	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 466		if (tp->t_icount_delta)
 467			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 468		if (tp->t_ifree_delta)
 469			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 470		if (tp->t_fdblocks_delta)
 471			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 472		if (tp->t_res_fdblocks_delta)
 473			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 474	}
 475
 476	if (tp->t_frextents_delta)
 477		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 478	if (tp->t_res_frextents_delta)
 479		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 480
 481	if (tp->t_dblocks_delta) {
 482		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 483		whole = 1;
 484	}
 485	if (tp->t_agcount_delta) {
 486		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 487		whole = 1;
 488	}
 489	if (tp->t_imaxpct_delta) {
 490		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 491		whole = 1;
 492	}
 493	if (tp->t_rextsize_delta) {
 494		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 495		whole = 1;
 496	}
 497	if (tp->t_rbmblocks_delta) {
 498		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 499		whole = 1;
 500	}
 501	if (tp->t_rblocks_delta) {
 502		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 503		whole = 1;
 504	}
 505	if (tp->t_rextents_delta) {
 506		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 507		whole = 1;
 508	}
 509	if (tp->t_rextslog_delta) {
 510		sbp->sb_rextslog += tp->t_rextslog_delta;
 511		whole = 1;
 512	}
 513
 514	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 515	if (whole)
 516		/*
 517		 * Log the whole thing, the fields are noncontiguous.
 518		 */
 519		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 520	else
 521		/*
 522		 * Since all the modifiable fields are contiguous, we
 523		 * can get away with this.
 524		 */
 525		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 526				  offsetof(xfs_dsb_t, sb_frextents) +
 527				  sizeof(sbp->sb_frextents) - 1);
 528}
 529
 530STATIC int
 531xfs_sb_mod8(
 532	uint8_t			*field,
 533	int8_t			delta)
 534{
 535	int8_t			counter = *field;
 536
 537	counter += delta;
 538	if (counter < 0) {
 539		ASSERT(0);
 540		return -EINVAL;
 541	}
 542	*field = counter;
 543	return 0;
 544}
 545
 546STATIC int
 547xfs_sb_mod32(
 548	uint32_t		*field,
 549	int32_t			delta)
 550{
 551	int32_t			counter = *field;
 552
 553	counter += delta;
 554	if (counter < 0) {
 555		ASSERT(0);
 556		return -EINVAL;
 557	}
 558	*field = counter;
 559	return 0;
 560}
 561
 562STATIC int
 563xfs_sb_mod64(
 564	uint64_t		*field,
 565	int64_t			delta)
 566{
 567	int64_t			counter = *field;
 568
 569	counter += delta;
 570	if (counter < 0) {
 571		ASSERT(0);
 572		return -EINVAL;
 573	}
 574	*field = counter;
 575	return 0;
 576}
 577
 578/*
 579 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 580 * and apply superblock counter changes to the in-core superblock.  The
 581 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
  582 * applied to the in-core superblock.  The idea is that this has already been
 583 * done.
 584 *
 585 * If we are not logging superblock counters, then the inode allocated/free and
 586 * used block counts are not updated in the on disk superblock. In this case,
 587 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 588 * still need to update the incore superblock with the changes.
 589 */
 590void
 591xfs_trans_unreserve_and_mod_sb(
 592	struct xfs_trans	*tp)
 593{
 594	struct xfs_mount	*mp = tp->t_mountp;
 595	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 596	int64_t			blkdelta = 0;
 597	int64_t			rtxdelta = 0;
 598	int64_t			idelta = 0;
 599	int64_t			ifreedelta = 0;
 600	int			error;
 601
 602	/* calculate deltas */
 603	if (tp->t_blk_res > 0)
 604		blkdelta = tp->t_blk_res;
 605	if ((tp->t_fdblocks_delta != 0) &&
 606	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 607	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 608	        blkdelta += tp->t_fdblocks_delta;
 609
 610	if (tp->t_rtx_res > 0)
 611		rtxdelta = tp->t_rtx_res;
 612	if ((tp->t_frextents_delta != 0) &&
 613	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 614		rtxdelta += tp->t_frextents_delta;
 615
 616	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 617	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 618		idelta = tp->t_icount_delta;
 619		ifreedelta = tp->t_ifree_delta;
 620	}
 621
 622	/* apply the per-cpu counters */
 623	if (blkdelta) {
 624		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 625		if (error)
 626			goto out;
 627	}
 628
 629	if (idelta) {
 630		error = xfs_mod_icount(mp, idelta);
 631		if (error)
 632			goto out_undo_fdblocks;
 633	}
 634
 635	if (ifreedelta) {
 636		error = xfs_mod_ifree(mp, ifreedelta);
 637		if (error)
 638			goto out_undo_icount;
 639	}
 640
 641	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 642		return;
 643
 644	/* apply remaining deltas */
 645	spin_lock(&mp->m_sb_lock);
 646	if (rtxdelta) {
 647		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
 648		if (error)
 649			goto out_undo_ifree;
 650	}
 651
 652	if (tp->t_dblocks_delta != 0) {
 653		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
 654		if (error)
 655			goto out_undo_frextents;
 656	}
 657	if (tp->t_agcount_delta != 0) {
 658		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 659		if (error)
 660			goto out_undo_dblocks;
 661	}
 662	if (tp->t_imaxpct_delta != 0) {
 663		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
 664		if (error)
 665			goto out_undo_agcount;
 666	}
 667	if (tp->t_rextsize_delta != 0) {
 668		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
 669				     tp->t_rextsize_delta);
 670		if (error)
 671			goto out_undo_imaxpct;
 672	}
 673	if (tp->t_rbmblocks_delta != 0) {
 674		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
 675				     tp->t_rbmblocks_delta);
 676		if (error)
 677			goto out_undo_rextsize;
 678	}
 679	if (tp->t_rblocks_delta != 0) {
 680		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
 681		if (error)
 682			goto out_undo_rbmblocks;
 683	}
 684	if (tp->t_rextents_delta != 0) {
 685		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
 686				     tp->t_rextents_delta);
 687		if (error)
 688			goto out_undo_rblocks;
 689	}
 690	if (tp->t_rextslog_delta != 0) {
 691		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
 692				     tp->t_rextslog_delta);
 693		if (error)
 694			goto out_undo_rextents;
 695	}
 696	spin_unlock(&mp->m_sb_lock);
 697	return;
 698
 699out_undo_rextents:
 700	if (tp->t_rextents_delta)
 701		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
 702out_undo_rblocks:
 703	if (tp->t_rblocks_delta)
 704		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
 705out_undo_rbmblocks:
 706	if (tp->t_rbmblocks_delta)
 707		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
 708out_undo_rextsize:
 709	if (tp->t_rextsize_delta)
 710		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
 711out_undo_imaxpct:
 712	if (tp->t_rextsize_delta)
 713		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
 714out_undo_agcount:
 715	if (tp->t_agcount_delta)
 716		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
 717out_undo_dblocks:
 718	if (tp->t_dblocks_delta)
 719		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
 720out_undo_frextents:
 721	if (rtxdelta)
 722		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
 723out_undo_ifree:
 724	spin_unlock(&mp->m_sb_lock);
 725	if (ifreedelta)
 726		xfs_mod_ifree(mp, -ifreedelta);
 727out_undo_icount:
 728	if (idelta)
 729		xfs_mod_icount(mp, -idelta);
 730out_undo_fdblocks:
 731	if (blkdelta)
 732		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 733out:
 734	ASSERT(error == 0);
 735	return;
 736}
 737
 738/* Add the given log item to the transaction's list of log items. */
 739void
 740xfs_trans_add_item(
 741	struct xfs_trans	*tp,
 742	struct xfs_log_item	*lip)
 743{
 744	ASSERT(lip->li_mountp == tp->t_mountp);
 745	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 746	ASSERT(list_empty(&lip->li_trans));
 747	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
 748
 749	list_add_tail(&lip->li_trans, &tp->t_items);
 750	trace_xfs_trans_add_item(tp, _RET_IP_);
 751}
 752
 753/*
  754 * Unlink the log item from the transaction. The log item is no longer
 755 * considered dirty in this transaction, as the linked transaction has
 756 * finished, either by abort or commit completion.
 757 */
 758void
 759xfs_trans_del_item(
 760	struct xfs_log_item	*lip)
 761{
 762	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
 763	list_del_init(&lip->li_trans);
 764}
 765
 766/* Detach and unlock all of the items in a transaction */
 767static void
 768xfs_trans_free_items(
 769	struct xfs_trans	*tp,
 770	bool			abort)
 771{
 772	struct xfs_log_item	*lip, *next;
 773
 774	trace_xfs_trans_free_items(tp, _RET_IP_);
 775
 776	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
 777		xfs_trans_del_item(lip);
 778		if (abort)
 779			set_bit(XFS_LI_ABORTED, &lip->li_flags);
 780		if (lip->li_ops->iop_release)
 781			lip->li_ops->iop_release(lip);
 782	}
 783}
 784
 785static inline void
 786xfs_log_item_batch_insert(
 787	struct xfs_ail		*ailp,
 788	struct xfs_ail_cursor	*cur,
 789	struct xfs_log_item	**log_items,
 790	int			nr_items,
 791	xfs_lsn_t		commit_lsn)
 792{
 793	int	i;
 794
 795	spin_lock(&ailp->ail_lock);
 796	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 797	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 798
 799	for (i = 0; i < nr_items; i++) {
 800		struct xfs_log_item *lip = log_items[i];
 801
 802		if (lip->li_ops->iop_unpin)
 803			lip->li_ops->iop_unpin(lip, 0);
 804	}
 805}
 806
 807/*
 808 * Bulk operation version of xfs_trans_committed that takes a log vector of
 809 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 810 * minimise lock traffic.
 811 *
 812 * If we are called with the aborted flag set, it is because a log write during
 813 * a CIL checkpoint commit has failed. In this case, all the items in the
 814 * checkpoint have already gone through iop_committed and iop_committing, which
 815 * means that checkpoint commit abort handling is treated exactly the same
 816 * as an iclog write error even though we haven't started any IO yet. Hence in
 817 * this case all we need to do is iop_committed processing, followed by an
 818 * iop_unpin(aborted) call.
 819 *
 820 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 821 * at the end of the AIL, the insert cursor avoids the need to walk
 822 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 823 * call. This saves a lot of needless list walking and is a net win, even
  824 * though it slightly increases the amount of AIL lock traffic to set it up
 825 * and tear it down.
 826 */
 827void
 828xfs_trans_committed_bulk(
 829	struct xfs_ail		*ailp,
 830	struct xfs_log_vec	*log_vector,
 831	xfs_lsn_t		commit_lsn,
 832	bool			aborted)
 833{
 834#define LOG_ITEM_BATCH_SIZE	32
 835	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 836	struct xfs_log_vec	*lv;
 837	struct xfs_ail_cursor	cur;
 838	int			i = 0;
 839
 840	spin_lock(&ailp->ail_lock);
 841	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 842	spin_unlock(&ailp->ail_lock);
 843
 844	/* unpin all the log items */
 845	for (lv = log_vector; lv; lv = lv->lv_next ) {
 846		struct xfs_log_item	*lip = lv->lv_item;
 847		xfs_lsn_t		item_lsn;
 848
 849		if (aborted)
 850			set_bit(XFS_LI_ABORTED, &lip->li_flags);
 851
 852		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
 853			lip->li_ops->iop_release(lip);
 854			continue;
 855		}
 856
 857		if (lip->li_ops->iop_committed)
 858			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 859		else
 860			item_lsn = commit_lsn;
 861
 862		/* item_lsn of -1 means the item needs no further processing */
 863		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 864			continue;
 865
 866		/*
 867		 * if we are aborting the operation, no point in inserting the
 868		 * object into the AIL as we are in a shutdown situation.
 869		 */
 870		if (aborted) {
 871			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
 872			if (lip->li_ops->iop_unpin)
 873				lip->li_ops->iop_unpin(lip, 1);
 874			continue;
 875		}
 876
 877		if (item_lsn != commit_lsn) {
 878
 879			/*
 880			 * Not a bulk update option due to unusual item_lsn.
 881			 * Push into AIL immediately, rechecking the lsn once
 882			 * we have the ail lock. Then unpin the item. This does
 883			 * not affect the AIL cursor the bulk insert path is
 884			 * using.
 885			 */
 886			spin_lock(&ailp->ail_lock);
 887			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 888				xfs_trans_ail_update(ailp, lip, item_lsn);
 889			else
 890				spin_unlock(&ailp->ail_lock);
 891			if (lip->li_ops->iop_unpin)
 892				lip->li_ops->iop_unpin(lip, 0);
 893			continue;
 894		}
 895
 896		/* Item is a candidate for bulk AIL insert.  */
 897		log_items[i++] = lv->lv_item;
 898		if (i >= LOG_ITEM_BATCH_SIZE) {
 899			xfs_log_item_batch_insert(ailp, &cur, log_items,
 900					LOG_ITEM_BATCH_SIZE, commit_lsn);
 901			i = 0;
 902		}
 903	}
 904
 905	/* make sure we insert the remainder! */
 906	if (i)
 907		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 908
 909	spin_lock(&ailp->ail_lock);
 910	xfs_trans_ail_cursor_done(&cur);
 911	spin_unlock(&ailp->ail_lock);
 912}
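
Stripped of the XFS specifics, the batching idiom above reduces to this minimal sketch (struct item and flush_batch(), which would take the lock once per call, are hypothetical):

#define EXAMPLE_BATCH	32

struct item;
void flush_batch(struct item **batch, int n);

void
example_batched_insert(
	struct item	**items,
	int		count)
{
	struct item	*batch[EXAMPLE_BATCH];
	int		i, n = 0;

	for (i = 0; i < count; i++) {
		batch[n++] = items[i];
		if (n == EXAMPLE_BATCH) {
			flush_batch(batch, n);	/* one lock round-trip */
			n = 0;
		}
	}
	if (n)
		flush_batch(batch, n);	/* insert the remainder */
}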
 913
 914/*
 915 * Commit the given transaction to the log.
 916 *
  917 * The XFS disk error handling mechanism is not based on a typical
  918 * transaction abort mechanism. Logically after the filesystem
  919 * gets marked 'SHUTDOWN', we can't let any new transactions
  920 * be durable - i.e. committed to disk - because some metadata might
 921 * be inconsistent. In such cases, this returns an error, and the
 922 * caller may assume that all locked objects joined to the transaction
 923 * have already been unlocked as if the commit had succeeded.
 924 * Do not reference the transaction structure after this call.
 925 */
 926static int
 927__xfs_trans_commit(
 928	struct xfs_trans	*tp,
 929	bool			regrant)
 930{
 931	struct xfs_mount	*mp = tp->t_mountp;
 932	xfs_lsn_t		commit_lsn = -1;
 933	int			error = 0;
 934	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 935
 936	trace_xfs_trans_commit(tp, _RET_IP_);
 937
 938	/*
 939	 * Finish deferred items on final commit. Only permanent transactions
 940	 * should ever have deferred ops.
 941	 */
 942	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
 943		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 944	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
 945		error = xfs_defer_finish_noroll(&tp);
 946		if (error)
 947			goto out_unreserve;
 948	}
 949
 950	/*
 951	 * If there is nothing to be logged by the transaction,
 952	 * then unlock all of the items associated with the
 953	 * transaction and free the transaction structure.
 954	 * Also make sure to return any reserved blocks to
 955	 * the free pool.
 956	 */
 957	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 958		goto out_unreserve;
 959
 960	if (XFS_FORCED_SHUTDOWN(mp)) {
 961		error = -EIO;
 962		goto out_unreserve;
 963	}
 964
 965	ASSERT(tp->t_ticket != NULL);
 966
 967	/*
 968	 * If we need to update the superblock, then do it now.
 969	 */
 970	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 971		xfs_trans_apply_sb_deltas(tp);
 972	xfs_trans_apply_dquot_deltas(tp);
 973
 974	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
 975
 976	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 977	xfs_trans_free(tp);
 978
 979	/*
 980	 * If the transaction needs to be synchronous, then force the
 981	 * log out now and wait for it.
 982	 */
 983	if (sync) {
 984		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 985		XFS_STATS_INC(mp, xs_trans_sync);
 986	} else {
 987		XFS_STATS_INC(mp, xs_trans_async);
 988	}
 989
 990	return error;
 991
 992out_unreserve:
 993	xfs_trans_unreserve_and_mod_sb(tp);
 994
 995	/*
 996	 * It is indeed possible for the transaction to be not dirty but
 997	 * the dqinfo portion to be.  All that means is that we have some
 998	 * (non-persistent) quota reservations that need to be unreserved.
 999	 */
1000	xfs_trans_unreserve_and_mod_dquots(tp);
1001	if (tp->t_ticket) {
1002		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
1003		if (commit_lsn == -1 && !error)
1004			error = -EIO;
1005		tp->t_ticket = NULL;
1006	}
1007	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1008	xfs_trans_free_items(tp, !!error);
1009	xfs_trans_free(tp);
1010
1011	XFS_STATS_INC(mp, xs_trans_empty);
1012	return error;
1013}
1014
1015int
1016xfs_trans_commit(
1017	struct xfs_trans	*tp)
1018{
1019	return __xfs_trans_commit(tp, false);
1020}
1021
1022/*
1023 * Unlock all of the transaction's items and free the transaction.
1024 * The transaction must not have modified any of its items, because
1025 * there is no way to restore them to their previous state.
1026 *
1027 * If the transaction has made a log reservation, make sure to release
1028 * it as well.
1029 */
1030void
1031xfs_trans_cancel(
1032	struct xfs_trans	*tp)
1033{
1034	struct xfs_mount	*mp = tp->t_mountp;
1035	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1036
1037	trace_xfs_trans_cancel(tp, _RET_IP_);
1038
1039	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
1040		xfs_defer_cancel(tp);
1041
1042	/*
1043	 * See if the caller is relying on us to shut down the
1044	 * filesystem.  This happens in paths where we detect
1045	 * corruption and decide to give up.
1046	 */
1047	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1048		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1049		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1050	}
1051#ifdef DEBUG
1052	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1053		struct xfs_log_item *lip;
1054
1055		list_for_each_entry(lip, &tp->t_items, li_trans)
1056			ASSERT(!(lip->li_type == XFS_LI_EFD));
1057	}
1058#endif
1059	xfs_trans_unreserve_and_mod_sb(tp);
1060	xfs_trans_unreserve_and_mod_dquots(tp);
1061
1062	if (tp->t_ticket) {
1063		xfs_log_done(mp, tp->t_ticket, NULL, false);
1064		tp->t_ticket = NULL;
1065	}
1066
1067	/* mark this thread as no longer being in a transaction */
1068	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1069
1070	xfs_trans_free_items(tp, dirty);
1071	xfs_trans_free(tp);
1072}
1073
1074/*
1075 * Roll from one trans in the sequence of PERMANENT transactions to
1076 * the next: permanent transactions are only flushed out when
 1077 * committed with xfs_trans_commit(), but we still want to let
 1078 * chunks of it go to the log as soon as possible. So we commit the
1079 * chunk we've been working on and get a new transaction to continue.
1080 */
1081int
1082xfs_trans_roll(
1083	struct xfs_trans	**tpp)
1084{
1085	struct xfs_trans	*trans = *tpp;
1086	struct xfs_trans_res	tres;
1087	int			error;
1088
1089	trace_xfs_trans_roll(trans, _RET_IP_);
1090
1091	/*
1092	 * Copy the critical parameters from one trans to the next.
1093	 */
1094	tres.tr_logres = trans->t_log_res;
1095	tres.tr_logcount = trans->t_log_count;
1096
1097	*tpp = xfs_trans_dup(trans);
1098
1099	/*
1100	 * Commit the current transaction.
1101	 * If this commit failed, then it'd just unlock those items that
1102	 * are not marked ihold. That also means that a filesystem shutdown
1103	 * is in progress. The caller takes the responsibility to cancel
1104	 * the duplicate transaction that gets returned.
1105	 */
1106	error = __xfs_trans_commit(trans, true);
1107	if (error)
1108		return error;
1109
1110	/*
1111	 * Reserve space in the log for the next transaction.
1112	 * This also pushes items in the "AIL", the list of logged items,
1113	 * out to disk if they are taking up space at the tail of the log
1114	 * that we want to use.  This requires that either nothing be locked
1115	 * across this call, or that anything that is locked be logged in
1116	 * the prior and the next transactions.
1117	 */
1118	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1119	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1120}
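
A sketch of the loop callers build around xfs_trans_roll() (do_one_chunk() is hypothetical; compare the truncate path): each pass dirties a bounded chunk, rolls to push it to the log, and rejoins the still-locked inode to the fresh transaction:

int do_one_chunk(struct xfs_trans *tp, struct xfs_inode *ip, bool *done);

static int
example_roll_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	bool	done = false;
	int	error;

	do {
		error = do_one_chunk(*tpp, ip, &done);	/* dirties *tpp */
		if (error)
			return error;
		error = xfs_trans_roll(tpp);	/* commit chunk, get new tp */
		if (error)
			return error;
		xfs_trans_ijoin(*tpp, ip, 0);	/* carry the inode forward */
	} while (!done);
	return 0;
}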
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   4 * Copyright (C) 2010 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_trans_resv.h"
  13#include "xfs_mount.h"
  14#include "xfs_extent_busy.h"
  15#include "xfs_quota.h"
  16#include "xfs_trans.h"
  17#include "xfs_trans_priv.h"
  18#include "xfs_log.h"
  19#include "xfs_log_priv.h"
  20#include "xfs_trace.h"
  21#include "xfs_error.h"
  22#include "xfs_defer.h"
  23#include "xfs_inode.h"
  24#include "xfs_dquot_item.h"
  25#include "xfs_dquot.h"
  26#include "xfs_icache.h"
  27
  28struct kmem_cache	*xfs_trans_cache;
  29
  30#if defined(CONFIG_TRACEPOINTS)
  31static void
  32xfs_trans_trace_reservations(
  33	struct xfs_mount	*mp)
  34{
  35	struct xfs_trans_res	*res;
  36	struct xfs_trans_res	*end_res;
  37	int			i;
  38
  39	res = (struct xfs_trans_res *)M_RES(mp);
  40	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
  41	for (i = 0; res < end_res; i++, res++)
  42		trace_xfs_trans_resv_calc(mp, i, res);
  43}
  44#else
  45# define xfs_trans_trace_reservations(mp)
  46#endif
  47
  48/*
  49 * Initialize the precomputed transaction reservation values
  50 * in the mount structure.
  51 */
  52void
  53xfs_trans_init(
  54	struct xfs_mount	*mp)
  55{
  56	xfs_trans_resv_calc(mp, M_RES(mp));
  57	xfs_trans_trace_reservations(mp);
  58}
  59
  60/*
  61 * Free the transaction structure.  If there is more clean up
  62 * to do when the structure is freed, add it here.
  63 */
  64STATIC void
  65xfs_trans_free(
  66	struct xfs_trans	*tp)
  67{
  68	xfs_extent_busy_sort(&tp->t_busy);
  69	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  70
  71	trace_xfs_trans_free(tp, _RET_IP_);
  72	xfs_trans_clear_context(tp);
  73	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  74		sb_end_intwrite(tp->t_mountp->m_super);
  75	xfs_trans_free_dqinfo(tp);
  76	kmem_cache_free(xfs_trans_cache, tp);
  77}
  78
  79/*
  80 * This is called to create a new transaction which will share the
  81 * permanent log reservation of the given transaction.  The remaining
  82 * unused block and rt extent reservations are also inherited.  This
  83 * implies that the original transaction is no longer allowed to allocate
   84 * blocks.  Locks and log items, however, are not inherited.  They must
  85 * be added to the new transaction explicitly.
  86 */
  87STATIC struct xfs_trans *
  88xfs_trans_dup(
  89	struct xfs_trans	*tp)
  90{
  91	struct xfs_trans	*ntp;
  92
  93	trace_xfs_trans_dup(tp, _RET_IP_);
  94
  95	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
  96
  97	/*
  98	 * Initialize the new transaction structure.
  99	 */
 100	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
 101	ntp->t_mountp = tp->t_mountp;
 102	INIT_LIST_HEAD(&ntp->t_items);
 103	INIT_LIST_HEAD(&ntp->t_busy);
 104	INIT_LIST_HEAD(&ntp->t_dfops);
 105	ntp->t_firstblock = NULLFSBLOCK;
 106
 107	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 108	ASSERT(tp->t_ticket != NULL);
 109
 110	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 111		       (tp->t_flags & XFS_TRANS_RESERVE) |
 112		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
 113		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
 114	/* We gave our writer reference to the new transaction */
 115	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 116	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 117
 118	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
 119	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 120	tp->t_blk_res = tp->t_blk_res_used;
 121
 122	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 123	tp->t_rtx_res = tp->t_rtx_res_used;
 124
 125	xfs_trans_switch_context(tp, ntp);
 126
 127	/* move deferred ops over to the new tp */
 128	xfs_defer_move(ntp, tp);
 129
 130	xfs_trans_dup_dqinfo(tp, ntp);
 131	return ntp;
 132}
 133
 134/*
 135 * This is called to reserve free disk blocks and log space for the
 136 * given transaction.  This must be done before allocating any resources
 137 * within the transaction.
 138 *
 139 * This will return ENOSPC if there are not enough blocks available.
 140 * It will sleep waiting for available log space.
 141 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 142 * is used by long running transactions.  If any one of the reservations
 143 * fails then they will all be backed out.
 144 *
 145 * This does not do quota reservations. That typically is done by the
 146 * caller afterwards.
 147 */
 148static int
 149xfs_trans_reserve(
 150	struct xfs_trans	*tp,
 151	struct xfs_trans_res	*resp,
 152	uint			blocks,
 153	uint			rtextents)
 154{
 155	struct xfs_mount	*mp = tp->t_mountp;
 156	int			error = 0;
 157	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 158
 159	/*
 160	 * Attempt to reserve the needed disk blocks by decrementing
 161	 * the number needed from the number available.  This will
 162	 * fail if the count would go below zero.
 163	 */
 164	if (blocks > 0) {
 165		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
 166		if (error != 0)
 167			return -ENOSPC;
 168		tp->t_blk_res += blocks;
 169	}
 170
 171	/*
 172	 * Reserve the log space needed for this transaction.
 173	 */
 174	if (resp->tr_logres > 0) {
 175		bool	permanent = false;
 176
 177		ASSERT(tp->t_log_res == 0 ||
 178		       tp->t_log_res == resp->tr_logres);
 179		ASSERT(tp->t_log_count == 0 ||
 180		       tp->t_log_count == resp->tr_logcount);
 181
 182		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 183			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 184			permanent = true;
 185		} else {
 186			ASSERT(tp->t_ticket == NULL);
 187			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 188		}
 189
 190		if (tp->t_ticket != NULL) {
 191			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 192			error = xfs_log_regrant(mp, tp->t_ticket);
 193		} else {
 194			error = xfs_log_reserve(mp, resp->tr_logres,
 195						resp->tr_logcount,
 196						&tp->t_ticket, permanent);
 197		}
 198
 199		if (error)
 200			goto undo_blocks;
 201
 202		tp->t_log_res = resp->tr_logres;
 203		tp->t_log_count = resp->tr_logcount;
 204	}
 205
 206	/*
 207	 * Attempt to reserve the needed realtime extents by decrementing
 208	 * the number needed from the number available.  This will
 209	 * fail if the count would go below zero.
 210	 */
 211	if (rtextents > 0) {
 212		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
 213		if (error) {
 214			error = -ENOSPC;
 215			goto undo_log;
 216		}
 217		tp->t_rtx_res += rtextents;
 218	}
 219
 220	return 0;
 221
 222	/*
 223	 * Error cases jump to one of these labels to undo any
 224	 * reservations which have already been performed.
 225	 */
 226undo_log:
 227	if (resp->tr_logres > 0) {
 228		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
 229		tp->t_ticket = NULL;
 230		tp->t_log_res = 0;
 231		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 232	}
 233
 234undo_blocks:
 235	if (blocks > 0) {
 236		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
 237		tp->t_blk_res = 0;
 238	}
 239	return error;
 240}
 241
 242int
 243xfs_trans_alloc(
 244	struct xfs_mount	*mp,
 245	struct xfs_trans_res	*resp,
 246	uint			blocks,
 247	uint			rtextents,
 248	uint			flags,
 249	struct xfs_trans	**tpp)
 250{
 251	struct xfs_trans	*tp;
 252	bool			want_retry = true;
 253	int			error;
 254
 255	/*
 256	 * Allocate the handle before we do our freeze accounting and setting up
 257	 * GFP_NOFS allocation context so that we avoid lockdep false positives
 258	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
 259	 */
 260retry:
 261	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
 262	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 263		sb_start_intwrite(mp->m_super);
 264	xfs_trans_set_context(tp);
 265
 266	/*
 267	 * Zero-reservation ("empty") transactions can't modify anything, so
 268	 * they're allowed to run while we're frozen.
 269	 */
 270	WARN_ON(resp->tr_logres > 0 &&
 271		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 272	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
 273	       xfs_has_lazysbcount(mp));
 274
 275	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 276	tp->t_flags = flags;
 277	tp->t_mountp = mp;
 278	INIT_LIST_HEAD(&tp->t_items);
 279	INIT_LIST_HEAD(&tp->t_busy);
 280	INIT_LIST_HEAD(&tp->t_dfops);
 281	tp->t_firstblock = NULLFSBLOCK;
 282
 283	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 284	if (error == -ENOSPC && want_retry) {
 285		xfs_trans_cancel(tp);
 286
 287		/*
 288		 * We weren't able to reserve enough space for the transaction.
 289		 * Flush the other speculative space allocations to free space.
 290		 * Do not perform a synchronous scan because callers can hold
 291		 * other locks.
 292		 */
 293		xfs_blockgc_flush_all(mp);
 294		want_retry = false;
 295		goto retry;
 296	}
 297	if (error) {
 298		xfs_trans_cancel(tp);
 299		return error;
 300	}
 301
 302	trace_xfs_trans_alloc(tp, _RET_IP_);
 303
 304	*tpp = tp;
 305	return 0;
 306}
 307
 308/*
 309 * Create an empty transaction with no reservation.  This is a defensive
 310 * mechanism for routines that query metadata without actually modifying them --
 311 * if the metadata being queried is somehow cross-linked (think a btree block
 312 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 313 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 314 * notice the corrupt block and the operation will fail back to userspace
 315 * without deadlocking.
 316 *
 317 * Note the zero-length reservation; this transaction MUST be cancelled without
 318 * any dirty data.
 319 *
 320 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 321 * where we can be grabbing buffers at the same time that freeze is trying to
 322 * drain the buffer LRU list.
 323 */
 324int
 325xfs_trans_alloc_empty(
 326	struct xfs_mount		*mp,
 327	struct xfs_trans		**tpp)
 328{
 329	struct xfs_trans_res		resv = {0};
 330
 331	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
 332}
 333
 334/*
 335 * Record the indicated change to the given field for application
 336 * to the file system's superblock when the transaction commits.
 337 * For now, just store the change in the transaction structure.
 338 *
 339 * Mark the transaction structure to indicate that the superblock
 340 * needs to be updated before committing.
 341 *
 342 * Because we may not be keeping track of allocated/free inodes and
 343 * used filesystem blocks in the superblock, we do not mark the
 344 * superblock dirty in this transaction if we modify these fields.
 345 * We still need to update the transaction deltas so that they get
 346 * applied to the incore superblock, but we don't want them to
 347 * cause the superblock to get locked and logged if these are the
 348 * only fields in the superblock that the transaction modifies.
 349 */
 350void
 351xfs_trans_mod_sb(
 352	xfs_trans_t	*tp,
 353	uint		field,
 354	int64_t		delta)
 355{
 356	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 357	xfs_mount_t	*mp = tp->t_mountp;
 358
 359	switch (field) {
 360	case XFS_TRANS_SB_ICOUNT:
 361		tp->t_icount_delta += delta;
 362		if (xfs_has_lazysbcount(mp))
 363			flags &= ~XFS_TRANS_SB_DIRTY;
 364		break;
 365	case XFS_TRANS_SB_IFREE:
 366		tp->t_ifree_delta += delta;
 367		if (xfs_has_lazysbcount(mp))
 368			flags &= ~XFS_TRANS_SB_DIRTY;
 369		break;
 370	case XFS_TRANS_SB_FDBLOCKS:
 371		/*
 372		 * Track the number of blocks allocated in the transaction.
 373		 * Make sure it does not exceed the number reserved. If so,
 374		 * shutdown as this can lead to accounting inconsistency.
 375		 */
 376		if (delta < 0) {
 377			tp->t_blk_res_used += (uint)-delta;
 378			if (tp->t_blk_res_used > tp->t_blk_res)
 379				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 380		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
 381			int64_t	blkres_delta;
 382
 383			/*
 384			 * Return freed blocks directly to the reservation
 385			 * instead of the global pool, being careful not to
 386			 * overflow the trans counter. This is used to preserve
 387			 * reservation across chains of transaction rolls that
 388			 * repeatedly free and allocate blocks.
 389			 */
 390			blkres_delta = min_t(int64_t, delta,
 391					     UINT_MAX - tp->t_blk_res);
 392			tp->t_blk_res += blkres_delta;
 393			delta -= blkres_delta;
 394		}
 395		tp->t_fdblocks_delta += delta;
 396		if (xfs_has_lazysbcount(mp))
 397			flags &= ~XFS_TRANS_SB_DIRTY;
 398		break;
 399	case XFS_TRANS_SB_RES_FDBLOCKS:
 400		/*
 401		 * The allocation has already been applied to the
 402		 * in-core superblock's counter.  This should only
 403		 * be applied to the on-disk superblock.
 404		 */
 405		tp->t_res_fdblocks_delta += delta;
 406		if (xfs_has_lazysbcount(mp))
 407			flags &= ~XFS_TRANS_SB_DIRTY;
 408		break;
 409	case XFS_TRANS_SB_FREXTENTS:
 410		/*
 411		 * Track the number of blocks allocated in the
 412		 * transaction.  Make sure it does not exceed the
 413		 * number reserved.
 414		 */
 415		if (delta < 0) {
 416			tp->t_rtx_res_used += (uint)-delta;
 417			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 418		}
 419		tp->t_frextents_delta += delta;
 420		break;
 421	case XFS_TRANS_SB_RES_FREXTENTS:
 422		/*
 423		 * The allocation has already been applied to the
 424		 * in-core superblock's counter.  This should only
 425		 * be applied to the on-disk superblock.
 426		 */
 427		ASSERT(delta < 0);
 428		tp->t_res_frextents_delta += delta;
 429		break;
 430	case XFS_TRANS_SB_DBLOCKS:
 431		tp->t_dblocks_delta += delta;
 432		break;
 433	case XFS_TRANS_SB_AGCOUNT:
 434		ASSERT(delta > 0);
 435		tp->t_agcount_delta += delta;
 436		break;
 437	case XFS_TRANS_SB_IMAXPCT:
 438		tp->t_imaxpct_delta += delta;
 439		break;
 440	case XFS_TRANS_SB_REXTSIZE:
 441		tp->t_rextsize_delta += delta;
 442		break;
 443	case XFS_TRANS_SB_RBMBLOCKS:
 444		tp->t_rbmblocks_delta += delta;
 445		break;
 446	case XFS_TRANS_SB_RBLOCKS:
 447		tp->t_rblocks_delta += delta;
 448		break;
 449	case XFS_TRANS_SB_REXTENTS:
 450		tp->t_rextents_delta += delta;
 451		break;
 452	case XFS_TRANS_SB_REXTSLOG:
 453		tp->t_rextslog_delta += delta;
 454		break;
 455	default:
 456		ASSERT(0);
 457		return;
 458	}
 459
 460	tp->t_flags |= flags;
 461}
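
A worked example for the XFS_TRANS_RES_FDBLKS branch above, with assumed numbers: if t_blk_res is two short of UINT_MAX and a freed extent supplies delta == 5, only min(5, 2) == 2 blocks refill the reservation; the other 3 fall through to t_fdblocks_delta:

static int64_t
example_res_fdblks_split(
	uint		blk_res,	/* e.g. UINT_MAX - 2 */
	int64_t		delta)		/* e.g. 5 freed blocks */
{
	int64_t	blkres_delta = min_t(int64_t, delta, UINT_MAX - blk_res);

	return delta - blkres_delta;	/* 3 blocks left for the sb counter */
}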
 462
 463/*
 464 * xfs_trans_apply_sb_deltas() is called from the commit code
 465 * to bring the superblock buffer into the current transaction
 466 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 467 *
 468 * For now we just look at each field allowed to change and change
 469 * it if necessary.
 470 */
 471STATIC void
 472xfs_trans_apply_sb_deltas(
 473	xfs_trans_t	*tp)
 474{
 475	struct xfs_dsb	*sbp;
 476	struct xfs_buf	*bp;
 477	int		whole = 0;
 478
 479	bp = xfs_trans_getsb(tp);
 480	sbp = bp->b_addr;
 481
 482	/*
 483	 * Only update the superblock counters if we are logging them
 484	 */
 485	if (!xfs_has_lazysbcount((tp->t_mountp))) {
 486		if (tp->t_icount_delta)
 487			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 488		if (tp->t_ifree_delta)
 489			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 490		if (tp->t_fdblocks_delta)
 491			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 492		if (tp->t_res_fdblocks_delta)
 493			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 494	}
 495
 496	/*
 497	 * Updating frextents requires careful handling because it does not
 498	 * behave like the lazysb counters because we cannot rely on log
  499 * recovery in older kernels to recompute the value from the rtbitmap.
 500	 * This means that the ondisk frextents must be consistent with the
 501	 * rtbitmap.
 502	 *
 503	 * Therefore, log the frextents change to the ondisk superblock and
 504	 * update the incore superblock so that future calls to xfs_log_sb
 505	 * write the correct value ondisk.
 506	 *
 507	 * Don't touch m_frextents because it includes incore reservations,
 508	 * and those are handled by the unreserve function.
 509	 */
 510	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
 511		struct xfs_mount	*mp = tp->t_mountp;
 512		int64_t			rtxdelta;
 513
 514		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;
 515
 516		spin_lock(&mp->m_sb_lock);
 517		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
 518		mp->m_sb.sb_frextents += rtxdelta;
 519		spin_unlock(&mp->m_sb_lock);
 520	}
 521
 522	if (tp->t_dblocks_delta) {
 523		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 524		whole = 1;
 525	}
 526	if (tp->t_agcount_delta) {
 527		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 528		whole = 1;
 529	}
 530	if (tp->t_imaxpct_delta) {
 531		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 532		whole = 1;
 533	}
 534	if (tp->t_rextsize_delta) {
 535		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 536		whole = 1;
 537	}
 538	if (tp->t_rbmblocks_delta) {
 539		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 540		whole = 1;
 541	}
 542	if (tp->t_rblocks_delta) {
 543		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 544		whole = 1;
 545	}
 546	if (tp->t_rextents_delta) {
 547		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 548		whole = 1;
 549	}
 550	if (tp->t_rextslog_delta) {
 551		sbp->sb_rextslog += tp->t_rextslog_delta;
 552		whole = 1;
 553	}
 554
 555	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 556	if (whole)
 557		/*
 558		 * Log the whole thing, the fields are noncontiguous.
 559		 */
 560		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
 561	else
 562		/*
 563		 * Since all the modifiable fields are contiguous, we
 564		 * can get away with this.
 565		 */
 566		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
 567				  offsetof(struct xfs_dsb, sb_frextents) +
 568				  sizeof(sbp->sb_frextents) - 1);
 569}
 570
 571/*
 572 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 573 * apply superblock counter changes to the in-core superblock.  The
 574 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
  575 * applied to the in-core superblock.  The idea is that this has already been
 576 * done.
 577 *
 578 * If we are not logging superblock counters, then the inode allocated/free and
 579 * used block counts are not updated in the on disk superblock. In this case,
 580 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 581 * still need to update the incore superblock with the changes.
 582 *
 583 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 584 * so we don't need to take the counter lock on every update.
 585 */
 586#define XFS_ICOUNT_BATCH	128
 587
 588void
 589xfs_trans_unreserve_and_mod_sb(
 590	struct xfs_trans	*tp)
 591{
 592	struct xfs_mount	*mp = tp->t_mountp;
 593	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 594	int64_t			blkdelta = 0;
 595	int64_t			rtxdelta = 0;
 596	int64_t			idelta = 0;
 597	int64_t			ifreedelta = 0;
 598	int			error;
 599
 600	/* calculate deltas */
 601	if (tp->t_blk_res > 0)
 602		blkdelta = tp->t_blk_res;
 603	if ((tp->t_fdblocks_delta != 0) &&
 604	    (xfs_has_lazysbcount(mp) ||
 605	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 606	        blkdelta += tp->t_fdblocks_delta;
 607
 608	if (tp->t_rtx_res > 0)
 609		rtxdelta = tp->t_rtx_res;
 610	if ((tp->t_frextents_delta != 0) &&
 611	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 612		rtxdelta += tp->t_frextents_delta;
 613
 614	if (xfs_has_lazysbcount(mp) ||
 615	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 616		idelta = tp->t_icount_delta;
 617		ifreedelta = tp->t_ifree_delta;
 618	}
 619
 620	/* apply the per-cpu counters */
 621	if (blkdelta) {
 622		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 623		ASSERT(!error);
 624	}
 625
 626	if (idelta)
 627		percpu_counter_add_batch(&mp->m_icount, idelta,
 628					 XFS_ICOUNT_BATCH);
 629
 630	if (ifreedelta)
 631		percpu_counter_add(&mp->m_ifree, ifreedelta);
 632
 633	if (rtxdelta) {
 634		error = xfs_mod_frextents(mp, rtxdelta);
 635		ASSERT(!error);
 636	}
 637
 638	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
 639		return;
 640
 641	/* apply remaining deltas */
 642	spin_lock(&mp->m_sb_lock);
 643	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
 644	mp->m_sb.sb_icount += idelta;
 645	mp->m_sb.sb_ifree += ifreedelta;
 646	/*
 647	 * Do not touch sb_frextents here because we are dealing with incore
 648	 * reservation.  sb_frextents is not part of the lazy sb counters so it
 649	 * must be consistent with the ondisk rtbitmap and must never include
 650	 * incore reservations.
 651	 */
 652	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
 653	mp->m_sb.sb_agcount += tp->t_agcount_delta;
 654	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
 655	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
 656	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
 657	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
 658	mp->m_sb.sb_rextents += tp->t_rextents_delta;
 659	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
 660	spin_unlock(&mp->m_sb_lock);
 661
 662	/*
 663	 * Debug checks outside of the spinlock so they don't lock up the
 664	 * machine if they fail.
 665	 */
 666	ASSERT(mp->m_sb.sb_imax_pct >= 0);
 667	ASSERT(mp->m_sb.sb_rextslog >= 0);
 668	return;
 669}
 670
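/*
 * Editor's illustrative sketch, not upstream code: what the batched
 * update above relies on.  percpu_counter_add_batch() accumulates the
 * delta in a CPU-local counter and only takes the shared counter lock
 * to fold it back once the local value drifts past +/-batch, so with
 * inode deltas of at most +/-64 and a batch of 128 most updates stay
 * lock-free.
 */
static inline void
xfs_example_mod_icount(
	struct xfs_mount	*mp,
	int64_t			idelta)
{
	percpu_counter_add_batch(&mp->m_icount, idelta, XFS_ICOUNT_BATCH);
}
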
 671/* Add the given log item to the transaction's list of log items. */
 672void
 673xfs_trans_add_item(
 674	struct xfs_trans	*tp,
 675	struct xfs_log_item	*lip)
 676{
 677	ASSERT(lip->li_log == tp->t_mountp->m_log);
 678	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 679	ASSERT(list_empty(&lip->li_trans));
 680	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
 681
 682	list_add_tail(&lip->li_trans, &tp->t_items);
 683	trace_xfs_trans_add_item(tp, _RET_IP_);
 684}
 685
 686/*
 687 * Unlink the log item from the transaction.  The log item is no longer
 688 * considered dirty in this transaction, as the linked transaction has
 689 * finished, either by abort or commit completion.
 690 */
 691void
 692xfs_trans_del_item(
 693	struct xfs_log_item	*lip)
 694{
 695	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
 696	list_del_init(&lip->li_trans);
 697}
 698
 699/* Detach and unlock all of the items in a transaction */
 700static void
 701xfs_trans_free_items(
 702	struct xfs_trans	*tp,
 703	bool			abort)
 704{
 705	struct xfs_log_item	*lip, *next;
 706
 707	trace_xfs_trans_free_items(tp, _RET_IP_);
 708
 709	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
 710		xfs_trans_del_item(lip);
 711		if (abort)
 712			set_bit(XFS_LI_ABORTED, &lip->li_flags);
 713		if (lip->li_ops->iop_release)
 714			lip->li_ops->iop_release(lip);
 715	}
 716}
 717
 718static inline void
 719xfs_log_item_batch_insert(
 720	struct xfs_ail		*ailp,
 721	struct xfs_ail_cursor	*cur,
 722	struct xfs_log_item	**log_items,
 723	int			nr_items,
 724	xfs_lsn_t		commit_lsn)
 725{
 726	int	i;
 727
 728	spin_lock(&ailp->ail_lock);
 729	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 730	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 731
 732	for (i = 0; i < nr_items; i++) {
 733		struct xfs_log_item *lip = log_items[i];
 734
 735		if (lip->li_ops->iop_unpin)
 736			lip->li_ops->iop_unpin(lip, 0);
 737	}
 738}
 739
 740/*
 741 * Bulk operation version of xfs_trans_committed that takes a log vector of
 742 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 743 * minimise lock traffic.
 744 *
 745 * If we are called with the aborted flag set, it is because a log write during
 746 * a CIL checkpoint commit has failed. In this case, all the items in the
 747 * checkpoint have already gone through iop_committed and iop_committing, which
 748 * means that checkpoint commit abort handling is treated exactly the same
 749 * as an iclog write error even though we haven't started any IO yet. Hence in
 750 * this case all we need to do is iop_committed processing, followed by an
 751 * iop_unpin(aborted) call.
 752 *
 753 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 754 * at the end of the AIL, the insert cursor avoids the need to walk
 755 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 756 * call. This saves a lot of needless list walking and is a net win, even
 757 * though it slightly increases the amount of AIL lock traffic to set it up
 758 * and tear it down.
 759 */
 760void
 761xfs_trans_committed_bulk(
 762	struct xfs_ail		*ailp,
 763	struct list_head	*lv_chain,
 764	xfs_lsn_t		commit_lsn,
 765	bool			aborted)
 766{
 767#define LOG_ITEM_BATCH_SIZE	32
 768	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 769	struct xfs_log_vec	*lv;
 770	struct xfs_ail_cursor	cur;
 771	int			i = 0;
 772
 773	spin_lock(&ailp->ail_lock);
 774	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 775	spin_unlock(&ailp->ail_lock);
 776
 777	/* unpin all the log items */
 778	list_for_each_entry(lv, lv_chain, lv_list) {
 779		struct xfs_log_item	*lip = lv->lv_item;
 780		xfs_lsn_t		item_lsn;
 781
 782		if (aborted)
 783			set_bit(XFS_LI_ABORTED, &lip->li_flags);
 784
 785		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
 786			lip->li_ops->iop_release(lip);
 787			continue;
 788		}
 789
 790		if (lip->li_ops->iop_committed)
 791			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 792		else
 793			item_lsn = commit_lsn;
 794
 795		/* item_lsn of -1 means the item needs no further processing */
 796		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 797			continue;
 798
 799		/*
 800		 * If we are aborting the operation, there is no point in inserting
 801		 * the object into the AIL as we are in a shutdown situation.
 802		 */
 803		if (aborted) {
 804			ASSERT(xlog_is_shutdown(ailp->ail_log));
 805			if (lip->li_ops->iop_unpin)
 806				lip->li_ops->iop_unpin(lip, 1);
 807			continue;
 808		}
 809
 810		if (item_lsn != commit_lsn) {
 811
 812			/*
 813			 * Not a bulk update candidate due to an unusual item_lsn.
 814			 * Push into AIL immediately, rechecking the lsn once
 815			 * we have the ail lock. Then unpin the item. This does
 816			 * not affect the AIL cursor the bulk insert path is
 817			 * using.
 818			 */
 819			spin_lock(&ailp->ail_lock);
 820			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 821				xfs_trans_ail_update(ailp, lip, item_lsn);
 822			else
 823				spin_unlock(&ailp->ail_lock);
 824			if (lip->li_ops->iop_unpin)
 825				lip->li_ops->iop_unpin(lip, 0);
 826			continue;
 827		}
 828
 829		/* Item is a candidate for bulk AIL insert.  */
 830		log_items[i++] = lv->lv_item;
 831		if (i >= LOG_ITEM_BATCH_SIZE) {
 832			xfs_log_item_batch_insert(ailp, &cur, log_items,
 833					LOG_ITEM_BATCH_SIZE, commit_lsn);
 834			i = 0;
 835		}
 836	}
 837
 838	/* make sure we insert the remainder! */
 839	if (i)
 840		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 841
 842	spin_lock(&ailp->ail_lock);
 843	xfs_trans_ail_cursor_done(&cur);
 844	spin_unlock(&ailp->ail_lock);
 845}
 846
 847/*
 848 * Sort transaction items prior to running precommit operations. This will
 849 * attempt to order the items such that they will always be locked in the same
 850 * order. Items that have no sort function are moved to the end of the list
 851 * and so are locked last.
 852 *
 853 * This may need refinement as different types of objects add sort functions.
 854 *
 855 * The function is more complex than it needs to be because we compare 64 bit
 856 * values and list_sort() only accepts a 32 bit return value.
 857 */
 858static int
 859xfs_trans_precommit_sort(
 860	void			*unused_arg,
 861	const struct list_head	*a,
 862	const struct list_head	*b)
 863{
 864	struct xfs_log_item	*lia = container_of(a,
 865					struct xfs_log_item, li_trans);
 866	struct xfs_log_item	*lib = container_of(b,
 867					struct xfs_log_item, li_trans);
 868	int64_t			diff;
 869
 870	/*
 871	 * If both items are non-sortable, leave them alone. If only one is
 872	 * sortable, move the non-sortable item towards the end of the list.
 873	 */
 874	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
 875		return 0;
 876	if (!lia->li_ops->iop_sort)
 877		return 1;
 878	if (!lib->li_ops->iop_sort)
 879		return -1;
 880
 881	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
 882	if (diff < 0)
 883		return -1;
 884	if (diff > 0)
 885		return 1;
 886	return 0;
 887}
 888
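/*
 * Editor's illustrative sketch, not upstream code: why the explicit
 * -1/0/1 reduction above is needed.  Returning the 64 bit difference
 * through the int return type that list_sort() requires truncates it:
 */
static int
xfs_example_broken_cmp(
	int64_t			a,
	int64_t			b)
{
	/*
	 * Wrong: a - b == 1LL << 32 truncates to 0 ("equal") and
	 * a - b == 1LL << 31 truncates to INT_MIN ("a < b"), so the
	 * sort order would be corrupted for large keys.
	 */
	return a - b;
}
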
 889/*
 890 * Run transaction precommit functions.
 891 *
 892 * If there is an error in any of the callouts, then stop immediately and
 893 * trigger a shutdown to abort the transaction. There is no recovery possible
 894 * from errors at this point as the transaction is dirty....
 895 */
 896static int
 897xfs_trans_run_precommits(
 898	struct xfs_trans	*tp)
 899{
 900	struct xfs_mount	*mp = tp->t_mountp;
 901	struct xfs_log_item	*lip, *n;
 902	int			error = 0;
 903
 904	/*
 905	 * Sort the item list to avoid ABBA deadlocks with other transactions
 906	 * running precommit operations that lock multiple shared items such as
 907	 * inode cluster buffers.
 908	 */
 909	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);
 910
 911	/*
 912	 * Precommit operations can remove the log item from the transaction
 913	 * if the log item exists purely to delay modifications until they
 914	 * can be ordered against other operations. Hence we have to use
 915	 * list_for_each_entry_safe() here.
 916	 */
 917	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
 918		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 919			continue;
 920		if (lip->li_ops->iop_precommit) {
 921			error = lip->li_ops->iop_precommit(tp, lip);
 922			if (error)
 923				break;
 924		}
 925	}
 926	if (error)
 927		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 928	return error;
 929}
 930
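/*
 * Editor's illustrative sketch, not upstream code: a log item type
 * opts into the sort and precommit passes above by filling in
 * ->iop_sort and ->iop_precommit in its ops vector.  The callbacks
 * below are made-up placeholders; inode log items, for example, sort
 * by inode number so that inode cluster buffers are always locked in
 * the same order.
 */
static uint64_t
xfs_example_item_sort(
	struct xfs_log_item	*lip)
{
	/* hypothetical sort key derived from the backing object */
	return (uint64_t)(uintptr_t)lip;
}

static int
xfs_example_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	/* hypothetical: fold delayed changes into a shared object */
	return 0;
}

static const struct xfs_item_ops xfs_example_item_ops = {
	.iop_sort	= xfs_example_item_sort,
	.iop_precommit	= xfs_example_item_precommit,
};
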
 931/*
 932 * Commit the given transaction to the log.
 933 *
 934 * The XFS disk error handling mechanism is not based on a typical
 935 * transaction abort mechanism. Logically, after the filesystem
 936 * gets marked 'SHUTDOWN', we can't let any new transactions
 937 * be durable - i.e. committed to disk - because some metadata might
 938 * be inconsistent. In such cases, this returns an error, and the
 939 * caller may assume that all locked objects joined to the transaction
 940 * have already been unlocked as if the commit had succeeded.
 941 * Do not reference the transaction structure after this call.
 942 */
 943static int
 944__xfs_trans_commit(
 945	struct xfs_trans	*tp,
 946	bool			regrant)
 947{
 948	struct xfs_mount	*mp = tp->t_mountp;
 949	struct xlog		*log = mp->m_log;
 950	xfs_csn_t		commit_seq = 0;
 951	int			error = 0;
 952	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 953
 954	trace_xfs_trans_commit(tp, _RET_IP_);
 955
 956	error = xfs_trans_run_precommits(tp);
 957	if (error) {
 958		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
 959			xfs_defer_cancel(tp);
 960		goto out_unreserve;
 961	}
 962
 963	/*
 964	 * Finish deferred items on final commit. Only permanent transactions
 965	 * should ever have deferred ops.
 966	 */
 967	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
 968		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 969	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
 970		error = xfs_defer_finish_noroll(&tp);
 971		if (error)
 972			goto out_unreserve;
 973	}
 974
 975	/*
 976	 * If there is nothing to be logged by the transaction,
 977	 * then unlock all of the items associated with the
 978	 * transaction and free the transaction structure.
 979	 * Also make sure to return any reserved blocks to
 980	 * the free pool.
 981	 */
 982	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 983		goto out_unreserve;
 984
 985	/*
 986	 * We must check against log shutdown here because we cannot abort log
 987	 * items and leave them dirty, inconsistent and unpinned in memory while
 988	 * the log is active. This leaves them open to being written back to
 989	 * disk, and that will lead to on-disk corruption.
 990	 */
 991	if (xlog_is_shutdown(log)) {
 992		error = -EIO;
 993		goto out_unreserve;
 994	}
 995
 996	ASSERT(tp->t_ticket != NULL);
 997
 998	/*
 999	 * If we need to update the superblock, then do it now.
1000	 */
1001	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
1002		xfs_trans_apply_sb_deltas(tp);
1003	xfs_trans_apply_dquot_deltas(tp);
1004
1005	xlog_cil_commit(log, tp, &commit_seq, regrant);
1006
1007	xfs_trans_free(tp);
1008
1009	/*
1010	 * If the transaction needs to be synchronous, then force the
1011	 * log out now and wait for it.
1012	 */
1013	if (sync) {
1014		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
1015		XFS_STATS_INC(mp, xs_trans_sync);
1016	} else {
1017		XFS_STATS_INC(mp, xs_trans_async);
1018	}
1019
1020	return error;
1021
1022out_unreserve:
1023	xfs_trans_unreserve_and_mod_sb(tp);
1024
1025	/*
1026	 * It is indeed possible for the transaction to be not dirty but
1027	 * the dqinfo portion to be.  All that means is that we have some
1028	 * (non-persistent) quota reservations that need to be unreserved.
1029	 */
1030	xfs_trans_unreserve_and_mod_dquots(tp);
1031	if (tp->t_ticket) {
1032		if (regrant && !xlog_is_shutdown(log))
1033			xfs_log_ticket_regrant(log, tp->t_ticket);
1034		else
1035			xfs_log_ticket_ungrant(log, tp->t_ticket);
1036		tp->t_ticket = NULL;
1037	}
1038	xfs_trans_free_items(tp, !!error);
1039	xfs_trans_free(tp);
1040
1041	XFS_STATS_INC(mp, xs_trans_empty);
1042	return error;
1043}
1044
1045int
1046xfs_trans_commit(
1047	struct xfs_trans	*tp)
1048{
1049	return __xfs_trans_commit(tp, false);
1050}
1051
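/*
 * Editor's illustrative sketch, not upstream code: the canonical
 * pattern around xfs_trans_commit().  xfs_example_touch() is made up;
 * the point is that once the ILOCK has been handed to the transaction
 * via xfs_trans_ijoin(), both xfs_trans_commit() and xfs_trans_cancel()
 * unlock the inode and free the transaction, so the caller only
 * propagates the error.
 */
static int
xfs_example_touch(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (xfs_is_shutdown(mp)) {
		xfs_trans_cancel(tp);	/* also drops the ILOCK */
		return -EIO;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* unlocks the inode and frees tp even on failure */
	return xfs_trans_commit(tp);
}
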
1052/*
1053 * Unlock all of the transaction's items and free the transaction.  If the
1054 * transaction is dirty, we must shut down the filesystem because there is no
1055 * way to restore them to their previous state.
1056 *
1057 * If the transaction has made a log reservation, make sure to release it as
1058 * well.
1059 *
1060 * This is a high level function (equivalent to xfs_trans_commit()) and so can
1061 * be called after the transaction has effectively been aborted due to the mount
1062 * being shut down. However, if the mount has not been shut down and the
1063 * transaction is dirty, we will shut the mount down and, in doing so,
1064 * guarantee that the log is shut down, too. Hence we don't need to be as
1065 * careful with shutdown state and dirty items here as we need to be in
1066 * xfs_trans_commit().
1067 */
1068void
1069xfs_trans_cancel(
1070	struct xfs_trans	*tp)
1071{
1072	struct xfs_mount	*mp = tp->t_mountp;
1073	struct xlog		*log = mp->m_log;
1074	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1075
1076	trace_xfs_trans_cancel(tp, _RET_IP_);
1077
1078	/*
1079	 * It's never valid to cancel a transaction with deferred ops attached,
1080	 * because the transaction is effectively dirty.  Complain about this
1081	 * loudly before freeing the in-memory defer items.
1082	 */
1083	if (!list_empty(&tp->t_dfops)) {
1084		ASSERT(xfs_is_shutdown(mp) || list_empty(&tp->t_dfops));
1085		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1086		dirty = true;
1087		xfs_defer_cancel(tp);
1088	}
1089
1090	/*
1091	 * See if the caller is relying on us to shut down the filesystem. We
1092	 * only want an error report if there isn't already a shutdown in
1093	 * progress, so we only need to check against the mount shutdown state
1094	 * here.
1095	 */
1096	if (dirty && !xfs_is_shutdown(mp)) {
1097		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1098		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1099	}
1100#ifdef DEBUG
1101	/* Log items need to be consistent until the log is shut down. */
1102	if (!dirty && !xlog_is_shutdown(log)) {
1103		struct xfs_log_item *lip;
1104
1105		list_for_each_entry(lip, &tp->t_items, li_trans)
1106			ASSERT(!xlog_item_is_intent_done(lip));
1107	}
1108#endif
1109	xfs_trans_unreserve_and_mod_sb(tp);
1110	xfs_trans_unreserve_and_mod_dquots(tp);
1111
1112	if (tp->t_ticket) {
1113		xfs_log_ticket_ungrant(log, tp->t_ticket);
1114		tp->t_ticket = NULL;
1115	}
1116
1117	xfs_trans_free_items(tp, dirty);
1118	xfs_trans_free(tp);
1119}
1120
1121/*
1122 * Roll from one trans in the sequence of PERMANENT transactions to
1123 * the next: permanent transactions are only flushed out when
1124 * committed with xfs_trans_commit(), but we still want to let chunks
1125 * of it go to the log as soon as possible. So we commit the chunk
1126 * we've been working on and get a new transaction to continue.
1127 */
1128int
1129xfs_trans_roll(
1130	struct xfs_trans	**tpp)
1131{
1132	struct xfs_trans	*trans = *tpp;
1133	struct xfs_trans_res	tres;
1134	int			error;
1135
1136	trace_xfs_trans_roll(trans, _RET_IP_);
1137
1138	/*
1139	 * Copy the critical parameters from one trans to the next.
1140	 */
1141	tres.tr_logres = trans->t_log_res;
1142	tres.tr_logcount = trans->t_log_count;
1143
1144	*tpp = xfs_trans_dup(trans);
1145
1146	/*
1147	 * Commit the current transaction.
1148	 * If this commit fails, it will only unlock those items that
1149	 * are not marked ihold. That also means that a filesystem shutdown
1150	 * is in progress. It is the caller's responsibility to cancel
1151	 * the duplicate transaction that gets returned.
1152	 */
1153	error = __xfs_trans_commit(trans, true);
1154	if (error)
1155		return error;
1156
1157	/*
1158	 * Reserve space in the log for the next transaction.
1159	 * This also pushes items in the "AIL", the list of logged items,
1160	 * out to disk if they are taking up space at the tail of the log
1161	 * that we want to use.  This requires that either nothing be locked
1162	 * across this call, or that anything that is locked be logged in
1163	 * the prior and the next transactions.
1164	 */
1165	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1166	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1167}
1168
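/*
 * Editor's illustrative sketch, not upstream code: a long-running
 * modification rolling between chunks.  The chunk count and per-chunk
 * work are placeholders; note that the inode must be re-joined after
 * every roll because log items are not inherited by the duplicate
 * transaction.
 */
static int
xfs_example_roll_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			chunks)
{
	int			error;

	while (chunks-- > 0) {
		/* ... modify one chunk of the inode and log it ... */
		xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

		error = xfs_trans_roll(tpp);
		if (error)
			return error;

		/* the new transaction starts with no items joined */
		xfs_trans_ijoin(*tpp, ip, 0);
	}
	return 0;
}
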
1169/*
1170 * Allocate a transaction, lock and join the inode to it, and reserve quota.
1171 *
1172 * The caller must ensure that the on-disk dquots attached to this inode have
1173 * already been allocated and initialized.  The caller is responsible for
1174 * releasing ILOCK_EXCL if a new transaction is returned.
1175 */
1176int
1177xfs_trans_alloc_inode(
1178	struct xfs_inode	*ip,
1179	struct xfs_trans_res	*resv,
1180	unsigned int		dblocks,
1181	unsigned int		rblocks,
1182	bool			force,
1183	struct xfs_trans	**tpp)
1184{
1185	struct xfs_trans	*tp;
1186	struct xfs_mount	*mp = ip->i_mount;
1187	bool			retried = false;
1188	int			error;
1189
1190retry:
1191	error = xfs_trans_alloc(mp, resv, dblocks,
1192			rblocks / mp->m_sb.sb_rextsize,
1193			force ? XFS_TRANS_RESERVE : 0, &tp);
1194	if (error)
1195		return error;
1196
1197	xfs_ilock(ip, XFS_ILOCK_EXCL);
1198	xfs_trans_ijoin(tp, ip, 0);
1199
1200	error = xfs_qm_dqattach_locked(ip, false);
1201	if (error) {
1202		/* Caller should have allocated the dquots! */
1203		ASSERT(error != -ENOENT);
1204		goto out_cancel;
1205	}
1206
1207	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
1208	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1209		xfs_trans_cancel(tp);
1210		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1211		xfs_blockgc_free_quota(ip, 0);
1212		retried = true;
1213		goto retry;
1214	}
1215	if (error)
1216		goto out_cancel;
1217
1218	*tpp = tp;
1219	return 0;
1220
1221out_cancel:
1222	xfs_trans_cancel(tp);
1223	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1224	return error;
1225}
1226
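/*
 * Editor's illustrative sketch, not upstream code: a caller of
 * xfs_trans_alloc_inode().  Because the inode was joined with
 * lock_flags of zero, committing does not drop ILOCK_EXCL; the caller
 * unlocks explicitly afterwards.  The reservation size and the elided
 * modification are placeholders.
 */
static int
xfs_example_write_prep(
	struct xfs_inode	*ip,
	unsigned int		dblocks)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(ip->i_mount)->tr_write,
			dblocks, 0, false, &tp);
	if (error)
		return error;

	/* ... allocate blocks and log the inode ... */

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
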
1227/*
1228 * Allocate a transaction in preparation for inode creation by reserving quota
1229 * against the given dquots.  Callers are not required to hold any inode locks.
1230 */
1231int
1232xfs_trans_alloc_icreate(
1233	struct xfs_mount	*mp,
1234	struct xfs_trans_res	*resv,
1235	struct xfs_dquot	*udqp,
1236	struct xfs_dquot	*gdqp,
1237	struct xfs_dquot	*pdqp,
1238	unsigned int		dblocks,
1239	struct xfs_trans	**tpp)
1240{
1241	struct xfs_trans	*tp;
1242	bool			retried = false;
1243	int			error;
1244
1245retry:
1246	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
1247	if (error)
1248		return error;
1249
1250	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
1251	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1252		xfs_trans_cancel(tp);
1253		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
1254		retried = true;
1255		goto retry;
1256	}
1257	if (error) {
1258		xfs_trans_cancel(tp);
1259		return error;
1260	}
1261
1262	*tpp = tp;
1263	return 0;
1264}
1265
1266/*
1267 * Allocate a transaction, lock and join the inode to it, and reserve quota
1268 * in preparation for inode attribute changes that include uid, gid, or prid
1269 * changes.
1270 *
1271 * The caller must ensure that the on-disk dquots attached to this inode have
1272 * already been allocated and initialized.  The ILOCK will be dropped when the
1273 * transaction is committed or cancelled.
1274 */
1275int
1276xfs_trans_alloc_ichange(
1277	struct xfs_inode	*ip,
1278	struct xfs_dquot	*new_udqp,
1279	struct xfs_dquot	*new_gdqp,
1280	struct xfs_dquot	*new_pdqp,
1281	bool			force,
1282	struct xfs_trans	**tpp)
1283{
1284	struct xfs_trans	*tp;
1285	struct xfs_mount	*mp = ip->i_mount;
1286	struct xfs_dquot	*udqp;
1287	struct xfs_dquot	*gdqp;
1288	struct xfs_dquot	*pdqp;
1289	bool			retried = false;
1290	int			error;
1291
1292retry:
1293	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
1294	if (error)
1295		return error;
1296
1297	xfs_ilock(ip, XFS_ILOCK_EXCL);
1298	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1299
1300	error = xfs_qm_dqattach_locked(ip, false);
1301	if (error) {
1302		/* Caller should have allocated the dquots! */
1303		ASSERT(error != -ENOENT);
1304		goto out_cancel;
1305	}
1306
1307	/*
1308	 * For each quota type, skip quota reservations if the inode's dquots
1309	 * now match the ones that came from the caller, or the caller didn't
1310	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
1311	 * perform a blockgc scan, so we must preserve the caller's arguments.
1312	 */
1313	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
1314	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
1315	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
1316	if (udqp || gdqp || pdqp) {
1317		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;
1318
1319		if (force)
1320			qflags |= XFS_QMOPT_FORCE_RES;
1321
1322		/*
1323		 * Reserve enough quota to handle blocks on disk and reserved
1324		 * for a delayed allocation.  We'll actually transfer the
1325		 * delalloc reservation between dquots at chown time, even
1326		 * though that part is only semi-transactional.
1327		 */
1328		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
1329				pdqp, ip->i_nblocks + ip->i_delayed_blks,
1330				1, qflags);
1331		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1332			xfs_trans_cancel(tp);
1333			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
1334			retried = true;
1335			goto retry;
1336		}
1337		if (error)
1338			goto out_cancel;
1339	}
1340
1341	*tpp = tp;
1342	return 0;
1343
1344out_cancel:
1345	xfs_trans_cancel(tp);
1346	return error;
1347}
1348
1349/*
1350 * Allocate a transaction, lock and join the directory and child inodes to it,
1351 * and reserve quota for a directory update.  If there isn't sufficient space,
1352 * @dblocks will be set to zero for a reservationless directory update and
1353 * @nospace_error will be set to a negative errno describing the space
1354 * constraint we hit.
1355 *
1356 * The caller must ensure that the on-disk dquots attached to this inode have
1357 * already been allocated and initialized.  The ILOCKs will be dropped when the
1358 * transaction is committed or cancelled.
1359 */
1360int
1361xfs_trans_alloc_dir(
1362	struct xfs_inode	*dp,
1363	struct xfs_trans_res	*resv,
1364	struct xfs_inode	*ip,
1365	unsigned int		*dblocks,
1366	struct xfs_trans	**tpp,
1367	int			*nospace_error)
1368{
1369	struct xfs_trans	*tp;
1370	struct xfs_mount	*mp = ip->i_mount;
1371	unsigned int		resblks;
1372	bool			retried = false;
1373	int			error;
1374
1375retry:
1376	*nospace_error = 0;
1377	resblks = *dblocks;
1378	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
1379	if (error == -ENOSPC) {
1380		*nospace_error = error;
1381		resblks = 0;
1382		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
1383	}
1384	if (error)
1385		return error;
1386
1387	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
1388
1389	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1390	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1391
1392	error = xfs_qm_dqattach_locked(dp, false);
1393	if (error) {
1394		/* Caller should have allocated the dquots! */
1395		ASSERT(error != -ENOENT);
1396		goto out_cancel;
1397	}
1398
1399	error = xfs_qm_dqattach_locked(ip, false);
1400	if (error) {
1401		/* Caller should have allocated the dquots! */
1402		ASSERT(error != -ENOENT);
1403		goto out_cancel;
1404	}
1405
1406	if (resblks == 0)
1407		goto done;
1408
1409	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
1410	if (error == -EDQUOT || error == -ENOSPC) {
1411		if (!retried) {
1412			xfs_trans_cancel(tp);
1413			xfs_blockgc_free_quota(dp, 0);
1414			retried = true;
1415			goto retry;
1416		}
1417
1418		*nospace_error = error;
1419		resblks = 0;
1420		error = 0;
1421	}
1422	if (error)
1423		goto out_cancel;
1424
1425done:
1426	*tpp = tp;
1427	*dblocks = resblks;
1428	return 0;
1429
1430out_cancel:
1431	xfs_trans_cancel(tp);
1432	return error;
1433}
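
/*
 * Editor's illustrative sketch, not upstream code, loosely modelled on
 * xfs_remove(): if xfs_trans_alloc_dir() could not make the full
 * reservation, the update runs without one and the deferred
 * nospace_error is only surfaced if the change then genuinely fails
 * for lack of space.  The elided directory update is a placeholder.
 */
static int
xfs_example_dir_update(
	struct xfs_inode	*dp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	unsigned int		resblks = XFS_REMOVE_SPACE_RES(mp);
	int			nospace_error = 0;
	int			error;

	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
			&tp, &nospace_error);
	if (error)
		return error;

	/* ... perform the directory update; may fail with -ENOSPC ... */

	error = xfs_trans_commit(tp);	/* drops both ILOCKs */
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}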